// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};
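
/*
 * Illustrative example (comment only, not used by the code): on a
 * device with four LUNs per channel, a target created over device LUNs
 * 2..3 gets a single nvm_ch_map with ch_off = 0, num_lun = 2 and
 * lun_offs = {2, 2}, so virtual (ch 0, lun 1) resolves to physical
 * (ch 0, lun 3). The concrete numbers are made up for the example.
 */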

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
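
/*
 * Worked example for the arithmetic above (illustrative values only):
 * with dev->geo.num_lun = 8, a request for lun_begin = 8 and
 * lun_end = 23 yields num_lun = 16, num_ch = 2, bch = 1 and blun = 0.
 * The target then owns physical channels 1 and 2 in full, both virtual
 * channels carry ch_off = 1, and the geometry is reported as
 * lun-balanced with 8 LUNs per channel.
 */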

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}
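
/*
 * Note on the sentinels above: 0xFFFF in lun_begin/lun_end or op means
 * "not set" in the ioctl ABI. For example (hypothetical values), an
 * extended create with lun_begin = lun_end = op = 0xFFFF claims every
 * LUN with NVM_TARGET_DEFAULT_OP over-provisioning, while an explicit
 * op must lie within [NVM_TARGET_MIN_OP, NVM_TARGET_MAX_OP].
 */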

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue(dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = tt->bops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}
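
/*
 * Translation example (illustrative): with ch_off = 1 and
 * lun_offs = {2, 2} for a target's first virtual channel,
 * nvm_map_to_dev() rewrites (ch 0, lun 1) to (ch 1, lun 3), and
 * nvm_map_to_tgt() applies the exact inverse through the device-wide
 * reverse map built in nvm_register_map().
 */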

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}
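
/*
 * Plane expansion example (illustrative): with pln_mode ==
 * NVM_PLANE_DOUBLE and two input ppas A and B, the list built above is
 * { A/pl0, B/pl0, A/pl1, B/pl1 } and rqd->nr_ppas becomes 4.
 */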

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}
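
/*
 * Flag example (1.2 spec only, since 2.0 always returns 0): a
 * sequential read on a dual-plane geometry gets
 * (NVM_PLANE_DOUBLE >> 1) | NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND.
 */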

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}
/*
 * Scans a 1.2 chunk's first and last page to determine its state. If
 * the chunk is found to be open, it is also scanned to update the
 * write pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}
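
/*
 * Write-pointer example for the scan above (illustrative): every sense
 * that returns valid data advances meta->wp by geo->ws_min, so three
 * valid page/plane reads with ws_min = 4 leave wp = 12 when the first
 * empty page terminates the scan.
 */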

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any plane's status is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
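
/*
 * Folding example (illustrative): in dual-plane mode, per-plane states
 * { NVM_BLK_T_FREE, NVM_BLK_T_GRWN_BAD } for one block fold to an
 * offline chunk, whereas { NVM_BLK_T_FREE, NVM_BLK_T_FREE } triggers a
 * write-pointer scan through nvm_bb_chunk_scan().
 */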

static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
					      meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);
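
/*
 * Typical driver-side usage (sketch under assumptions; the "mydrv"
 * names are hypothetical and error handling is elided):
 *
 *	struct nvm_dev *ndev = nvm_alloc_dev(node);
 *
 *	ndev->q = queue;
 *	ndev->ops = &mydrv_nvm_dev_ops;
 *	strlcpy(ndev->name, "mydrvn1", DISK_NAME_LEN);
 *	ret = nvm_register(ndev);
 *
 * nvm_register() drops the reference itself on failure; the matching
 * teardown for a registered device is nvm_unregister() below.
 */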
1208*4882a593Smuzhiyun
nvm_unregister(struct nvm_dev * dev)1209*4882a593Smuzhiyun void nvm_unregister(struct nvm_dev *dev)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun struct nvm_target *t, *tmp;
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun mutex_lock(&dev->mlock);
1214*4882a593Smuzhiyun list_for_each_entry_safe(t, tmp, &dev->targets, list) {
1215*4882a593Smuzhiyun if (t->dev->parent != dev)
1216*4882a593Smuzhiyun continue;
1217*4882a593Smuzhiyun __nvm_remove_target(t, false);
1218*4882a593Smuzhiyun kref_put(&dev->ref, nvm_free);
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun mutex_unlock(&dev->mlock);
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun down_write(&nvm_lock);
1223*4882a593Smuzhiyun list_del(&dev->devices);
1224*4882a593Smuzhiyun up_write(&nvm_lock);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun kref_put(&dev->ref, nvm_free);
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun EXPORT_SYMBOL(nvm_unregister);
1229*4882a593Smuzhiyun
__nvm_configure_create(struct nvm_ioctl_create * create)1230*4882a593Smuzhiyun static int __nvm_configure_create(struct nvm_ioctl_create *create)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun struct nvm_dev *dev;
1233*4882a593Smuzhiyun int ret;
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun down_write(&nvm_lock);
1236*4882a593Smuzhiyun dev = nvm_find_nvm_dev(create->dev);
1237*4882a593Smuzhiyun up_write(&nvm_lock);
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun if (!dev) {
1240*4882a593Smuzhiyun pr_err("device not found\n");
1241*4882a593Smuzhiyun return -EINVAL;
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun kref_get(&dev->ref);
1245*4882a593Smuzhiyun ret = nvm_create_tgt(dev, create);
1246*4882a593Smuzhiyun if (ret)
1247*4882a593Smuzhiyun kref_put(&dev->ref, nvm_free);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun return ret;
1250*4882a593Smuzhiyun }

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt;

		/* do not run past the fixed-size tgts[] array */
		if (tgt_iter >= ARRAY_SIZE(info->tgts))
			break;

		tgt = &info->tgts[tgt_iter];
		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		/* strlcpy guarantees NUL termination, unlike strncpy */
		strlcpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
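
/*
 * Userspace sketch for NVM_INFO (illustrative only; error handling mostly
 * elided, requires <stdio.h>, <fcntl.h>, <sys/ioctl.h> and
 * <linux/lightnvm.h>):
 *
 *	struct nvm_ioctl_info info = { 0 };
 *	int i, fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, NVM_INFO, &info))
 *		for (i = 0; i < info.tgtsize; i++)
 *			printf("target type: %s\n", info.tgts[i].tgtname);
 */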

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
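
/*
 * Userspace sketch for NVM_GET_DEVICES (illustrative only), listing the
 * registered devices by name; fd is the control node opened as in the
 * NVM_INFO example above:
 *
 *	struct nvm_ioctl_get_devices devs = { 0 };
 *	__u32 i;
 *
 *	if (!ioctl(fd, NVM_GET_DEVICES, &devs))
 *		for (i = 0; i < devs.nr_devices; i++)
 *			printf("device: %s\n", devs.info[i].devname);
 */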

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	/* NVM_TARGET_FACTORY is the only flag supported */
	if (create.flags & ~NVM_TARGET_FACTORY) {
		pr_err("flag not supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
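
/*
 * Userspace sketch for NVM_DEV_CREATE (illustrative only; the device and
 * target names are examples). A simple config with lun_begin/lun_end of -1
 * is commonly used to mean "all luns", assuming the simple-config
 * defaulting done elsewhere in this file:
 *
 *	struct nvm_ioctl_create c = {
 *		.dev = "nvme0n1",
 *		.tgttype = "pblk",
 *		.tgtname = "tgt0",
 *		.conf.type = NVM_CONFIG_TYPE_SIMPLE,
 *		.conf.s = { .lun_begin = -1, .lun_end = -1 },
 *	};
 *
 *	if (ioctl(fd, NVM_DEV_CREATE, &c))
 *		perror("NVM_DEV_CREATE");
 */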

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}
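
/*
 * Userspace sketch for NVM_DEV_REMOVE (illustrative only), tearing down
 * the target created in the example above:
 *
 *	struct nvm_ioctl_remove r = { .tgtname = "tgt0", .flags = 0 };
 *
 *	if (ioctl(fd, NVM_DEV_REMOVE, &r))
 *		perror("NVM_DEV_REMOVE");
 */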

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
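
/*
 * Note on the flags check above: assuming the uapi enum where the valid
 * factory flags occupy the low bits and NVM_FACTORY_NR_BITS is the first
 * value past them (1 << 3), (NVM_FACTORY_NR_BITS - 1) is a mask of every
 * valid bit, so the test rejects anything outside that mask. For example:
 *
 *	fact.flags = NVM_FACTORY_RESET_ALL_BLKS;	accepted (1 << 1)
 *	fact.flags = 1 << 5;				rejected, -EINVAL
 */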

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	/* unknown ioctl command: return the conventional error */
	return -ENOTTY;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);
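
/*
 * Registering as a builtin misc device with a dynamic minor exposes the
 * control node as /dev/lightnvm/control; the userspace sketches above all
 * reach the NVM_* commands through nvm_ctl_ioctl() on that node.
 */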