/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"

static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
	"When nonzero, set the maximum number of sectors per I/O request");

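/* sysfs "state" attribute: "up" or "down", plus ",kickme" or ",closewait" */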
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
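/* sysfs "netif" attribute: each distinct network interface in use, listed once */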
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
				    struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}

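/* Dump RTT estimates, skb pool depth, and per-target frame/interface stats. */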
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
{
	struct aoedev *d;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *ife;
	unsigned long flags;
	char c;

	d = s->private;
	seq_printf(s, "rttavg: %d rttdev: %d\n",
		d->rttavg >> RTTSCALE,
		d->rttdev >> RTTDSCALE);
	seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
	seq_printf(s, "kicked: %ld\n", d->kicked);
	seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
	seq_printf(s, "ref: %ld\n", d->ref);

	spin_lock_irqsave(&d->lock, flags);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		c = '\t';
		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
		seq_printf(s, "ffree: %p\n",
			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
			(*t)->maxout, (*t)->nframes);
		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
		ifp = (*t)->ifs;
		ife = ifp + ARRAY_SIZE((*t)->ifs);
		for (; ifp->nd && ifp < ife; ifp++) {
			seq_printf(s, "%c%s", c, ifp->nd->name);
			c = ',';
		}
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

static int aoe_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, aoedisk_debugfs_show, inode->i_private);
}

static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = 0444 },
	.show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload.attr,
	NULL,
};

static const struct attribute_group aoe_attr_group = {
	.attrs = aoe_attrs,
};

static const struct attribute_group *aoe_attr_groups[] = {
	&aoe_attr_group,
	NULL,
};

static const struct file_operations aoe_debugfs_fops = {
	.open = aoe_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

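/*
 * Create the per-device debugfs file, named after the part of the disk
 * name that follows the '/'.
 */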
static void
aoedisk_add_debugfs(struct aoedev *d)
{
	char *p;

	if (aoe_debugfs_dir == NULL)
		return;
	p = strchr(d->gd->disk_name, '/');
	if (p == NULL)
		p = d->gd->disk_name;
	else
		p++;
	BUG_ON(*p == '\0');
	d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
					 &aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
{
	debugfs_remove(d->debugfs);
	d->debugfs = NULL;
}

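/* Count the opener only while the device is up and not being torn down. */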
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	if (!virt_addr_valid(d)) {
		pr_crit("aoe: invalid device pointer in %s\n",
			__func__);
		WARN_ON(1);
		return -ENODEV;
	}
	if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
		return -ENODEV;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

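/* On last close, send an AoE config query for this device. */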
static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return;
	}
	spin_unlock_irqrestore(&d->lock, flags);
}

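/*
 * blk-mq ->queue_rq handler: fail requests while the device is down;
 * otherwise queue them on the device and kick the AoE command machinery.
 */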
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct aoedev *d = hctx->queue->queuedata;

	spin_lock_irq(&d->lock);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irq(&d->lock);
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	aoecmd_work(d);
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}

static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

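/*
 * HDIO_GET_IDENTITY copies out the cached ATA identify data; all other
 * commands return -ENOTTY (SG_IO quietly, to avoid noise from udev's scsi_id).
 */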
static int
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
{
	struct aoedev *d;

	if (!arg)
		return -EINVAL;

	d = bdev->bd_disk->private_data;
	if ((d->flags & DEVFL_UP) == 0) {
		pr_err("aoe: disk not up\n");
		return -ENODEV;
	}

	if (cmd == HDIO_GET_IDENTITY) {
		if (!copy_to_user((void __user *) arg, &d->ident,
			sizeof(d->ident)))
			return 0;
		return -EFAULT;
	}

	/* udev calls scsi_id, which uses SG_IO, resulting in noise */
	if (cmd != SG_IO)
		pr_info("aoe: unknown ioctl 0x%x\n", cmd);

	return -ENOTTY;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.ioctl = aoeblk_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops aoeblk_mq_ops = {
	.queue_rq = aoeblk_queue_rq,
};

/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	mempool_t *mp;
	struct request_queue *q;
	struct blk_mq_tag_set *set;
	ulong flags;
	int late = 0;
	int err;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_GDALLOC
	&& !(d->flags & DEVFL_TKILL)
	&& !(d->flags & DEVFL_GD_NOW))
		d->flags |= DEVFL_GD_NOW;
	else
		late = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	if (late)
		return;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
		buf_pool_cache);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	set = &d->tag_set;
	set->ops = &aoeblk_mq_ops;
	set->cmd_size = sizeof(struct aoe_req);
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	err = blk_mq_alloc_tag_set(set);
	if (err) {
		pr_err("aoe: cannot allocate tag set for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_mempool;
	}

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		pr_err("aoe: cannot allocate block queue for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		blk_mq_free_tag_set(set);
		goto err_mempool;
	}

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	WARN_ON(!(d->flags & DEVFL_GDALLOC));
	WARN_ON(d->flags & DEVFL_TKILL);
	WARN_ON(d->gd);
	WARN_ON(d->flags & DEVFL_UP);
	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
	blk_queue_io_opt(q, SZ_2M);
	d->bufpool = mp;
	d->blkq = gd->queue = q;
	q->queuedata = d;
	d->gd = gd;
	if (aoe_maxsectors)
		blk_queue_max_hw_sectors(q, aoe_maxsectors);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	device_add_disk(NULL, gd, aoe_attr_groups);
	aoedisk_add_debugfs(d);

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	d->flags &= ~DEVFL_GD_NOW;
	spin_unlock_irqrestore(&d->lock, flags);
	return;

err_mempool:
	mempool_destroy(mp);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GD_NOW;
	schedule_work(&d->work);
	spin_unlock_irqrestore(&d->lock, flags);
}

void
aoeblk_exit(void)
{
	debugfs_remove_recursive(aoe_debugfs_dir);
	aoe_debugfs_dir = NULL;
	kmem_cache_destroy(buf_pool_cache);
}

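/*
 * Create the slab cache backing the per-device buf mempools and the
 * top-level "aoe" debugfs directory.
 */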
int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;
	aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
	return 0;
}