xref: /OK3568_Linux_fs/kernel/drivers/s390/block/scm_blk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

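/*
 * Note: each scm_request is embedded in an aob_rq_header (see to_aobrq())
 * through which the eadm layer finds the scm_device for the I/O. Freeing
 * a request means releasing all three parts of the allocation: the aob
 * page, the request array and the header with the embedded scm_request.
 */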
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

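/*
 * Preformat one request: the aob gets a zeroed page of its own (GFP_DMA,
 * presumably to keep it 31-bit addressable for the channel subsystem; a
 * full page also guarantees the required alignment) plus one request
 * pointer per potential msb. Ready requests are parked on inactive_requests.
 */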
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}

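/*
 * Return a request to the inactive list. Only aidaw pages obtained from
 * aidaw_pool are given back to the pool; they are recognised by being
 * page aligned, whereas the initial aidaws sit at the (unaligned) tail
 * of the aob page and are freed together with it.
 */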
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

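/*
 * Number of bytes addressable by the rest of the current aidaw page:
 * every remaining aidaw slot up to the page boundary maps one 4K block.
 */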
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

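/*
 * Add one msb for the given request to the aob and build its aidaw list,
 * one aidaw per page-sized segment (hence the WARN_ON on a nonzero
 * bv_offset). If no aidaw page is available, msb_count is untouched and
 * the caller can still start the msbs prepared so far.
 */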
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

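/*
 * Complete all block layer requests bundled in this scm_request. The
 * per-request pdu (cmd_size == sizeof(blk_status_t)) carries the error
 * over to the .complete handler, scm_blk_request_done().
 */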
static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
			blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}

struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};

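/*
 * Queue a request: up to nr_requests_per_io requests are gathered in the
 * per-hctx scm_request and submitted as one aob once the batch is full
 * or the block layer signals the last request (qd->last). BLK_STS_RESOURCE
 * is returned if no scm_request or aidaw page is available, or while
 * writes to the device are prohibited.
 */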
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

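/*
 * Decide how to retry a failed aob. Only for BLK_STS_IOERR is the
 * response block valid: EQC_WR_PROHIBIT switches the device to
 * SCM_WR_PROHIBIT and requeues, any other cause restarts the aob as is.
 */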
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}

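/*
 * Completion callback, invoked by the eadm layer when an aob finishes.
 * Failed aobs are retried up to scmrq->retries times before the error
 * is propagated to the block layer.
 */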
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};

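/*
 * Set up the block device for an scm increment: one hardware queue per
 * preallocated scm_request, 4K logical blocks, and disk names following
 * the scma..scmz, scmaa..scmzz scheme, which allows for at most 702
 * devices.
 */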
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	bdev->tag_set.numa_node = NUMA_NO_NODE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

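/*
 * Validate the module parameters; nr_requests_per_io is capped at 64,
 * presumably so that the msb array leaves room in the aob page for the
 * initial aidaws placed behind it (see scm_request_init()).
 */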
static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);