// SPDX-License-Identifier: GPL-2.0
/*
 * virtio_pmem.c: Virtio pmem Driver
 *
 * Discovers persistent memory range information from the host and
 * provides a virtio-based flushing interface.
 */
#include "virtio_pmem.h"
#include "nd.h"

/* The interrupt handler: runs when the host has completed requests */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
	struct virtio_pmem *vpmem = vq->vdev->priv;
	struct virtio_pmem_request *req_data, *req_buf;
	unsigned long flags;
	unsigned int len;

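	/*
	 * Reap all completed buffers: mark each request done and wake its
	 * waiter. Every completion also frees a descriptor slot, so if a
	 * flusher is parked on req_list waiting for space, hand the freed
	 * slot to the first one in line.
	 */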
	spin_lock_irqsave(&vpmem->pmem_lock, flags);
	while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
		req_data->done = true;
		wake_up(&req_data->host_acked);

		if (!list_empty(&vpmem->req_list)) {
			req_buf = list_first_entry(&vpmem->req_list,
					struct virtio_pmem_request, list);
			req_buf->wq_buf_avail = true;
			wake_up(&req_buf->wq_buf);
			list_del(&req_buf->list);
		}
	}
	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);

/*
 * The request submission function: sends a flush request and waits
 * for the host to acknowledge it.
 */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
	struct virtio_device *vdev = nd_region->provider_data;
	struct virtio_pmem *vpmem = vdev->priv;
	struct virtio_pmem_request *req_data;
	struct scatterlist *sgs[2], sg, ret;
	unsigned long flags;
	int err, err1;

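	/*
	 * We block below until the host acks the flush, so this must only
	 * be called from process context.
	 */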
	might_sleep();
	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		return -ENOMEM;

	req_data->done = false;
	init_waitqueue_head(&req_data->host_acked);
	init_waitqueue_head(&req_data->wq_buf);
	INIT_LIST_HEAD(&req_data->list);
	req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
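	/* sgs[0] carries the flush command; sgs[1] receives the host's return status */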
	sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
	sgs[0] = &sg;
	sg_init_one(&ret, &req_data->resp.ret, sizeof(req_data->resp));
	sgs[1] = &ret;

	spin_lock_irqsave(&vpmem->pmem_lock, flags);
	/*
	 * If virtqueue_add_sgs() returns -ENOSPC, the req_vq virtqueue has
	 * no free descriptors. Add the request to req_list and wait for
	 * host_ack to wake us up when free slots become available.
	 */
	while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
					GFP_ATOMIC)) == -ENOSPC) {

		dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
		req_data->wq_buf_avail = false;
		list_add_tail(&req_data->list, &vpmem->req_list);
		spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

		/* A host response results in "host_ack" getting called */
		wait_event(req_data->wq_buf, req_data->wq_buf_avail);
		spin_lock_irqsave(&vpmem->pmem_lock, flags);
	}
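	/* Kick the virtqueue to notify the host of the new request */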
	err1 = virtqueue_kick(vpmem->req_vq);
	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
	/*
	 * Fail if virtqueue_add_sgs() returned an error other than -ENOSPC,
	 * or if the kick failed; we can't do anything about either.
	 */
	if (err || !err1) {
		dev_info(&vdev->dev, "failed to send command to virtio pmem device\n");
		err = -EIO;
	} else {
		/* A host response results in "host_ack" getting called */
		wait_event(req_data->host_acked, req_data->done);
		err = le32_to_cpu(req_data->resp.ret);
	}

	kfree(req_data);
	return err;
}

/* The asynchronous flush callback function */
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
{
	/*
	 * Create a child bio for the asynchronous flush and chain it to
	 * the parent bio. Otherwise call the nd_region flush directly.
	 */
	if (bio && bio->bi_iter.bi_sector != -1) {
		struct bio *child = bio_alloc(GFP_ATOMIC, 0);

		if (!child)
			return -ENOMEM;
		bio_copy_dev(child, bio);
		child->bi_opf = REQ_PREFLUSH;
		child->bi_iter.bi_sector = -1;
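		/* bi_sector == -1 marks the child as a data-less, flush-only bio */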
		bio_chain(child, bio);
		submit_bio(child);
		return 0;
	}
	if (virtio_pmem_flush(nd_region))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(async_pmem_flush);
MODULE_LICENSE("GPL");
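
/*
 * Usage sketch (illustrative, not part of this file): the virtio_pmem
 * probe path is expected to register async_pmem_flush() as the region
 * flush callback and mark the region as async, roughly as below. The
 * surrounding probe setup and exact field initialization are assumed
 * here, not taken from this file.
 *
 *	struct nd_region_desc ndr_desc = { };
 *
 *	ndr_desc.res = &res;
 *	ndr_desc.provider_data = vdev;
 *	ndr_desc.flush = async_pmem_flush;
 *	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
 *	nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
 */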