xref: /OK3568_Linux_fs/kernel/drivers/remoteproc/remoteproc_virtio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Remote processor messaging transport (OMAP platform-specific bits)
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2011 Texas Instruments, Inc.
6*4882a593Smuzhiyun  * Copyright (C) 2011 Google, Inc.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Ohad Ben-Cohen <ohad@wizery.com>
9*4882a593Smuzhiyun  * Brian Swetland <swetland@google.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/dma-map-ops.h>
13*4882a593Smuzhiyun #include <linux/export.h>
14*4882a593Smuzhiyun #include <linux/of_reserved_mem.h>
15*4882a593Smuzhiyun #include <linux/remoteproc.h>
16*4882a593Smuzhiyun #include <linux/virtio.h>
17*4882a593Smuzhiyun #include <linux/virtio_config.h>
18*4882a593Smuzhiyun #include <linux/virtio_ids.h>
19*4882a593Smuzhiyun #include <linux/virtio_ring.h>
20*4882a593Smuzhiyun #include <linux/err.h>
21*4882a593Smuzhiyun #include <linux/kref.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include "remoteproc_internal.h"
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun /* kick the remote processor, and let it know which virtqueue to poke at */
rproc_virtio_notify(struct virtqueue * vq)27*4882a593Smuzhiyun static bool rproc_virtio_notify(struct virtqueue *vq)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct rproc_vring *rvring = vq->priv;
30*4882a593Smuzhiyun 	struct rproc *rproc = rvring->rvdev->rproc;
31*4882a593Smuzhiyun 	int notifyid = rvring->notifyid;
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	rproc->ops->kick(rproc, notifyid);
36*4882a593Smuzhiyun 	return true;
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /**
40*4882a593Smuzhiyun  * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
41*4882a593Smuzhiyun  * @rproc: handle to the remote processor
42*4882a593Smuzhiyun  * @notifyid: index of the signalled virtqueue (unique per this @rproc)
43*4882a593Smuzhiyun  *
44*4882a593Smuzhiyun  * This function should be called by the platform-specific rproc driver,
45*4882a593Smuzhiyun  * when the remote processor signals that a specific virtqueue has pending
46*4882a593Smuzhiyun  * messages available.
47*4882a593Smuzhiyun  *
48*4882a593Smuzhiyun  * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
49*4882a593Smuzhiyun  * and otherwise returns IRQ_HANDLED.
50*4882a593Smuzhiyun  */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	/* notifyids are allocated into this IDR when the vrings are set up */
	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	/* first argument (irq number) is unused by vring_interrupt(), hence 0 */
	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
64*4882a593Smuzhiyun 
/*
 * Set up the virtqueue backing vring @id of @vdev, on top of the
 * preallocated "vdev%dvring%d" carveout described in the resource table.
 *
 * Returns the new virtqueue on success, NULL when @name is NULL (caller
 * requested a placeholder queue), or an ERR_PTR on failure.
 */
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int len, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	/* a NULL name means the caller wants this queue slot left unused */
	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	len = rvring->len;

	/* zero vring */
	size = vring_size(len, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, len, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	/* link vq and vring both ways; the notify and interrupt paths rely on this */
	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}
125*4882a593Smuzhiyun 
/* tear down every virtqueue of @vdev and unlink each from its vring */
static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	/* _safe variant needed: vring_del_virtqueue() unlinks vq from the list */
	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		/* clear the back-pointer so rproc_vq_interrupt() sees the vq is gone */
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}
137*4882a593Smuzhiyun 
/* virtio_config_ops .del_vqs callback */
static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}
142*4882a593Smuzhiyun 
/* virtio_config_ops .find_vqs callback: create @nvqs virtqueues for @vdev */
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		/* a NULL name marks a slot the driver doesn't want populated */
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		/* queue_idx only advances for queues that are actually created */
		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	/* unwind any virtqueues created before the failure */
	__rproc_virtio_del_vqs(vdev);
	return ret;
}
172*4882a593Smuzhiyun 
rproc_virtio_get_status(struct virtio_device * vdev)173*4882a593Smuzhiyun static u8 rproc_virtio_get_status(struct virtio_device *vdev)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun 	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
176*4882a593Smuzhiyun 	struct fw_rsc_vdev *rsc;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	return rsc->status;
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun 
/* virtio_config_ops .set_status: publish @status via the shared resource table */
static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc =
		(void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}
193*4882a593Smuzhiyun 
rproc_virtio_reset(struct virtio_device * vdev)194*4882a593Smuzhiyun static void rproc_virtio_reset(struct virtio_device *vdev)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun 	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
197*4882a593Smuzhiyun 	struct fw_rsc_vdev *rsc;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	rsc->status = 0;
202*4882a593Smuzhiyun 	dev_dbg(&vdev->dev, "reset !\n");
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun /* provide the vdev features as retrieved from the firmware */
rproc_virtio_get_features(struct virtio_device * vdev)206*4882a593Smuzhiyun static u64 rproc_virtio_get_features(struct virtio_device *vdev)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun 	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
209*4882a593Smuzhiyun 	struct fw_rsc_vdev *rsc;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	return rsc->dfeatures;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun 
/* filter out virtio features that the remoteproc transport cannot support */
static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
225*4882a593Smuzhiyun 
/* virtio_config_ops .finalize_features: negotiate and record guest features */
static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}
250*4882a593Smuzhiyun 
rproc_virtio_get(struct virtio_device * vdev,unsigned int offset,void * buf,unsigned int len)251*4882a593Smuzhiyun static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
252*4882a593Smuzhiyun 			     void *buf, unsigned int len)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
255*4882a593Smuzhiyun 	struct fw_rsc_vdev *rsc;
256*4882a593Smuzhiyun 	void *cfg;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
259*4882a593Smuzhiyun 	cfg = &rsc->vring[rsc->num_of_vrings];
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	if (offset + len > rsc->config_len || offset + len < len) {
262*4882a593Smuzhiyun 		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
263*4882a593Smuzhiyun 		return;
264*4882a593Smuzhiyun 	}
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	memcpy(buf, cfg + offset, len);
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
rproc_virtio_set(struct virtio_device * vdev,unsigned int offset,const void * buf,unsigned int len)269*4882a593Smuzhiyun static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
270*4882a593Smuzhiyun 			     const void *buf, unsigned int len)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun 	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
273*4882a593Smuzhiyun 	struct fw_rsc_vdev *rsc;
274*4882a593Smuzhiyun 	void *cfg;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
277*4882a593Smuzhiyun 	cfg = &rsc->vring[rsc->num_of_vrings];
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	if (offset + len > rsc->config_len || offset + len < len) {
280*4882a593Smuzhiyun 		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
281*4882a593Smuzhiyun 		return;
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	memcpy(cfg + offset, buf, len);
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun 
/* virtio config ops handed to the virtio core for rproc-hosted devices */
static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun /*
300*4882a593Smuzhiyun  * This function is called whenever vdev is released, and is responsible
301*4882a593Smuzhiyun  * to decrement the remote processor's refcount which was taken when vdev was
302*4882a593Smuzhiyun  * added.
303*4882a593Smuzhiyun  *
304*4882a593Smuzhiyun  * Never call this function directly; it will be called by the driver
305*4882a593Smuzhiyun  * core when needed.
306*4882a593Smuzhiyun  */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);

	/* all pointers derived from vdev were captured above, so free it first */
	kfree(vdev);

	/* drop the vdev/vring reference taken in rproc_add_virtio_dev() */
	kref_put(&rvdev->refcount, rproc_vdev_release);

	/* balance the get_device() taken when this vdev was registered */
	put_device(&rproc->dev);
}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /**
321*4882a593Smuzhiyun  * rproc_add_virtio_dev() - register an rproc-induced virtio device
322*4882a593Smuzhiyun  * @rvdev: the remote vdev
323*4882a593Smuzhiyun  * @id: the device type identification (used to match it with a driver).
324*4882a593Smuzhiyun  *
325*4882a593Smuzhiyun  * This function registers a virtio device. This vdev's parent is
326*4882a593Smuzhiyun  * the rproc device.
327*4882a593Smuzhiyun  *
328*4882a593Smuzhiyun  * Returns 0 on success or an appropriate error value otherwise.
329*4882a593Smuzhiyun  */
rproc_add_virtio_dev(struct rproc_vdev * rvdev,int id)330*4882a593Smuzhiyun int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun 	struct rproc *rproc = rvdev->rproc;
333*4882a593Smuzhiyun 	struct device *dev = &rvdev->dev;
334*4882a593Smuzhiyun 	struct virtio_device *vdev;
335*4882a593Smuzhiyun 	struct rproc_mem_entry *mem;
336*4882a593Smuzhiyun 	int ret;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	if (rproc->ops->kick == NULL) {
339*4882a593Smuzhiyun 		ret = -EINVAL;
340*4882a593Smuzhiyun 		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
341*4882a593Smuzhiyun 		goto out;
342*4882a593Smuzhiyun 	}
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	/* Try to find dedicated vdev buffer carveout */
345*4882a593Smuzhiyun 	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
346*4882a593Smuzhiyun 	if (mem) {
347*4882a593Smuzhiyun 		phys_addr_t pa;
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 		if (mem->of_resm_idx != -1) {
350*4882a593Smuzhiyun 			struct device_node *np = rproc->dev.parent->of_node;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 			/* Associate reserved memory to vdev device */
353*4882a593Smuzhiyun 			ret = of_reserved_mem_device_init_by_idx(dev, np,
354*4882a593Smuzhiyun 								 mem->of_resm_idx);
355*4882a593Smuzhiyun 			if (ret) {
356*4882a593Smuzhiyun 				dev_err(dev, "Can't associate reserved memory\n");
357*4882a593Smuzhiyun 				goto out;
358*4882a593Smuzhiyun 			}
359*4882a593Smuzhiyun 		} else {
360*4882a593Smuzhiyun 			if (mem->va) {
361*4882a593Smuzhiyun 				dev_warn(dev, "vdev %d buffer already mapped\n",
362*4882a593Smuzhiyun 					 rvdev->index);
363*4882a593Smuzhiyun 				pa = rproc_va_to_pa(mem->va);
364*4882a593Smuzhiyun 			} else {
365*4882a593Smuzhiyun 				/* Use dma address as carveout no memmapped yet */
366*4882a593Smuzhiyun 				pa = (phys_addr_t)mem->dma;
367*4882a593Smuzhiyun 			}
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 			/* Associate vdev buffer memory pool to vdev subdev */
370*4882a593Smuzhiyun 			ret = dma_declare_coherent_memory(dev, pa,
371*4882a593Smuzhiyun 							   mem->da,
372*4882a593Smuzhiyun 							   mem->len);
373*4882a593Smuzhiyun 			if (ret < 0) {
374*4882a593Smuzhiyun 				dev_err(dev, "Failed to associate buffer\n");
375*4882a593Smuzhiyun 				goto out;
376*4882a593Smuzhiyun 			}
377*4882a593Smuzhiyun 		}
378*4882a593Smuzhiyun 	} else {
379*4882a593Smuzhiyun 		struct device_node *np = rproc->dev.parent->of_node;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 		/*
382*4882a593Smuzhiyun 		 * If we don't have dedicated buffer, just attempt to re-assign
383*4882a593Smuzhiyun 		 * the reserved memory from our parent. A default memory-region
384*4882a593Smuzhiyun 		 * at index 0 from the parent's memory-regions is assigned for
385*4882a593Smuzhiyun 		 * the rvdev dev to allocate from. Failure is non-critical and
386*4882a593Smuzhiyun 		 * the allocations will fall back to global pools, so don't
387*4882a593Smuzhiyun 		 * check return value either.
388*4882a593Smuzhiyun 		 */
389*4882a593Smuzhiyun 		of_reserved_mem_device_init_by_idx(dev, np, 0);
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	/* Allocate virtio device */
393*4882a593Smuzhiyun 	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
394*4882a593Smuzhiyun 	if (!vdev) {
395*4882a593Smuzhiyun 		ret = -ENOMEM;
396*4882a593Smuzhiyun 		goto out;
397*4882a593Smuzhiyun 	}
398*4882a593Smuzhiyun 	vdev->id.device	= id,
399*4882a593Smuzhiyun 	vdev->config = &rproc_virtio_config_ops,
400*4882a593Smuzhiyun 	vdev->dev.parent = dev;
401*4882a593Smuzhiyun 	vdev->dev.release = rproc_virtio_dev_release;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	/*
404*4882a593Smuzhiyun 	 * We're indirectly making a non-temporary copy of the rproc pointer
405*4882a593Smuzhiyun 	 * here, because drivers probed with this vdev will indirectly
406*4882a593Smuzhiyun 	 * access the wrapping rproc.
407*4882a593Smuzhiyun 	 *
408*4882a593Smuzhiyun 	 * Therefore we must increment the rproc refcount here, and decrement
409*4882a593Smuzhiyun 	 * it _only_ when the vdev is released.
410*4882a593Smuzhiyun 	 */
411*4882a593Smuzhiyun 	get_device(&rproc->dev);
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	/* Reference the vdev and vring allocations */
414*4882a593Smuzhiyun 	kref_get(&rvdev->refcount);
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	ret = register_virtio_device(vdev);
417*4882a593Smuzhiyun 	if (ret) {
418*4882a593Smuzhiyun 		put_device(&vdev->dev);
419*4882a593Smuzhiyun 		dev_err(dev, "failed to register vdev: %d\n", ret);
420*4882a593Smuzhiyun 		goto out;
421*4882a593Smuzhiyun 	}
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun out:
426*4882a593Smuzhiyun 	return ret;
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun /**
430*4882a593Smuzhiyun  * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
431*4882a593Smuzhiyun  * @dev: the virtio device
432*4882a593Smuzhiyun  * @data: must be null
433*4882a593Smuzhiyun  *
434*4882a593Smuzhiyun  * This function unregisters an existing virtio device.
435*4882a593Smuzhiyun  */
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	/* always 0 so device_for_each_child() keeps iterating over children */
	return 0;
}
443