xref: /OK3568_Linux_fs/kernel/drivers/rpmsg/rockchip_rpmsg.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Rockchip Remote Processors Messaging Platform Support.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
6*4882a593Smuzhiyun  * Author: Steven Liu <steven.liu@rock-chips.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/err.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/mailbox_client.h>
15*4882a593Smuzhiyun #include <linux/mailbox_controller.h>
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/of_device.h>
18*4882a593Smuzhiyun #include <linux/of_reserved_mem.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/rpmsg/rockchip_rpmsg.h>
21*4882a593Smuzhiyun #include <linux/slab.h>
22*4882a593Smuzhiyun #include <linux/virtio_config.h>
23*4882a593Smuzhiyun #include <linux/virtio_ids.h>
24*4882a593Smuzhiyun #include <linux/virtio_ring.h>
25*4882a593Smuzhiyun #include <soc/rockchip/rockchip-mailbox.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include "rpmsg_internal.h"
28*4882a593Smuzhiyun 
/* Supported Rockchip SoC variants; carried as of_device_id match data. */
enum rk_rpmsg_chip {
	RK3562,
	RK3568,
};
33*4882a593Smuzhiyun 
/*
 * Per-instance virtio device wrapper.
 *
 * Each rpmsg instance owns one virtio device backed by two vrings
 * carved out of the platform MEM resource (see rk_set_vring_phy_buf()).
 */
struct rk_virtio_dev {
	struct virtio_device vdev;	/* embedded virtio device */
	unsigned int vring[2];		/* physical base address of each vring */
	struct virtqueue *vq[2];	/* vq[0] is rvq, vq[1] is tvq */
	unsigned int base_queue_id;	/* global queue id of vq[0] (instance * 2) */
	int num_of_vqs;			/* number of virtqueues actually created */
	struct rk_rpmsg_dev *rpdev;	/* back-pointer to platform driver state */
};

#define to_rk_rpvdev(vd)	container_of(vd, struct rk_virtio_dev, vdev)
44*4882a593Smuzhiyun 
/* Driver state for one rockchip-rpmsg platform device (the link master). */
struct rk_rpmsg_dev {
	struct platform_device *pdev;
	enum rk_rpmsg_chip chip;	/* SoC variant from match data */
	int vdev_nums;			/* rpmsg instance count (<= RPMSG_MAX_INSTANCE_NUM) */
	unsigned int link_id;		/* "rockchip,link-id" DT property; sent in mbox cmd */
	int first_notify;		/* 0 until the initial master->remote handshake kick */
	u32 flags;			/* RPMSG_REMOTE_IS_READY / RPMSG_SHARED_DMA_POOL / ... */
	struct mbox_client mbox_cl;
	struct mbox_chan *mbox_rx_chan;	/* "rpmsg-rx" mailbox channel */
	struct mbox_chan *mbox_tx_chan;	/* "rpmsg-tx" mailbox channel */
	struct rk_virtio_dev *rpvdev[RPMSG_MAX_INSTANCE_NUM];
};
57*4882a593Smuzhiyun 
/* Per-virtqueue private data, stored in virtqueue->priv. */
struct rk_rpmsg_vq_info {
	u32 queue_id;		/* global queue id: even = rvq, odd = tvq */
	void *vring_addr;	/* ioremapped virtual address of the vring */
	struct rk_rpmsg_dev *rpdev;
};
63*4882a593Smuzhiyun 
rk_rpmsg_rx_callback(struct mbox_client * client,void * message)64*4882a593Smuzhiyun static void rk_rpmsg_rx_callback(struct mbox_client *client, void *message)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	u32 link_id;
67*4882a593Smuzhiyun 	struct rk_virtio_dev *rpvdev;
68*4882a593Smuzhiyun 	struct rk_rpmsg_dev *rpdev = container_of(client, struct rk_rpmsg_dev, mbox_cl);
69*4882a593Smuzhiyun 	struct platform_device *pdev = rpdev->pdev;
70*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
71*4882a593Smuzhiyun 	struct rockchip_mbox_msg *rx_msg;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	rx_msg = message;
74*4882a593Smuzhiyun 	dev_dbg(dev, "rpmsg master: receive cmd=0x%x data=0x%x\n",
75*4882a593Smuzhiyun 		rx_msg->cmd, rx_msg->data);
76*4882a593Smuzhiyun 	if (rx_msg->data != RPMSG_MBOX_MAGIC)
77*4882a593Smuzhiyun 		dev_err(dev, "rpmsg master: mailbox data error!\n");
78*4882a593Smuzhiyun 	link_id = rx_msg->cmd & 0xFFU;
79*4882a593Smuzhiyun 	/* TODO: only support one remote core now */
80*4882a593Smuzhiyun 	rpvdev = rpdev->rpvdev[0];
81*4882a593Smuzhiyun 	rpdev->flags |= RPMSG_REMOTE_IS_READY;
82*4882a593Smuzhiyun 	dev_dbg(dev, "rpmsg master: rx link_id=0x%x flag=0x%x\n", link_id, rpdev->flags);
83*4882a593Smuzhiyun 	vring_interrupt(0, rpvdev->vq[0]);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
rk_rpmsg_notify(struct virtqueue * vq)86*4882a593Smuzhiyun static bool rk_rpmsg_notify(struct virtqueue *vq)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	struct rk_rpmsg_vq_info *rpvq = vq->priv;
89*4882a593Smuzhiyun 	struct rk_rpmsg_dev *rpdev = rpvq->rpdev;
90*4882a593Smuzhiyun 	struct platform_device *pdev = rpdev->pdev;
91*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
92*4882a593Smuzhiyun 	u32 link_id;
93*4882a593Smuzhiyun 	int ret;
94*4882a593Smuzhiyun 	struct rockchip_mbox_msg tx_msg;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	memset(&tx_msg, 0, sizeof(tx_msg));
97*4882a593Smuzhiyun 	dev_dbg(dev, "queue_id-0x%x virt_vring_addr 0x%p\n",
98*4882a593Smuzhiyun 		rpvq->queue_id, rpvq->vring_addr);
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	link_id = rpdev->link_id;
101*4882a593Smuzhiyun 	tx_msg.cmd = link_id & 0xFFU;
102*4882a593Smuzhiyun 	tx_msg.data = RPMSG_MBOX_MAGIC;
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 	if ((rpdev->first_notify == 0) && (rpvq->queue_id % 2 == 0)) {
105*4882a593Smuzhiyun 		/* first_notify is used in the master init handshake phase. */
106*4882a593Smuzhiyun 		dev_dbg(dev, "rpmsg first_notify\n");
107*4882a593Smuzhiyun 		rpdev->first_notify++;
108*4882a593Smuzhiyun 	} else if (rpvq->queue_id % 2 == 0) {
109*4882a593Smuzhiyun 		/* tx done is not supported, so ignored */
110*4882a593Smuzhiyun 		return true;
111*4882a593Smuzhiyun 	}
112*4882a593Smuzhiyun 	ret = mbox_send_message(rpdev->mbox_tx_chan, &tx_msg);
113*4882a593Smuzhiyun 	if (ret < 0) {
114*4882a593Smuzhiyun 		dev_err(dev, "mbox send failed!\n");
115*4882a593Smuzhiyun 		return false;
116*4882a593Smuzhiyun 	}
117*4882a593Smuzhiyun 	mbox_chan_txdone(rpdev->mbox_tx_chan, 0);
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	return true;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
/*
 * Create one virtqueue for @vdev at @index.
 *
 * The vring lives in device memory taken from the platform MEM
 * resource: it is ioremapped, cleared with memset_io(), and handed to
 * the virtio ring layer. The per-queue info records the global queue
 * id and a back-pointer used by the notify hook.
 *
 * Returns the new virtqueue or an ERR_PTR on failure.
 */
static struct virtqueue *rk_rpmsg_find_vq(struct virtio_device *vdev,
					  unsigned int index,
					  void (*callback)(struct virtqueue *vq),
					  const char *name,
					  bool ctx)
{
	struct rk_virtio_dev *rpvdev = to_rk_rpvdev(vdev);
	struct rk_rpmsg_dev *rpdev = rpvdev->rpdev;
	struct device *dev = &rpdev->pdev->dev;
	struct rk_rpmsg_vq_info *vq_info;
	struct virtqueue *vq;
	int err;

	vq_info = kmalloc(sizeof(*vq_info), GFP_KERNEL);
	if (!vq_info)
		return ERR_PTR(-ENOMEM);

	/* ioremap() gives an uncached mapping, so the cached-vring flag goes away. */
	rpdev->flags &= ~RPMSG_CACHED_VRING;
	vq_info->vring_addr = (__force void *) ioremap(rpvdev->vring[index], RPMSG_VRING_SIZE);
	if (!vq_info->vring_addr) {
		err = -ENOMEM;
		goto free_info;
	}
	dev_dbg(dev, "vring%d: phys 0x%x, virt 0x%p\n", index,
		rpvdev->vring[index], vq_info->vring_addr);

	/* Start from a zeroed ring before handing it to the virtio layer. */
	memset_io(vq_info->vring_addr, 0, RPMSG_VRING_SIZE);

	vq = vring_new_virtqueue(index, RPMSG_BUF_COUNT, RPMSG_VRING_ALIGN, vdev, true, ctx,
				 vq_info->vring_addr, rk_rpmsg_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue failed\n");
		err = -ENOMEM;
		goto unmap;
	}

	vq_info->queue_id = rpvdev->base_queue_id + index;
	vq_info->rpdev = rpdev;

	rpvdev->vq[index] = vq;
	vq->priv = vq_info;

	return vq;

unmap:
	iounmap((__force void __iomem *) vq_info->vring_addr);
free_info:
	kfree(vq_info);
	return ERR_PTR(err);
}
173*4882a593Smuzhiyun 
/* Virtio status byte is not tracked by this transport yet; always 0. */
static u8 rk_rpmsg_get_status(struct virtio_device *vdev)
{
	/* TODO: */
	return 0;
}
179*4882a593Smuzhiyun 
/* No-op: virtio status handling is not implemented for this transport yet. */
static void rk_rpmsg_set_status(struct virtio_device *vdev, u8 status)
{
	/* TODO: */
}
184*4882a593Smuzhiyun 
/* No-op: device reset is not implemented for this transport yet. */
static void rk_rpmsg_reset(struct virtio_device *vdev)
{
	/* TODO: */
}
189*4882a593Smuzhiyun 
rk_rpmsg_del_vqs(struct virtio_device * vdev)190*4882a593Smuzhiyun static void rk_rpmsg_del_vqs(struct virtio_device *vdev)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	struct virtqueue *vq, *n;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
195*4882a593Smuzhiyun 		struct rk_rpmsg_vq_info *rpvq = vq->priv;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 		iounmap(rpvq->vring_addr);
198*4882a593Smuzhiyun 		vring_del_virtqueue(vq);
199*4882a593Smuzhiyun 		kfree(rpvq);
200*4882a593Smuzhiyun 	}
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
/*
 * Allocate the virtqueue pair of one rpmsg instance.
 *
 * Exactly two queues are supported: vqs[0] is the rx queue (rvq) and
 * vqs[1] the tx queue (tvq); any other count is rejected. On failure,
 * queues created so far are torn down again.
 */
static int rk_rpmsg_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			     struct virtqueue *vqs[],
			     vq_callback_t *callbacks[],
			     const char * const names[],
			     const bool *ctx,
			     struct irq_affinity *desc)
{
	struct rk_virtio_dev *rpvdev = to_rk_rpvdev(vdev);
	unsigned int i;
	int ret;

	/* Each rpmsg instance has two virtqueues. vqs[0] is rvq and vqs[1] is tvq */
	if (nvqs != 2)
		return -EINVAL;

	for (i = 0; i < nvqs; i++) {
		bool has_ctx = ctx ? ctx[i] : false;

		vqs[i] = rk_rpmsg_find_vq(vdev, i, callbacks[i], names[i], has_ctx);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto fail;
		}
	}

	rpvdev->num_of_vqs = nvqs;

	return 0;

fail:
	rk_rpmsg_del_vqs(vdev);

	return ret;
}
235*4882a593Smuzhiyun 
/* Only the rpmsg name-service feature bit is advertised to the core. */
static u64 rk_rpmsg_get_features(struct virtio_device *vdev)
{
	return RPMSG_VIRTIO_RPMSG_F_NS;
}
240*4882a593Smuzhiyun 
/* Strip any negotiated feature bits the vring transport cannot honor. */
static int rk_rpmsg_finalize_features(struct virtio_device *vdev)
{
	vring_transport_features(vdev);

	return 0;
}
247*4882a593Smuzhiyun 
/*
 * Intentionally empty release callback: the enclosing rk_virtio_dev is
 * devm-allocated (freed with the platform device), but the device core
 * expects a release hook on every registered device.
 */
static void rk_rpmsg_vdev_release(struct device *dev)
{
}
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun static struct virtio_config_ops rk_rpmsg_config_ops = {
253*4882a593Smuzhiyun 	.get_status	= rk_rpmsg_get_status,
254*4882a593Smuzhiyun 	.set_status	= rk_rpmsg_set_status,
255*4882a593Smuzhiyun 	.reset		= rk_rpmsg_reset,
256*4882a593Smuzhiyun 	.find_vqs	= rk_rpmsg_find_vqs,
257*4882a593Smuzhiyun 	.del_vqs	= rk_rpmsg_del_vqs,
258*4882a593Smuzhiyun 	.get_features	= rk_rpmsg_get_features,
259*4882a593Smuzhiyun 	.finalize_features = rk_rpmsg_finalize_features,
260*4882a593Smuzhiyun };
261*4882a593Smuzhiyun 
rk_set_vring_phy_buf(struct platform_device * pdev,struct rk_rpmsg_dev * rpdev,int vdev_nums)262*4882a593Smuzhiyun static int rk_set_vring_phy_buf(struct platform_device *pdev,
263*4882a593Smuzhiyun 				struct rk_rpmsg_dev *rpdev, int vdev_nums)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
266*4882a593Smuzhiyun 	struct resource *res;
267*4882a593Smuzhiyun 	resource_size_t size;
268*4882a593Smuzhiyun 	unsigned int start, end;
269*4882a593Smuzhiyun 	int i, ret = 0;
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
272*4882a593Smuzhiyun 	if (res) {
273*4882a593Smuzhiyun 		size = resource_size(res);
274*4882a593Smuzhiyun 		start = res->start;
275*4882a593Smuzhiyun 		end = res->start + size;
276*4882a593Smuzhiyun 		for (i = 0; i < vdev_nums; i++) {
277*4882a593Smuzhiyun 			rpdev->rpvdev[i] = devm_kzalloc(dev, sizeof(struct rk_virtio_dev),
278*4882a593Smuzhiyun 							GFP_KERNEL);
279*4882a593Smuzhiyun 			if (!rpdev->rpvdev[i])
280*4882a593Smuzhiyun 				return -ENOMEM;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 			rpdev->rpvdev[i]->vring[0] = start;
283*4882a593Smuzhiyun 			rpdev->rpvdev[i]->vring[1] = start + RPMSG_VRING_SIZE;
284*4882a593Smuzhiyun 			start += RPMSG_VRING_OVERHEAD;
285*4882a593Smuzhiyun 			if (start > end) {
286*4882a593Smuzhiyun 				dev_err(dev, "Too small memory size %x!\n", (u32)size);
287*4882a593Smuzhiyun 				ret = -EINVAL;
288*4882a593Smuzhiyun 				break;
289*4882a593Smuzhiyun 			}
290*4882a593Smuzhiyun 		}
291*4882a593Smuzhiyun 	} else {
292*4882a593Smuzhiyun 		return -ENOMEM;
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	return ret;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun 
rockchip_rpmsg_probe(struct platform_device * pdev)298*4882a593Smuzhiyun static int rockchip_rpmsg_probe(struct platform_device *pdev)
299*4882a593Smuzhiyun {
300*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
301*4882a593Smuzhiyun 	struct rk_rpmsg_dev *rpdev = NULL;
302*4882a593Smuzhiyun 	struct mbox_client *cl;
303*4882a593Smuzhiyun 	int i, ret = 0;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	rpdev = devm_kzalloc(dev, sizeof(*rpdev), GFP_KERNEL);
306*4882a593Smuzhiyun 	if (!rpdev)
307*4882a593Smuzhiyun 		return -ENOMEM;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	dev_info(dev, "rockchip rpmsg platform probe.\n");
310*4882a593Smuzhiyun 	rpdev->pdev = pdev;
311*4882a593Smuzhiyun 	rpdev->chip = (enum rk_rpmsg_chip)device_get_match_data(dev);
312*4882a593Smuzhiyun 	rpdev->first_notify = 0;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	cl = &rpdev->mbox_cl;
315*4882a593Smuzhiyun 	cl->dev = dev;
316*4882a593Smuzhiyun 	cl->rx_callback = rk_rpmsg_rx_callback;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	rpdev->mbox_rx_chan = mbox_request_channel_byname(cl, "rpmsg-rx");
319*4882a593Smuzhiyun 	if (IS_ERR(rpdev->mbox_rx_chan)) {
320*4882a593Smuzhiyun 		ret = PTR_ERR(rpdev->mbox_rx_chan);
321*4882a593Smuzhiyun 		dev_err(dev, "failed to request mbox rx chan, ret %d\n", ret);
322*4882a593Smuzhiyun 		return ret;
323*4882a593Smuzhiyun 	}
324*4882a593Smuzhiyun 	rpdev->mbox_tx_chan = mbox_request_channel_byname(cl, "rpmsg-tx");
325*4882a593Smuzhiyun 	if (IS_ERR(rpdev->mbox_tx_chan)) {
326*4882a593Smuzhiyun 		ret = PTR_ERR(rpdev->mbox_tx_chan);
327*4882a593Smuzhiyun 		dev_err(dev, "failed to request mbox tx chan, ret %d\n", ret);
328*4882a593Smuzhiyun 		return ret;
329*4882a593Smuzhiyun 	}
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	ret = device_property_read_u32(dev, "rockchip,link-id", &rpdev->link_id);
332*4882a593Smuzhiyun 	if (ret) {
333*4882a593Smuzhiyun 		dev_err(dev, "failed to get link_id, ret %d\n", ret);
334*4882a593Smuzhiyun 		goto free_channel;
335*4882a593Smuzhiyun 	}
336*4882a593Smuzhiyun 	ret = device_property_read_u32(dev, "rockchip,vdev-nums", &rpdev->vdev_nums);
337*4882a593Smuzhiyun 	if (ret) {
338*4882a593Smuzhiyun 		dev_info(dev, "vdev-nums default 1\n");
339*4882a593Smuzhiyun 		rpdev->vdev_nums = 1;
340*4882a593Smuzhiyun 	}
341*4882a593Smuzhiyun 	if (rpdev->vdev_nums > RPMSG_MAX_INSTANCE_NUM) {
342*4882a593Smuzhiyun 		dev_err(dev, "vdev-nums exceed the max %d\n", RPMSG_MAX_INSTANCE_NUM);
343*4882a593Smuzhiyun 		ret = -EINVAL;
344*4882a593Smuzhiyun 		goto free_channel;
345*4882a593Smuzhiyun 	}
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	ret = rk_set_vring_phy_buf(pdev, rpdev, rpdev->vdev_nums);
348*4882a593Smuzhiyun 	if (ret) {
349*4882a593Smuzhiyun 		dev_err(dev, "No vring buffer.\n");
350*4882a593Smuzhiyun 		ret = -ENOMEM;
351*4882a593Smuzhiyun 		goto free_channel;
352*4882a593Smuzhiyun 	}
353*4882a593Smuzhiyun 	if (of_reserved_mem_device_init(dev)) {
354*4882a593Smuzhiyun 		dev_info(dev, "No shared DMA pool.\n");
355*4882a593Smuzhiyun 		rpdev->flags &= (~RPMSG_SHARED_DMA_POOL);
356*4882a593Smuzhiyun 	} else {
357*4882a593Smuzhiyun 		rpdev->flags |= RPMSG_SHARED_DMA_POOL;
358*4882a593Smuzhiyun 	}
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	for (i = 0; i < rpdev->vdev_nums; i++) {
361*4882a593Smuzhiyun 		dev_info(dev, "rpdev vdev%d: vring0 0x%x, vring1 0x%x\n",
362*4882a593Smuzhiyun 			 i, rpdev->rpvdev[i]->vring[0], rpdev->rpvdev[i]->vring[1]);
363*4882a593Smuzhiyun 		rpdev->rpvdev[i]->vdev.id.device = VIRTIO_ID_RPMSG;
364*4882a593Smuzhiyun 		rpdev->rpvdev[i]->vdev.config = &rk_rpmsg_config_ops;
365*4882a593Smuzhiyun 		rpdev->rpvdev[i]->vdev.dev.parent = dev;
366*4882a593Smuzhiyun 		rpdev->rpvdev[i]->vdev.dev.release = rk_rpmsg_vdev_release;
367*4882a593Smuzhiyun 		rpdev->rpvdev[i]->base_queue_id = i * 2;
368*4882a593Smuzhiyun 		rpdev->rpvdev[i]->rpdev = rpdev;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 		ret = register_virtio_device(&rpdev->rpvdev[i]->vdev);
371*4882a593Smuzhiyun 		if (ret) {
372*4882a593Smuzhiyun 			dev_err(dev, "fail to register rpvdev: %d\n", ret);
373*4882a593Smuzhiyun 			goto free_reserved_mem;
374*4882a593Smuzhiyun 		}
375*4882a593Smuzhiyun 	}
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 	platform_set_drvdata(pdev, rpdev);
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	return ret;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun free_reserved_mem:
382*4882a593Smuzhiyun 	if (rpdev->flags & RPMSG_SHARED_DMA_POOL)
383*4882a593Smuzhiyun 		of_reserved_mem_device_release(dev);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun free_channel:
386*4882a593Smuzhiyun 	mbox_free_channel(rpdev->mbox_rx_chan);
387*4882a593Smuzhiyun 	mbox_free_channel(rpdev->mbox_tx_chan);
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	return ret;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun 
rockchip_rpmsg_remove(struct platform_device * pdev)392*4882a593Smuzhiyun static int rockchip_rpmsg_remove(struct platform_device *pdev)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun 	struct rk_rpmsg_dev *rpdev = platform_get_drvdata(pdev);
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	mbox_free_channel(rpdev->mbox_rx_chan);
397*4882a593Smuzhiyun 	mbox_free_channel(rpdev->mbox_tx_chan);
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun 
/* Match data is the SoC variant, consumed via device_get_match_data(). */
static const struct of_device_id rockchip_rpmsg_match[] = {
	{ .compatible = "rockchip,rk3562-rpmsg", .data = (void *)RK3562, },
	{ .compatible = "rockchip,rk3568-rpmsg", .data = (void *)RK3568, },
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, rockchip_rpmsg_match);
409*4882a593Smuzhiyun 
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver rockchip_rpmsg_driver = {
	.probe = rockchip_rpmsg_probe,
	.remove = rockchip_rpmsg_remove,
	.driver = {
		.name = "rockchip-rpmsg",
		.of_match_table = rockchip_rpmsg_match,
	},
};
module_platform_driver(rockchip_rpmsg_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Rockchip Remote Processors Messaging Platform Support");
MODULE_AUTHOR("Steven Liu <steven.liu@rock-chips.com>");
423*4882a593Smuzhiyun 
424