// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip Remote Processors Messaging Platform Support.
 *
 * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
 * Author: Steven Liu <steven.liu@rock-chips.com>
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/rpmsg/rockchip_rpmsg.h>
#include <linux/slab.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <soc/rockchip/rockchip-mailbox.h>

#include "rpmsg_internal.h"

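/* SoC variants supported by this driver, selected via of_device_id match data. */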
enum rk_rpmsg_chip {
	RK3562,
	RK3568,
};

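/*
 * Per-instance virtio device: vring[] holds the physical addresses of the
 * rx/tx vrings carved out of the platform MEM resource, vq[] the matching
 * virtqueues, and base_queue_id the queue id of vring 0.
 */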
struct rk_virtio_dev {
	struct virtio_device vdev;
	unsigned int vring[2];
	struct virtqueue *vq[2];
	unsigned int base_queue_id;
	int num_of_vqs;
	struct rk_rpmsg_dev *rpdev;
};

#define to_rk_rpvdev(vd) container_of(vd, struct rk_virtio_dev, vdev)

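/*
 * Driver state for one rpmsg link: the mailbox client and its rx/tx
 * channels, the link id and flags shared with the remote, and up to
 * RPMSG_MAX_INSTANCE_NUM virtio devices registered on top of it.
 */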
struct rk_rpmsg_dev {
	struct platform_device *pdev;
	enum rk_rpmsg_chip chip;
	u32 vdev_nums;
	unsigned int link_id;
	int first_notify;
	u32 flags;
	struct mbox_client mbox_cl;
	struct mbox_chan *mbox_rx_chan;
	struct mbox_chan *mbox_tx_chan;
	struct rk_virtio_dev *rpvdev[RPMSG_MAX_INSTANCE_NUM];
};

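/* Per-virtqueue bookkeeping, stored in vq->priv. */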
struct rk_rpmsg_vq_info {
	u32 queue_id;
	void *vring_addr;
	struct rk_rpmsg_dev *rpdev;
};

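/*
 * Mailbox rx callback: the remote kicks us through the "rpmsg-rx" channel.
 * Validate the magic word, mark the remote as ready and run the rx
 * virtqueue callback so the rpmsg core can pick up the new message.
 */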
static void rk_rpmsg_rx_callback(struct mbox_client *client, void *message)
{
	u32 link_id;
	struct rk_virtio_dev *rpvdev;
	struct rk_rpmsg_dev *rpdev = container_of(client, struct rk_rpmsg_dev, mbox_cl);
	struct platform_device *pdev = rpdev->pdev;
	struct device *dev = &pdev->dev;
	struct rockchip_mbox_msg *rx_msg;

	rx_msg = message;
	dev_dbg(dev, "rpmsg master: receive cmd=0x%x data=0x%x\n",
		rx_msg->cmd, rx_msg->data);
	if (rx_msg->data != RPMSG_MBOX_MAGIC)
		dev_err(dev, "rpmsg master: mailbox data error!\n");
	link_id = rx_msg->cmd & 0xFFU;
	/* TODO: only support one remote core now */
	rpvdev = rpdev->rpvdev[0];
	rpdev->flags |= RPMSG_REMOTE_IS_READY;
	dev_dbg(dev, "rpmsg master: rx link_id=0x%x flag=0x%x\n", link_id, rpdev->flags);
	vring_interrupt(0, rpvdev->vq[0]);
}

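/*
 * Virtqueue kick handler. The very first kick of the rx queue (even queue
 * id) is forwarded to the remote as the init handshake; later rx-queue
 * kicks are swallowed because tx-done signalling is not supported. All
 * other kicks become a mailbox message carrying the link id and magic word.
 */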
static bool rk_rpmsg_notify(struct virtqueue *vq)
{
	struct rk_rpmsg_vq_info *rpvq = vq->priv;
	struct rk_rpmsg_dev *rpdev = rpvq->rpdev;
	struct platform_device *pdev = rpdev->pdev;
	struct device *dev = &pdev->dev;
	u32 link_id;
	int ret;
	struct rockchip_mbox_msg tx_msg;

	memset(&tx_msg, 0, sizeof(tx_msg));
	dev_dbg(dev, "queue_id-0x%x virt_vring_addr 0x%p\n",
		rpvq->queue_id, rpvq->vring_addr);

	link_id = rpdev->link_id;
	tx_msg.cmd = link_id & 0xFFU;
	tx_msg.data = RPMSG_MBOX_MAGIC;

	if ((rpdev->first_notify == 0) && (rpvq->queue_id % 2 == 0)) {
		/* first_notify is used in the master init handshake phase. */
		dev_dbg(dev, "rpmsg first_notify\n");
		rpdev->first_notify++;
	} else if (rpvq->queue_id % 2 == 0) {
		/* tx done is not supported, so ignored */
		return true;
	}
	ret = mbox_send_message(rpdev->mbox_tx_chan, &tx_msg);
	if (ret < 0) {
		dev_err(dev, "mbox send failed!\n");
		return false;
	}
	mbox_chan_txdone(rpdev->mbox_tx_chan, 0);

	return true;
}

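/*
 * Map the reserved vring memory for one queue, zero it and wrap it in a
 * virtqueue via vring_new_virtqueue(). The mapping is torn down again in
 * rk_rpmsg_del_vqs().
 */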
static struct virtqueue *rk_rpmsg_find_vq(struct virtio_device *vdev,
					  unsigned int index,
					  void (*callback)(struct virtqueue *vq),
					  const char *name,
					  bool ctx)
{
	struct rk_virtio_dev *rpvdev = to_rk_rpvdev(vdev);
	struct rk_rpmsg_dev *rpdev = rpvdev->rpdev;
	struct platform_device *pdev = rpdev->pdev;
	struct device *dev = &pdev->dev;
	struct rk_rpmsg_vq_info *rpvq;
	struct virtqueue *vq;
	int ret;

	rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
	if (!rpvq)
		return ERR_PTR(-ENOMEM);

	rpdev->flags &= (~RPMSG_CACHED_VRING);
	rpvq->vring_addr = (__force void *) ioremap(rpvdev->vring[index], RPMSG_VRING_SIZE);
	if (!rpvq->vring_addr) {
		ret = -ENOMEM;
		goto free_rpvq;
	}
	dev_dbg(dev, "vring%d: phys 0x%x, virt 0x%p\n", index,
		rpvdev->vring[index], rpvq->vring_addr);

	memset_io(rpvq->vring_addr, 0, RPMSG_VRING_SIZE);

	vq = vring_new_virtqueue(index, RPMSG_BUF_COUNT, RPMSG_VRING_ALIGN, vdev, true, ctx,
				 rpvq->vring_addr, rk_rpmsg_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue failed\n");
		ret = -ENOMEM;
		goto unmap_vring;
	}

	rpvdev->vq[index] = vq;
	vq->priv = rpvq;

	rpvq->queue_id = rpvdev->base_queue_id + index;
	rpvq->rpdev = rpdev;

	return vq;

unmap_vring:
	iounmap((__force void __iomem *) rpvq->vring_addr);
free_rpvq:
	kfree(rpvq);
	return ERR_PTR(ret);
}

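/* Device status and reset handling are not implemented yet (see TODOs). */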
static u8 rk_rpmsg_get_status(struct virtio_device *vdev)
{
	/* TODO: */
	return 0;
}

static void rk_rpmsg_set_status(struct virtio_device *vdev, u8 status)
{
	/* TODO: */
}

static void rk_rpmsg_reset(struct virtio_device *vdev)
{
	/* TODO: */
}

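/* Unmap and free every virtqueue created by rk_rpmsg_find_vq(). */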
static void rk_rpmsg_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		struct rk_rpmsg_vq_info *rpvq = vq->priv;

		iounmap((__force void __iomem *) rpvq->vring_addr);
		vring_del_virtqueue(vq);
		kfree(rpvq);
	}
}

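/*
 * virtio_rpmsg_bus asks for exactly two virtqueues per device:
 * vqs[0] is the rx queue and vqs[1] is the tx queue.
 */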
static int rk_rpmsg_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			     struct virtqueue *vqs[],
			     vq_callback_t *callbacks[],
			     const char * const names[],
			     const bool *ctx,
			     struct irq_affinity *desc)
{
	struct rk_virtio_dev *rpvdev = to_rk_rpvdev(vdev);
	int i, ret;

	/* Each rpmsg instance has two virtqueues. vqs[0] is rvq and vqs[1] is tvq */
	if (nvqs != 2)
		return -EINVAL;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = rk_rpmsg_find_vq(vdev, i, callbacks[i], names[i],
					  ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	rpvdev->num_of_vqs = nvqs;

	return 0;

error:
	rk_rpmsg_del_vqs(vdev);

	return ret;
}

static u64 rk_rpmsg_get_features(struct virtio_device *vdev)
{
	return RPMSG_VIRTIO_RPMSG_F_NS;
}

static int rk_rpmsg_finalize_features(struct virtio_device *vdev)
{
	vring_transport_features(vdev);

	return 0;
}

static void rk_rpmsg_vdev_release(struct device *dev)
{
}

static const struct virtio_config_ops rk_rpmsg_config_ops = {
	.get_status = rk_rpmsg_get_status,
	.set_status = rk_rpmsg_set_status,
	.reset = rk_rpmsg_reset,
	.find_vqs = rk_rpmsg_find_vqs,
	.del_vqs = rk_rpmsg_del_vqs,
	.get_features = rk_rpmsg_get_features,
	.finalize_features = rk_rpmsg_finalize_features,
};

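/*
 * Split the platform MEM resource into per-vdev vring areas: each vdev gets
 * two vrings of RPMSG_VRING_SIZE, with RPMSG_VRING_OVERHEAD reserved per vdev.
 */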
static int rk_set_vring_phy_buf(struct platform_device *pdev,
				struct rk_rpmsg_dev *rpdev, int vdev_nums)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	unsigned int start, end;
	int i, ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		size = resource_size(res);
		start = res->start;
		end = res->start + size;
		for (i = 0; i < vdev_nums; i++) {
			rpdev->rpvdev[i] = devm_kzalloc(dev, sizeof(struct rk_virtio_dev),
							GFP_KERNEL);
			if (!rpdev->rpvdev[i])
				return -ENOMEM;

			rpdev->rpvdev[i]->vring[0] = start;
			rpdev->rpvdev[i]->vring[1] = start + RPMSG_VRING_SIZE;
			start += RPMSG_VRING_OVERHEAD;
			if (start > end) {
				dev_err(dev, "Too small memory size %x!\n", (u32)size);
				ret = -EINVAL;
				break;
			}
		}
	} else {
		return -ENOMEM;
	}

	return ret;
}

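/*
 * Probe: request the "rpmsg-rx"/"rpmsg-tx" mailbox channels, read the link
 * id and vdev count from the firmware node, carve up the vring memory and
 * register one virtio rpmsg device per vdev.
 */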
static int rockchip_rpmsg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_rpmsg_dev *rpdev = NULL;
	struct mbox_client *cl;
	int i, ret = 0;

	rpdev = devm_kzalloc(dev, sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	dev_info(dev, "rockchip rpmsg platform probe.\n");
	rpdev->pdev = pdev;
	rpdev->chip = (enum rk_rpmsg_chip)(uintptr_t)device_get_match_data(dev);
	rpdev->first_notify = 0;

	cl = &rpdev->mbox_cl;
	cl->dev = dev;
	cl->rx_callback = rk_rpmsg_rx_callback;

	rpdev->mbox_rx_chan = mbox_request_channel_byname(cl, "rpmsg-rx");
	if (IS_ERR(rpdev->mbox_rx_chan)) {
		ret = PTR_ERR(rpdev->mbox_rx_chan);
		dev_err(dev, "failed to request mbox rx chan, ret %d\n", ret);
		return ret;
	}
	rpdev->mbox_tx_chan = mbox_request_channel_byname(cl, "rpmsg-tx");
	if (IS_ERR(rpdev->mbox_tx_chan)) {
		ret = PTR_ERR(rpdev->mbox_tx_chan);
		dev_err(dev, "failed to request mbox tx chan, ret %d\n", ret);
		mbox_free_channel(rpdev->mbox_rx_chan);
		return ret;
	}

	ret = device_property_read_u32(dev, "rockchip,link-id", &rpdev->link_id);
	if (ret) {
		dev_err(dev, "failed to get link_id, ret %d\n", ret);
		goto free_channel;
	}
	ret = device_property_read_u32(dev, "rockchip,vdev-nums", &rpdev->vdev_nums);
	if (ret) {
		dev_info(dev, "vdev-nums default 1\n");
		rpdev->vdev_nums = 1;
	}
	if (rpdev->vdev_nums > RPMSG_MAX_INSTANCE_NUM) {
		dev_err(dev, "vdev-nums exceed the max %d\n", RPMSG_MAX_INSTANCE_NUM);
		ret = -EINVAL;
		goto free_channel;
	}

	ret = rk_set_vring_phy_buf(pdev, rpdev, rpdev->vdev_nums);
	if (ret) {
		dev_err(dev, "No vring buffer.\n");
		ret = -ENOMEM;
		goto free_channel;
	}
	if (of_reserved_mem_device_init(dev)) {
		dev_info(dev, "No shared DMA pool.\n");
		rpdev->flags &= (~RPMSG_SHARED_DMA_POOL);
	} else {
		rpdev->flags |= RPMSG_SHARED_DMA_POOL;
	}

	for (i = 0; i < rpdev->vdev_nums; i++) {
		dev_info(dev, "rpdev vdev%d: vring0 0x%x, vring1 0x%x\n",
			 i, rpdev->rpvdev[i]->vring[0], rpdev->rpvdev[i]->vring[1]);
		rpdev->rpvdev[i]->vdev.id.device = VIRTIO_ID_RPMSG;
		rpdev->rpvdev[i]->vdev.config = &rk_rpmsg_config_ops;
		rpdev->rpvdev[i]->vdev.dev.parent = dev;
		rpdev->rpvdev[i]->vdev.dev.release = rk_rpmsg_vdev_release;
		rpdev->rpvdev[i]->base_queue_id = i * 2;
		rpdev->rpvdev[i]->rpdev = rpdev;

		ret = register_virtio_device(&rpdev->rpvdev[i]->vdev);
		if (ret) {
			dev_err(dev, "fail to register rpvdev: %d\n", ret);
			goto free_reserved_mem;
		}
	}

	platform_set_drvdata(pdev, rpdev);

	return ret;

free_reserved_mem:
	if (rpdev->flags & RPMSG_SHARED_DMA_POOL)
		of_reserved_mem_device_release(dev);

free_channel:
	mbox_free_channel(rpdev->mbox_rx_chan);
	mbox_free_channel(rpdev->mbox_tx_chan);

	return ret;
}

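/* Release the mailbox channels acquired in probe. */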
static int rockchip_rpmsg_remove(struct platform_device *pdev)
{
	struct rk_rpmsg_dev *rpdev = platform_get_drvdata(pdev);

	mbox_free_channel(rpdev->mbox_rx_chan);
	mbox_free_channel(rpdev->mbox_tx_chan);

	return 0;
}

static const struct of_device_id rockchip_rpmsg_match[] = {
	{ .compatible = "rockchip,rk3562-rpmsg", .data = (void *)RK3562, },
	{ .compatible = "rockchip,rk3568-rpmsg", .data = (void *)RK3568, },
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, rockchip_rpmsg_match);

static struct platform_driver rockchip_rpmsg_driver = {
	.probe = rockchip_rpmsg_probe,
	.remove = rockchip_rpmsg_remove,
	.driver = {
		.name = "rockchip-rpmsg",
		.of_match_table = rockchip_rpmsg_match,
	},
};
module_platform_driver(rockchip_rpmsg_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Rockchip Remote Processors Messaging Platform Support");
MODULE_AUTHOR("Steven Liu <steven.liu@rock-chips.com>");