xref: /OK3568_Linux_fs/kernel/drivers/pci/controller/rockchip-pcie-dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <uapi/linux/rk-pcie-dma.h>

#include "rockchip-pcie-dma.h"

/* dma transfer */
/*
 * Write buffer format
 * 0	     4               8	       0xc	0x10	SZ_1M
 * ------------------------------------------------------
 * |0x12345678|local idx(0-7)|data size|reserve	|data	|
 * ------------------------------------------------------
 *
 * Bytes 0-3: magic the receiver checks to see whether a valid data
 *	      packet has arrived
 * Bytes 4-7: index into the data rcv ack buffer
 * Bytes 8-11: actual data size
 *
 * Data rcv ack buffer format
 * 0		4B
 * --------------
 * |0xdeadbeef	|
 * --------------
 *
 * Data free ack buffer format
 * 0		4B
 * --------------
 * |0xcafebabe	|
 * --------------
 *
 *	RC		EP
 * -	---------	---------
 * |	|  1MB	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * 8MB	|wr buf	|  ->	|rd buf	|
 * |	|	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  1MB	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * 8MB	|rd buf	|  <-	|wr buf	|
 * |	|	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|	|	|	|
 * |	|scan	|  <-	|data	|
 * |	|	|	|rcv	|
 * |	|	|	|ack	|
 * |	|	|	|send	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|data	|  ->	|scan	|
 * |	|rcv	|	|	|
 * |	|ack	|	|	|
 * |	|send	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|	|	|	|
 * |	|scan	|  <-	|data	|
 * |	|	|	|free	|
 * |	|	|	|ack	|
 * |	|	|	|send	|
 * -	---------	---------
 * |	|4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|data	|  ->	|scan	|
 * |	|free	|	|	|
 * |	|ack	|	|	|
 * |	|send	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 */

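/*
 * Illustrative userspace transmit flow: a minimal sketch, not part of this
 * driver, mirroring the ioctl handshake implemented in rk_pcie_misc_ioctl()
 * below. The device node name, the hard-coded slot count (8) and channel 0
 * are assumptions; the ioctl commands and the pcie_dma_ioctl_param union
 * come from uapi/linux/rk-pcie-dma.h. Guarded out so it never builds here.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/rk-pcie-dma.h>

#define EX_BUF_SIZE	(1024 * 1024)	/* must match both ends */
#define EX_BUF_CNT	8		/* PCIE_DMA_BUF_CNT on the kernel side */

static int example_send(const void *data, size_t len, unsigned int widx)
{
	union pcie_dma_ioctl_param msg = {0};
	void *base;
	int fd = open("/dev/pcie-dev", O_RDWR);

	if (fd < 0)
		return -1;

	/* Agree on the per-slot size; the driver derives the rd/wr regions
	 * and the in-buffer metadata offsets from it. */
	msg.buffer_size = EX_BUF_SIZE;
	ioctl(fd, PCIE_DMA_SET_BUFFER_SIZE, &msg);

	/* Publish the local buffer address to the remote side; 0 keeps the
	 * DT reserved-memory region. This also starts the scan timer. */
	msg.local_addr = 0;
	ioctl(fd, PCIE_DMA_USER_SET_BUF_ADDR, &msg);

	/* Map the shared region and copy the payload into write slot widx
	 * (on the EP side the write slots start after the read region). */
	base = mmap(NULL, EX_BUF_SIZE * EX_BUF_CNT * 2, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	memcpy((char *)base + widx * EX_BUF_SIZE, data, len);

	/* Kick the DMA for that slot and wait for it to finish. */
	msg.in.l_widx = widx;
	msg.in.r_widx = widx;
	msg.in.size = len;
	msg.in.chn = 0;			/* PCIE_DMA_CHN0 */
	ioctl(fd, PCIE_DMA_START, &msg);
	ioctl(fd, PCIE_DMA_WAIT_TRANSFER_COMPLETE, &msg);

	close(fd);
	return 0;
}
#endif
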
#define NODE_SIZE		(sizeof(unsigned int))
#define PCIE_DMA_ACK_BLOCK_SIZE		(NODE_SIZE * 8)

#define PCIE_DMA_BUF_CNT		8

#define PCIE_DMA_DATA_CHECK		0x12345678
#define PCIE_DMA_DATA_ACK_CHECK		0xdeadbeef
#define PCIE_DMA_DATA_FREE_ACK_CHECK	0xcafebabe

#define PCIE_DMA_PARAM_SIZE		64
#define PCIE_DMA_CHN0			0x0

enum transfer_type {
	PCIE_DMA_DATA_SND,
	PCIE_DMA_DATA_RCV_ACK,
	PCIE_DMA_DATA_FREE_ACK,
	PCIE_DMA_READ_REMOTE,
};

/* Set from debugfs ("pcie_trx") to enable XOR checksum verification */
static int enable_check_sum;

struct pcie_misc_dev {
	struct miscdevice dev;
	struct dma_trx_obj *obj;
};

static void *rk_pcie_map_kernel(phys_addr_t start, size_t len);
static void rk_pcie_unmap_kernel(void *vaddr);

static inline bool is_rc(struct dma_trx_obj *obj)
{
	return (obj->busno == 0);
}

/* Simple XOR checksum over a buffer, treated as an array of 32-bit words */
static unsigned int rk_pcie_check_sum(unsigned int *src, int size)
{
	unsigned int result = 0;

	size /= sizeof(*src);

	while (size-- > 0)
		result ^= *src++;

	return result;
}

static int rk_pcie_handle_dma_interrupt(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
{
	struct dma_table *cur;

	cur = obj->cur;
	if (!cur) {
		pr_err("no pcie dma table\n");
		return 0;
	}

	obj->dma_free = true;
	obj->irq_num++;

	if (cur->dir == DMA_TO_BUS) {
		if (list_empty(&obj->tbl_list)) {
			if (obj->dma_free &&
			    obj->loop_count >= obj->loop_count_threshold)
				complete(&obj->done);
		}
	}

	return 0;
}

static void rk_pcie_prepare_dma(struct dma_trx_obj *obj,
			unsigned int idx, unsigned int bus_idx,
			unsigned int local_idx, size_t buf_size,
			enum transfer_type type, int chn)
{
	struct device *dev = obj->dev;
	phys_addr_t local, bus;
	void *virt;
	unsigned long flags;
	struct dma_table *table = NULL;
	unsigned int checksum;

	switch (type) {
	case PCIE_DMA_DATA_SND:
		table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_DATA_SND;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		virt = obj->local_mem_base + local_idx * obj->buffer_size;

		if (obj->addr_reverse) {
			if (is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		} else {
			if (!is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		}

		obj->begin = ktime_get();
		dma_sync_single_for_device(dev, local, buf_size, DMA_TO_DEVICE);
		obj->end = ktime_get();

		obj->cache_time_total += ktime_to_ns(ktime_sub(obj->end, obj->begin));

		writel(PCIE_DMA_DATA_CHECK, virt + obj->set_data_check_pos);
		writel(local_idx, virt + obj->set_local_idx_pos);
		writel(buf_size, virt + obj->set_buf_size_pos);

		if (enable_check_sum) {
			checksum = rk_pcie_check_sum(virt, SZ_1M - 0x10);
			writel(checksum, virt + obj->set_chk_sum_pos);
		}

		buf_size = obj->buffer_size;
		break;
	case PCIE_DMA_DATA_RCV_ACK:
		table = obj->table[PCIE_DMA_DATA_RCV_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_RCV_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;

		if (is_rc(obj)) {
			local += PCIE_DMA_ACK_BLOCK_SIZE;
			bus += PCIE_DMA_ACK_BLOCK_SIZE;
			virt += PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_ACK_CHECK, virt);
		break;
	case PCIE_DMA_DATA_FREE_ACK:
		table = obj->table[PCIE_DMA_DATA_FREE_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_FREE_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		buf_size = 4;

		if (is_rc(obj)) {
			local += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
		} else {
			local += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_FREE_ACK_CHECK, virt);
		break;
	case PCIE_DMA_READ_REMOTE:
		table = obj->table[PCIE_DMA_DATA_READ_REMOTE_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_READ_REMOTE;
		table->dir = DMA_FROM_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		if (!is_rc(obj)) {
			local += obj->rd_buf_size;
			bus += obj->wr_buf_size;
		}
		buf_size = obj->buffer_size;
		break;
	default:
		dev_err(dev, "type = %d not supported\n", type);
		return;
	}

	table->buf_size = buf_size;
	table->bus = bus;
	table->local = local;
	table->chn = chn;

	if (!obj->config_dma_func) {
		WARN_ON(1);
		return;
	}
	obj->config_dma_func(table);

	spin_lock_irqsave(&obj->tbl_list_lock, flags);
	list_add_tail(&table->tbl_node, &obj->tbl_list);
	spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
}

static void rk_pcie_dma_trx_work(struct work_struct *work)
{
	unsigned long flags;
	struct dma_trx_obj *obj = container_of(work,
				struct dma_trx_obj, dma_trx_work);
	struct dma_table *table;

	while (!list_empty(&obj->tbl_list)) {
		table = list_first_entry(&obj->tbl_list, struct dma_table,
					 tbl_node);
		if (obj->dma_free) {
			obj->dma_free = false;
			spin_lock_irqsave(&obj->tbl_list_lock, flags);
			list_del_init(&table->tbl_node);
			spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
			obj->cur = table;
			if (!obj->start_dma_func) {
				WARN_ON(1);
				return;
			}
			reinit_completion(&obj->done);
			obj->start_dma_func(obj, table);
		}
	}
}

static void rk_pcie_clear_ack(void *addr)
{
	writel(0x0, addr);
}

/*
 * Runs every 100 us: picks up the remote buffer address on the first pass,
 * then scans the shared memory for newly arrived data and for rcv/free acks,
 * queues DMA work as needed and wakes any pollers.
 */
static enum hrtimer_restart rk_pcie_scan_timer(struct hrtimer *timer)
{
	unsigned int sdv;
	unsigned int idx;
	unsigned int sav;
	unsigned int suv;
	void *sda_base;
	void *scan_data_addr;
	void *scan_ack_addr;
	void *scan_user_addr;
	int i;
	bool need_ack = false;
	struct dma_trx_obj *obj = container_of(timer,
					struct dma_trx_obj, scan_timer);
	unsigned int check_sum, check_sum_tmp;

	if (!obj->remote_mem_start) {
		if (is_rc(obj))
			obj->remote_mem_start = readl(obj->region_base + 0x4);
		else
			obj->remote_mem_start = readl(obj->region_base);
		goto continue_scan;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		sda_base = obj->local_mem_base + obj->buffer_size * i;

		if (obj->addr_reverse) {
			if (is_rc(obj))
				scan_data_addr = sda_base;
			else
				scan_data_addr = sda_base + obj->rd_buf_size;
		} else {
			if (is_rc(obj))
				scan_data_addr = sda_base + obj->rd_buf_size;
			else
				scan_data_addr = sda_base;
		}
		sdv = readl(scan_data_addr + obj->set_data_check_pos);
		idx = readl(scan_data_addr + obj->set_local_idx_pos);

		if (sdv == PCIE_DMA_DATA_CHECK) {
			if (!need_ack)
				need_ack = true;
			if (enable_check_sum) {
				check_sum = readl(scan_data_addr + obj->set_chk_sum_pos);
				check_sum_tmp = rk_pcie_check_sum(scan_data_addr, SZ_1M - 0x10);
				if (check_sum != check_sum_tmp) {
					pr_err("checksum[%d] failed, 0x%x, should be 0x%x\n",
					       idx, check_sum_tmp, check_sum);
					print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
						       32, 4, scan_data_addr, SZ_1M, false);
				}
				writel(0x0, scan_data_addr + obj->set_chk_sum_pos);
			}
			writel(0x0, scan_data_addr + obj->set_data_check_pos);

			set_bit(i, &obj->local_read_available);
			rk_pcie_prepare_dma(obj, idx, 0, 0, 0x4,
				PCIE_DMA_DATA_RCV_ACK, PCIE_DMA_DEFAULT_CHN);
		}
	}

	if (need_ack || !list_empty(&obj->tbl_list))
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);

	scan_ack_addr = obj->local_mem_base + obj->ack_base;
	scan_user_addr = obj->local_mem_base + obj->ack_base;

	if (is_rc(obj)) {
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 2;
	} else {
		scan_ack_addr += PCIE_DMA_ACK_BLOCK_SIZE;
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 3;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		void *addr = scan_ack_addr + i * NODE_SIZE;

		sav = readl(addr);
		if (sav == PCIE_DMA_DATA_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->local_write_available);
		}

		addr = scan_user_addr + i * NODE_SIZE;
		suv = readl(addr);
		if (suv == PCIE_DMA_DATA_FREE_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->remote_write_available);
		}
	}

	if ((obj->local_write_available && obj->remote_write_available) ||
		obj->local_read_available) {
		wake_up(&obj->event_queue);
	}

continue_scan:
	hrtimer_add_expires(&obj->scan_timer, ktime_set(0, 100 * 1000));

	return HRTIMER_RESTART;
}

static int rk_pcie_misc_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct pcie_misc_dev *pcie_misc_dev = container_of(miscdev,
						 struct pcie_misc_dev, dev);

	filp->private_data = pcie_misc_dev->obj;

	mutex_lock(&pcie_misc_dev->obj->count_mutex);
	if (pcie_misc_dev->obj->ref_count++)
		goto already_opened;

	pcie_misc_dev->obj->loop_count = 0;
	pcie_misc_dev->obj->local_read_available = 0x0;
	pcie_misc_dev->obj->local_write_available = 0xff;
	pcie_misc_dev->obj->remote_write_available = 0xff;
	pcie_misc_dev->obj->dma_free = true;

	pr_info("Opened pcie misc device successfully\n");

already_opened:
	mutex_unlock(&pcie_misc_dev->obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_release(struct inode *inode, struct file *filp)
{
	struct dma_trx_obj *obj = filp->private_data;

	mutex_lock(&obj->count_mutex);

	if (--obj->ref_count)
		goto still_opened;
	hrtimer_cancel(&obj->scan_timer);

	pr_info("Close pcie misc device\n");

still_opened:
	mutex_unlock(&obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_mmap(struct file *filp,
				     struct vm_area_struct *vma)
{
	struct dma_trx_obj *obj = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;
	int err;

	err = remap_pfn_range(vma, vma->vm_start,
			    __phys_to_pfn(obj->local_mem_start),
			    size, vma->vm_page_prot);
	if (err)
		return -EAGAIN;

	return 0;
}

static void rk_pcie_send_addr_to_remote(struct dma_trx_obj *obj)
{
	struct dma_table *table;

	/* Temporarily reuse a table to send the local buffer address to the remote side */
	table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET];
	table->type = PCIE_DMA_DATA_SND;
	table->dir = DMA_TO_BUS;
	table->buf_size = 0x4;
	if (is_rc(obj))
		table->local = obj->region_start;
	else
		table->local = obj->region_start + 0x4;
	table->bus = table->local;
	table->chn = PCIE_DMA_DEFAULT_CHN;
	obj->config_dma_func(table);
	obj->cur = table;
	obj->start_dma_func(obj, table);
}

static long rk_pcie_misc_ioctl(struct file *filp, unsigned int cmd,
					unsigned long arg)
{
	struct dma_trx_obj *obj = filp->private_data;
	struct device *dev = obj->dev;
	union pcie_dma_ioctl_param msg;
	union pcie_dma_ioctl_param msg_to_user;
	phys_addr_t addr;
	void __user *uarg = (void __user *)arg;
	int ret;
	int i;
	phys_addr_t addr_send_to_remote;
	enum transfer_type type;

	if (copy_from_user(&msg, uarg, sizeof(msg)) != 0) {
		dev_err(dev, "failed to copy argument into kernel space\n");
		return -EFAULT;
	}

	switch (cmd) {
	case PCIE_DMA_START:
		test_and_clear_bit(msg.in.l_widx, &obj->local_write_available);
		test_and_clear_bit(msg.in.r_widx, &obj->remote_write_available);
		type = PCIE_DMA_DATA_SND;
		obj->loop_count++;
		break;
	case PCIE_DMA_GET_LOCAL_READ_BUFFER_INDEX:
		msg_to_user.lra = obj->local_read_available;
		addr = obj->local_mem_start;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		/* cache invalidated here by the kernel, or by the user via
		 * PCIE_DMA_SYNC_BUFFER_FOR_CPU */
		for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
			if (test_bit(i, &obj->local_read_available))
				dma_sync_single_for_cpu(dev, addr + i * obj->buffer_size, obj->buffer_size, DMA_FROM_DEVICE);
		}

		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get read buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX:
		test_and_clear_bit(msg.in.idx, &obj->local_read_available);
		type = PCIE_DMA_DATA_FREE_ACK;
		break;
	case PCIE_DMA_GET_LOCAL_REMOTE_WRITE_BUFFER_INDEX:
		msg_to_user.out.lwa = obj->local_write_available;
		msg_to_user.out.rwa = obj->remote_write_available;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get write buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SYNC_BUFFER_FOR_CPU:
		addr = obj->local_mem_start + msg.in.idx * obj->buffer_size;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		dma_sync_single_for_cpu(dev, addr, obj->buffer_size,
					DMA_FROM_DEVICE);
		break;
	case PCIE_DMA_WAIT_TRANSFER_COMPLETE:
		ret = wait_for_completion_interruptible(&obj->done);
		if (WARN_ON(ret)) {
			pr_info("failed to wait for completion\n");
			return ret;
		}

		obj->cache_time_avarage = obj->cache_time_total / obj->loop_count;

		pr_debug("cache_time: total = %lld, average = %lld, count = %d, size = 0x%x\n",
			 obj->cache_time_total, obj->cache_time_avarage,
			 obj->loop_count, obj->buffer_size);

		obj->cache_time_avarage = 0;
		obj->cache_time_total = 0;

		obj->loop_count = 0;
		break;
	case PCIE_DMA_SET_LOOP_COUNT:
		obj->loop_count_threshold = msg.count;
		pr_info("threshold = %d\n", obj->loop_count_threshold);
		break;
	case PCIE_DMA_GET_TOTAL_BUFFER_SIZE:
		msg_to_user.total_buffer_size = obj->local_mem_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get total buffer size\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SET_BUFFER_SIZE:
		obj->buffer_size = msg.buffer_size;
		pr_debug("buffer_size = %d\n", obj->buffer_size);
		obj->rd_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->wr_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->ack_base = obj->rd_buf_size + obj->wr_buf_size;
		obj->set_data_check_pos = obj->buffer_size - 0x4;
		obj->set_local_idx_pos = obj->buffer_size - 0x8;
		obj->set_buf_size_pos = obj->buffer_size - 0xc;
		obj->set_chk_sum_pos = obj->buffer_size - 0x10;
		break;
	case PCIE_DMA_READ_FROM_REMOTE:
		pr_debug("read remote buffer %d into local buffer %d\n",
			 msg.in.r_widx, msg.in.l_widx);

		type = PCIE_DMA_READ_REMOTE;
		break;
	case PCIE_DMA_USER_SET_BUF_ADDR:
		/* If msg.local_addr is valid, use it for the local buffer; it
		 * must be a contiguous physical address. If msg.local_addr is
		 * zero, the local buffer comes from the DT reserved region.
		 * Either way the local buffer address must be sent to the
		 * remote side, so it knows where to send data. This case must
		 * be completed before any data is sent.
		 */
		if (msg.local_addr) {
			pr_debug("local_addr = %pa\n", &msg.local_addr);
			addr_send_to_remote = (phys_addr_t)msg.local_addr;
			obj->local_mem_start = (phys_addr_t)msg.local_addr;
			/* Unmap the previous mapping */
			rk_pcie_unmap_kernel(obj->local_mem_base);
			/* Remap the userspace buffer into the kernel */
			obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
						obj->buffer_size * PCIE_DMA_BUF_CNT * 2 + SZ_4K);
			if (!obj->local_mem_base)
				return -EFAULT;
		} else {
			addr_send_to_remote = obj->local_mem_start;
		}
		if (is_rc(obj))
			writel(addr_send_to_remote, obj->region_base);
		else
			writel(addr_send_to_remote, obj->region_base + 0x4);
		rk_pcie_send_addr_to_remote(obj);
		hrtimer_start(&obj->scan_timer,
		      ktime_set(0, 1 * 1000 * 1000 * 1000), HRTIMER_MODE_REL);
		break;
	case PCIE_DMA_GET_BUFFER_SIZE:
		msg_to_user.buffer_size = obj->buffer_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get buffer size\n");
			return -EFAULT;
		}
		break;
	default:
		pr_info("%s, %d, cmd : %x not supported\n", __func__, __LINE__,
			cmd);
		return -EFAULT;
	}

	if (cmd == PCIE_DMA_START || cmd == PCIE_DMA_READ_FROM_REMOTE ||
		cmd == PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX) {
		rk_pcie_prepare_dma(obj, msg.in.idx, msg.in.r_widx,
				    msg.in.l_widx, msg.in.size, type,
				    msg.in.chn);
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);
	}

	return 0;
}
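
/*
 * Illustrative userspace receive path: a minimal sketch under the same
 * assumptions as the transmit example near the top of this file. consume()
 * is a hypothetical callback; on the RC side the readable slots start after
 * the local write region, which this sketch glosses over.
 */
#if 0
#include <poll.h>

static void example_recv(int fd, void *base, unsigned int buf_size)
{
	union pcie_dma_ioctl_param msg = {0};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	unsigned int i;

	/* Wait until the scan timer flags at least one readable slot. */
	poll(&pfd, 1, -1);

	/* Fetch the bitmap of slots holding fresh data; the driver also
	 * syncs those buffers for CPU access. */
	ioctl(fd, PCIE_DMA_GET_LOCAL_READ_BUFFER_INDEX, &msg);

	for (i = 0; i < 8; i++) {	/* 8 == PCIE_DMA_BUF_CNT */
		if (!(msg.lra & (1UL << i)))
			continue;

		consume((char *)base + i * buf_size, buf_size);

		/* Release the slot; the driver DMAs a free-ack back so the
		 * remote side may reuse it. */
		msg.in.idx = i;
		ioctl(fd, PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX, &msg);
	}
}
#endif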

static unsigned int rk_pcie_misc_poll(struct file *filp,
						poll_table *wait)
{
	struct dma_trx_obj *obj = filp->private_data;
	u32 lwa, rwa, lra;
	u32 ret = 0;

	poll_wait(filp, &obj->event_queue, wait);

	lwa = obj->local_write_available;
	rwa = obj->remote_write_available;
	if (lwa && rwa)
		ret = POLLOUT;

	lra = obj->local_read_available;
	if (lra)
		ret |= POLLIN;

	return ret;
}

static const struct file_operations rk_pcie_misc_fops = {
	.open		= rk_pcie_misc_open,
	.release	= rk_pcie_misc_release,
	.mmap		= rk_pcie_misc_mmap,
	.unlocked_ioctl	= rk_pcie_misc_ioctl,
	.poll		= rk_pcie_misc_poll,
};

static void rk_pcie_delete_misc(struct dma_trx_obj *obj)
{
	misc_deregister(&obj->pcie_dev->dev);
}

static int rk_pcie_add_misc(struct dma_trx_obj *obj)
{
	int ret;
	struct pcie_misc_dev *pcie_dev;

	pcie_dev = devm_kzalloc(obj->dev, sizeof(*pcie_dev), GFP_KERNEL);
	if (!pcie_dev)
		return -ENOMEM;

	pcie_dev->dev.minor = MISC_DYNAMIC_MINOR;
	pcie_dev->dev.name = "pcie-dev";
	pcie_dev->dev.fops = &rk_pcie_misc_fops;
	pcie_dev->dev.parent = NULL;

	ret = misc_register(&pcie_dev->dev);
	if (ret) {
		pr_err("pcie: failed to register misc device.\n");
		return ret;
	}

	pcie_dev->obj = obj;
	obj->pcie_dev = pcie_dev;

	pr_info("register misc device pcie-dev\n");

	return 0;
}

/* Map a physically contiguous region into the kernel with a noncached mapping */
static void *rk_pcie_map_kernel(phys_addr_t start, size_t len)
{
	int i;
	void *vaddr;
	pgprot_t pgprot;
	phys_addr_t phys;
	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
	struct page **p = vmalloc(sizeof(struct page *) * npages);

	if (!p)
		return NULL;

	pgprot = pgprot_noncached(PAGE_KERNEL);

	phys = start;
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(phys);
		phys += PAGE_SIZE;
	}

	vaddr = vmap(p, npages, VM_MAP, pgprot);
	vfree(p);

	return vaddr;
}

static void rk_pcie_unmap_kernel(void *vaddr)
{
	vunmap(vaddr);
}

static void rk_pcie_dma_table_free(struct dma_trx_obj *obj, int num)
{
	int i;
	struct dma_table *table;

	if (num > PCIE_DMA_TABLE_NUM)
		num = PCIE_DMA_TABLE_NUM;

	for (i = 0; i < num; i++) {
		table = obj->table[i];
		dma_free_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
			table->descs, table->phys_descs);
		kfree(table);
	}
}

static int rk_pcie_dma_table_alloc(struct dma_trx_obj *obj)
{
	int i;
	struct dma_table *table;

	for (i = 0; i < PCIE_DMA_TABLE_NUM; i++) {
		table = kzalloc(sizeof(*table), GFP_KERNEL);
		if (!table)
			goto free_table;

		table->descs = dma_alloc_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
				&table->phys_descs, GFP_KERNEL | __GFP_ZERO);
		if (!table->descs) {
			kfree(table);
			goto free_table;
		}

		table->chn = PCIE_DMA_DEFAULT_CHN;
		INIT_LIST_HEAD(&table->tbl_node);
		obj->table[i] = table;
	}

	return 0;

free_table:
	rk_pcie_dma_table_free(obj, i);
	dev_err(obj->dev, "Failed to alloc dma table\n");

	return -ENOMEM;
}

#ifdef CONFIG_DEBUG_FS
static int rk_pcie_debugfs_trx_show(struct seq_file *s, void *v)
{
	struct dma_trx_obj *dma_obj = s->private;
	bool list = list_empty(&dma_obj->tbl_list);

	seq_printf(s, "version = %x,", dma_obj->version);
	seq_printf(s, "last:%s,",
			dma_obj->cur ? (dma_obj->cur->dir == DMA_FROM_BUS ? "read" : "write") : "no trx");
	seq_printf(s, "irq_num = %ld, loop_count = %d,",
			dma_obj->irq_num, dma_obj->loop_count);
	seq_printf(s, "loop_threshold = %d,",
			dma_obj->loop_count_threshold);
	seq_printf(s, "lwa = %lx, rwa = %lx, lra = %lx,",
			dma_obj->local_write_available,
			dma_obj->remote_write_available,
			dma_obj->local_read_available);
	seq_printf(s, "list : (%s), dma chn : (%s)\n",
			list ? "empty" : "not empty",
			dma_obj->dma_free ? "free" : "busy");

	return 0;
}

static int rk_pcie_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rk_pcie_debugfs_trx_show, inode->i_private);
}

static ssize_t rk_pcie_debugfs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	int ret;

	ret = kstrtoint_from_user(user_buf, count, 0, &enable_check_sum);
	if (ret)
		return ret;

	return count;
}

static const struct file_operations rk_pcie_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rk_pcie_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = rk_pcie_debugfs_write,
};
#endif

struct dma_trx_obj *rk_pcie_dma_obj_probe(struct device *dev)
{
	int ret;
	int busno;
	struct device_node *np = dev->of_node;
	struct device_node *mem;
	struct resource reg;
	struct dma_trx_obj *obj;
	int reverse;

	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev = dev;

	ret = of_property_read_u32(np, "busno", &busno);
	if (ret < 0) {
		dev_err(dev, "missing \"busno\" property\n");
		return ERR_PTR(ret);
	}

	obj->busno = busno;

	ret = of_property_read_u32(np, "reverse", &reverse);
	if (ret < 0)
		obj->addr_reverse = 0;
	else
		obj->addr_reverse = reverse;

	mem = of_parse_phandle(np, "memory-region", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return ERR_PTR(-ENODEV);
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		return ERR_PTR(-ENODEV);
	}

	obj->local_mem_start = reg.start;
	obj->local_mem_size = resource_size(&reg);
	obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
						 obj->local_mem_size);
	if (!obj->local_mem_base)
		return ERR_PTR(-ENOMEM);

	mem = of_parse_phandle(np, "memory-region1", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region1\" property\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	obj->region_start = reg.start;
	obj->region_size = resource_size(&reg);
	obj->region_base = rk_pcie_map_kernel(obj->region_start,
					      obj->region_size);
	if (!obj->region_base) {
		dev_err(dev, "mapping region_base error\n");
		ret = -ENOMEM;
		goto unmap_local_mem_region;
	}
	if (!is_rc(obj))
		writel(0x0, obj->region_base);
	else
		writel(0x0, obj->region_base + 0x4);

	ret = rk_pcie_dma_table_alloc(obj);
	if (ret) {
		dev_err(dev, "rk_pcie_dma_table_alloc error\n");
		goto unmap_region;
	}

	obj->dma_trx_wq = create_singlethread_workqueue("dma_trx_wq");
	INIT_WORK(&obj->dma_trx_work, rk_pcie_dma_trx_work);

	INIT_LIST_HEAD(&obj->tbl_list);
	spin_lock_init(&obj->tbl_list_lock);

	init_waitqueue_head(&obj->event_queue);

	hrtimer_init_on_stack(&obj->scan_timer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
	obj->scan_timer.function = rk_pcie_scan_timer;
	obj->irq_num = 0;
	obj->loop_count_threshold = 0;
	obj->ref_count = 0;
	obj->version = 0x4;
	init_completion(&obj->done);
	obj->cb = rk_pcie_handle_dma_interrupt;

	mutex_init(&obj->count_mutex);
	rk_pcie_add_misc(obj);

#ifdef CONFIG_DEBUG_FS
	obj->pcie_root = debugfs_create_dir("pcie", NULL);
	if (!obj->pcie_root) {
		ret = -EINVAL;
		goto free_dma_table;
	}

	debugfs_create_file("pcie_trx", 0644, obj->pcie_root, obj,
			&rk_pcie_debugfs_fops);
#endif

	return obj;

	/* Keep obj valid through the cleanup labels; return the error last */
free_dma_table:
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
unmap_region:
	rk_pcie_unmap_kernel(obj->region_base);
unmap_local_mem_region:
	rk_pcie_unmap_kernel(obj->local_mem_base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_probe);

void rk_pcie_dma_obj_remove(struct dma_trx_obj *obj)
{
	hrtimer_cancel(&obj->scan_timer);
	destroy_hrtimer_on_stack(&obj->scan_timer);
	rk_pcie_delete_misc(obj);
	rk_pcie_unmap_kernel(obj->local_mem_base);
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
	destroy_workqueue(obj->dma_trx_wq);

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(obj->pcie_root);
#endif
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_remove);