// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP DMA I/F functions
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ishtp-dev.h"
#include "client.h"

/**
 * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffers
 * @dev: ishtp device
 *
 * Allocate the RX and TX DMA buffers once during bus setup.
 * 1 MB is allocated for each of the RX and TX buffers, and each
 * buffer is divided into fixed-size slots.
 */
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf)
		return;

	dev->ishtp_host_dma_tx_buf_size = 1024*1024;
	dev->ishtp_host_dma_rx_buf_size = 1024*1024;

	/* Allocate Tx buffer and init usage bitmap */
	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_tx_buf_size,
					&h, GFP_KERNEL);
	if (dev->ishtp_host_dma_tx_buf)
		dev->ishtp_host_dma_tx_buf_phys = h;

	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
						DMA_SLOT_SIZE;

	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
					sizeof(uint8_t),
					GFP_KERNEL);
	spin_lock_init(&dev->ishtp_dma_tx_lock);

	/* Allocate Rx buffer */
	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_rx_buf_size,
					&h, GFP_KERNEL);

	if (dev->ishtp_host_dma_rx_buf)
		dev->ishtp_host_dma_rx_buf_phys = h;
}
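
/*
 * Layout sketch (illustrative only, not used by the driver): the TX buffer
 * reserved above is one coherent allocation carved into fixed-size slots,
 * tracked by a one-byte-per-slot usage map. Assuming a hypothetical 4 KiB
 * DMA_SLOT_SIZE (the real value comes from ishtp-dev.h):
 *
 *	num_slots    = (1024 * 1024) / 4096;		// 256 slots
 *	slot_addr(i) = (u8 *)dev->ishtp_host_dma_tx_buf + i * DMA_SLOT_SIZE;
 *	in_use(i)    = dev->ishtp_dma_tx_map[i];	// 0 = free, 1 = reserved
 */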

/**
 * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffers
 * @dev: ishtp device
 *
 * Free the DMA buffers when all clients are released. This only
 * happens during the error path in the ISH built-in driver model.
 */
void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf) {
		h = dev->ishtp_host_dma_tx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
				  dev->ishtp_host_dma_tx_buf, h);
	}

	if (dev->ishtp_host_dma_rx_buf) {
		h = dev->ishtp_host_dma_rx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
				  dev->ishtp_host_dma_rx_buf, h);
	}

	kfree(dev->ishtp_dma_tx_map);
	dev->ishtp_host_dma_tx_buf = NULL;
	dev->ishtp_host_dma_rx_buf = NULL;
	dev->ishtp_dma_tx_map = NULL;
}
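
/*
 * Usage sketch (illustrative only, not called from this file): the bus
 * setup code is expected to pair the two helpers above roughly as below.
 * The error label is a hypothetical name for this sketch.
 *
 *	ishtp_cl_alloc_dma_buf(dev);
 *	if (!dev->ishtp_host_dma_tx_buf || !dev->ishtp_host_dma_rx_buf ||
 *	    !dev->ishtp_dma_tx_map)
 *		goto out_free_dma;
 *	...
 * out_free_dma:
 *	ishtp_cl_free_dma_buf(dev);	// safe: each pointer is NULL-checked
 */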

/*
 * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
 * @dev: ishtp device
 * @size: Size of memory to get
 *
 * Find and return a free address of "size" bytes in the DMA TX buffer,
 * and mark that address range as in-use.
 *
 * Return: NULL when no free buffer is available, else a pointer to the
 * buffer to copy into
 */
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size)
{
	unsigned long flags;
	int i, j, free;
	/* additional slot is needed if there is rem */
	int required_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);

	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
		free = 1;
		for (j = 0; j < required_slots; j++)
			if (dev->ishtp_dma_tx_map[i+j]) {
				free = 0;
				i += j;
				break;
			}
		if (free) {
			/* mark memory as "caught" */
			for (j = 0; j < required_slots; j++)
				dev->ishtp_dma_tx_map[i+j] = 1;
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			return (i * DMA_SLOT_SIZE) +
				(unsigned char *)dev->ishtp_host_dma_tx_buf;
		}
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
	dev_err(dev->devc, "No free DMA buffer to send msg\n");
	return NULL;
}
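
/*
 * Usage sketch (illustrative only): a caller sending a message over DMA
 * would reserve space with the function above and copy into it; msg and
 * msg_len are placeholder names for this sketch:
 *
 *	void *dma_buf = ishtp_cl_get_dma_send_buf(dev, msg_len);
 *
 *	if (!dma_buf)
 *		return -ENOMEM;			// no run of free slots right now
 *	memcpy(dma_buf, msg, msg_len);		// fill the reserved slots
 *	// ... issue the DMA transfer request referencing dma_buf ...
 *
 * The reservation is released via ishtp_cl_release_dma_acked_mem() once
 * the firmware acknowledges the transfer.
 */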

/*
 * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
 * @dev: ishtp device
 * @msg_addr: message address of slot
 * @size: Size of memory to release
 *
 * Returns the acked memory (from msg_addr, size bytes long) to the
 * free list.
 */
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size)
{
	unsigned long flags;
	int acked_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
	int i, j;

	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
		dev_err(dev->devc, "Bad DMA Tx ack address\n");
		return;
	}

	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (j = 0; j < acked_slots; j++) {
		if ((i + j) >= dev->ishtp_dma_num_slots ||
				!dev->ishtp_dma_tx_map[i+j]) {
			/* no such slot, or memory is already free */
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			dev_err(dev->devc, "Bad DMA Tx ack address\n");
			return;
		}
		dev->ishtp_dma_tx_map[i+j] = 0;
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
}
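
/*
 * Slot-count sketch (illustrative only): both the reserve and release
 * paths round the byte count up to whole slots with the same expression,
 *
 *	slots = size / DMA_SLOT_SIZE + (size % DMA_SLOT_SIZE != 0);
 *
 * e.g. assuming a hypothetical 4 KiB DMA_SLOT_SIZE, a 5000-byte message
 * occupies two slots. The caller must therefore pass the same size to
 * ishtp_cl_release_dma_acked_mem() that it reserved, so that exactly the
 * reserved slots are cleared.
 */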