// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/mei.h>

#include "mei_dev.h"
/**
 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
 *     for the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 *
 * Return:
 * * 0 - on success or zero allocation request
 * * -EINVAL - if size is not a power of 2
 * * -ENOMEM - if the allocation failed
 */
static int mei_dmam_dscr_alloc(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->size)
		return 0;

	if (WARN_ON(!is_power_of_2(dscr->size)))
		return -EINVAL;

	if (dscr->vaddr)
		return 0;

	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
					  GFP_KERNEL);
	if (!dscr->vaddr)
		return -ENOMEM;

	return 0;
}
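
/*
 * Illustrative sketch, not part of the driver: a caller is expected to
 * set dscr->size to a power of two before allocating, because the ring
 * index arithmetic below wraps with a simple "& (depth - 1)" mask.
 *
 *	dscr->size = SZ_4K;			// power of two
 *	ret = mei_dmam_dscr_alloc(dev, dscr);	// 0 on success
 */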

/**
 * mei_dmam_dscr_free() - free a managed coherent buffer
 *     from the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 */
static void mei_dmam_dscr_free(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->vaddr)
		return;

	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
	dscr->vaddr = NULL;
}

/**
 * mei_dmam_ring_free() - free dma ring buffers
 * @dev: mei device
 */
void mei_dmam_ring_free(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}

/**
 * mei_dmam_ring_alloc() - allocate dma ring buffers
 * @dev: mei device
 *
 * Return: -ENOMEM on allocation failure, 0 otherwise
 */
int mei_dmam_ring_alloc(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
			goto err;

	return 0;

err:
	mei_dmam_ring_free(dev);
	return -ENOMEM;
}
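
/*
 * Usage sketch, assuming the usual setup/teardown flow of this driver:
 * the buffers are device-managed (dmam_*), so an explicit
 * mei_dmam_ring_free() is mainly needed on an error path; devres
 * releases the memory when the device detaches.
 *
 *	if (mei_dmam_ring_alloc(dev))
 *		return -ENOMEM;
 *	...
 *	mei_dmam_ring_free(dev);	// optional early release
 */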

/**
 * mei_dma_ring_is_allocated() - check if dma ring is allocated
 * @dev: mei device
 *
 * Return: true if dma ring is allocated
 */
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}
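
/**
 * mei_dma_ring_ctrl() - get the dma ring control block
 * @dev: mei device
 *
 * Return: pointer to the control block stored in the CTRL descriptor's
 *         coherent buffer, or NULL if the ring is not allocated
 */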
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}

/**
 * mei_dma_ring_reset() - reset the dma control block
 * @dev: mei device
 */
void mei_dma_ring_reset(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);

	if (!ctrl)
		return;

	memset(ctrl, 0, sizeof(*ctrl));
}

/**
 * mei_dma_copy_from() - copy from dma ring into buffer
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
				u32 offset, u32 n)
{
	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;

	size_t b_offset = offset << 2; /* a slot is 4 bytes wide */
	size_t b_n = n << 2;

	memcpy(buf, dbuf + b_offset, b_n);

	return b_n;
}
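
/*
 * A slot is 4 bytes, hence the "<< 2" conversions above. As a worked
 * example with illustrative values: offset = 2 and n = 3 copy bytes
 * 8..19 of the device buffer, and the function returns 12.
 */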

/**
 * mei_dma_copy_to() - copy from a buffer to the dma ring
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
			      u32 offset, u32 n)
{
	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;

	size_t b_offset = offset << 2; /* a slot is 4 bytes wide */
	size_t b_n = n << 2;

	memcpy(hbuf + b_offset, buf, b_n);

	return b_n;
}

/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into; may be NULL in case of dropping the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer. */
	if (!buf)
		goto out;

	/*
	 * the read may wrap around the end of the ring:
	 * copy the tail first, then the rest from the start of the buffer.
	 */
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
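
/*
 * Worked example with illustrative numbers: with dbuf_depth = 256 slots,
 * a masked rd_idx of 250 and a 40-byte read (10 slots), the copy is
 * split into 6 slots from index 250 and 4 slots from index 0. The
 * stored dbuf_rd_idx keeps free-running and is only masked down to a
 * ring offset on the next read.
 */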

static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}

/**
 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
 * @dev: mei_device
 *
 * Return: number of empty slots
 */
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 wr_idx, rd_idx, hbuf_depth, empty;

	if (!mei_dma_ring_is_allocated(dev))
		return 0;

	if (WARN_ON(!ctrl))
		return 0;

	/* easier to work in slots */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);

	/* the indices are free-running u32 counters and may wrap */
	if (rd_idx > wr_idx)
		empty = rd_idx - wr_idx;
	else
		empty = hbuf_depth - (wr_idx - rd_idx);

	return empty;
}
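
/*
 * Worked example with illustrative numbers: with hbuf_depth = 256 slots,
 * rd_idx = 10 and wr_idx = 250, 240 slots are in flight, leaving
 * 256 - 240 = 16 empty slots for the next write.
 */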

/**
 * mei_dma_ring_write - write data to dma ring host buffer
 *
 * @dev: mei_device
 * @buf: data to be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

	/*
	 * the write may wrap around the end of the ring:
	 * fill the tail first, then continue from the start of the buffer.
	 */
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);

	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
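
/*
 * Usage sketch, assuming the caller follows this file's conventions:
 * mei_dma_ring_write() does not itself check for free space, so a
 * caller is expected to gate it on mei_dma_ring_empty_slots().
 *
 *	if (mei_dma_ring_empty_slots(dev) >= mei_data2slots(len))
 *		mei_dma_ring_write(dev, buf, len);
 */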