xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/ath/wil6210/pmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: ISC
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
4*4882a593Smuzhiyun  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/types.h>
8*4882a593Smuzhiyun #include <linux/errno.h>
9*4882a593Smuzhiyun #include <linux/fs.h>
10*4882a593Smuzhiyun #include <linux/seq_file.h>
11*4882a593Smuzhiyun #include "wmi.h"
12*4882a593Smuzhiyun #include "wil6210.h"
13*4882a593Smuzhiyun #include "txrx.h"
14*4882a593Smuzhiyun #include "pmc.h"
15*4882a593Smuzhiyun 
/* Bookkeeping for one PMC data buffer: the DMA address programmed
 * into the ring descriptor for HW, and the coherent kernel virtual
 * address SW uses to initialize and read the buffer.
 */
struct desc_alloc_info {
	dma_addr_t pa;
	void	  *va;
};
20*4882a593Smuzhiyun 
wil_is_pmc_allocated(struct pmc_ctx * pmc)21*4882a593Smuzhiyun static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun 	return !!pmc->pring_va;
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
wil_pmc_init(struct wil6210_priv * wil)26*4882a593Smuzhiyun void wil_pmc_init(struct wil6210_priv *wil)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
29*4882a593Smuzhiyun 	mutex_init(&wil->pmc.lock);
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun 
/* Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 *
 * On any failure, everything allocated so far is released and
 * pmc->last_cmd_status records the error; on success it holds the
 * wmi_send() result (0). Serialized against free/read via pmc->lock.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	/* ring size is bounded by the HW ring-order limit */
	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	/* reject inputs whose product num_descriptors * descriptor_size
	 * would overflow a signed int (total pmc buffer size)
	 */
	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	/* remember geometry for later read/free */
	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context*/
	pmc->descriptors = kcalloc(num_descriptors,
				  sizeof(struct desc_alloc_info),
				  GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	/* restore the wide DMA mask for the rest of the driver */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		/* per-descriptor data buffer, coherent so the debugfs
		 * reader sees HW writes without explicit sync
		 */
		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

		/* fill the buffer with "invalid" dwords: serial number in
		 * the lsw, PCM_DATA_INVALID_DW_VAL in the msw (see header
		 * comment) so unwritten data is recognizable
		 */
		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor: build on the stack, then copy
		 * into the coherent ring in one struct assignment
		 */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	/* hand the ring over to FW */
	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					vif->mid,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

	/* error unwind: labels fall through from deepest to shallowest,
	 * releasing resources in reverse order of acquisition
	 */
release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	/* descriptors[] was zeroed by kcalloc, so the first NULL va marks
	 * how far the allocation loop got
	 */
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	/* NOTE(review): this unconditionally overwrites last_cmd_status,
	 * so a wmi_send() error reaching here via release_pmc_skbs is
	 * replaced by last_cmd_err (-ENOMEM) — confirm this is intended
	 */
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
222*4882a593Smuzhiyun 
/* Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 *
 * @send_pmc_cmd: when non-zero, first ask FW to release the ring via
 * WMI_PMC_RELEASE; pass zero when FW is down/reset. Memory is freed
 * regardless of the WMI result. Serialized via pmc->lock.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	/* freeing without a prior successful alloc is an error */
	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
				wmi_send(wil, WMI_PMC_CMDID, vif->mid,
					 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	/* free the physical ring */
	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	/* free each data buffer, then the bookkeeping array itself;
	 * the first NULL va marks the end of allocated entries
	 */
	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun /* Status of the last operation requested via debugfs: alloc/free/read.
301*4882a593Smuzhiyun  * 0 - success or negative errno
302*4882a593Smuzhiyun  */
wil_pmc_last_cmd_status(struct wil6210_priv * wil)303*4882a593Smuzhiyun int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun 	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
306*4882a593Smuzhiyun 		     wil->pmc.last_cmd_status);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	return wil->pmc.last_cmd_status;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun /* Read from required position up to the end of current descriptor,
312*4882a593Smuzhiyun  * depends on descriptor size configured during alloc request.
313*4882a593Smuzhiyun  */
wil_pmc_read(struct file * filp,char __user * buf,size_t count,loff_t * f_pos)314*4882a593Smuzhiyun ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
315*4882a593Smuzhiyun 		     loff_t *f_pos)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun 	struct wil6210_priv *wil = filp->private_data;
318*4882a593Smuzhiyun 	struct pmc_ctx *pmc = &wil->pmc;
319*4882a593Smuzhiyun 	size_t retval = 0;
320*4882a593Smuzhiyun 	unsigned long long idx;
321*4882a593Smuzhiyun 	loff_t offset;
322*4882a593Smuzhiyun 	size_t pmc_size;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	mutex_lock(&pmc->lock);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	if (!wil_is_pmc_allocated(pmc)) {
327*4882a593Smuzhiyun 		wil_err(wil, "error, pmc is not allocated!\n");
328*4882a593Smuzhiyun 		pmc->last_cmd_status = -EPERM;
329*4882a593Smuzhiyun 		mutex_unlock(&pmc->lock);
330*4882a593Smuzhiyun 		return -EPERM;
331*4882a593Smuzhiyun 	}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	wil_dbg_misc(wil,
336*4882a593Smuzhiyun 		     "pmc_read: size %u, pos %lld\n",
337*4882a593Smuzhiyun 		     (u32)count, *f_pos);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	pmc->last_cmd_status = 0;
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	idx = *f_pos;
342*4882a593Smuzhiyun 	do_div(idx, pmc->descriptor_size);
343*4882a593Smuzhiyun 	offset = *f_pos - (idx * pmc->descriptor_size);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	if (*f_pos >= pmc_size) {
346*4882a593Smuzhiyun 		wil_dbg_misc(wil,
347*4882a593Smuzhiyun 			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
348*4882a593Smuzhiyun 			     *f_pos, (u32)pmc_size);
349*4882a593Smuzhiyun 		pmc->last_cmd_status = -ERANGE;
350*4882a593Smuzhiyun 		goto out;
351*4882a593Smuzhiyun 	}
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	wil_dbg_misc(wil,
354*4882a593Smuzhiyun 		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
355*4882a593Smuzhiyun 		     *f_pos, idx, offset, count);
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	/* if no errors, return the copied byte count */
358*4882a593Smuzhiyun 	retval = simple_read_from_buffer(buf,
359*4882a593Smuzhiyun 					 count,
360*4882a593Smuzhiyun 					 &offset,
361*4882a593Smuzhiyun 					 pmc->descriptors[idx].va,
362*4882a593Smuzhiyun 					 pmc->descriptor_size);
363*4882a593Smuzhiyun 	*f_pos += retval;
364*4882a593Smuzhiyun out:
365*4882a593Smuzhiyun 	mutex_unlock(&pmc->lock);
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	return retval;
368*4882a593Smuzhiyun }
369*4882a593Smuzhiyun 
/* debugfs llseek handler for the pmc data file.
 * Seeks within [0, pmc_size]; positions past the end are clamped to
 * pmc_size and negative results yield -EINVAL.
 * Returns the new position or negative errno.
 */
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	/* use the standard whence constants instead of magic 0/1/2 */
	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	/* clamp to end of pmc buffer */
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}
420*4882a593Smuzhiyun 
/* seq_file show handler: dump the raw pmc physical ring (descriptors
 * as written by HW/SW) to debugfs. Returns 0 or -EPERM when the ring
 * is not allocated.
 */
int wil_pmcring_read(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private;
	struct pmc_ctx *pmc = &wil->pmc;
	/* NOTE(review): sized with vring_rx_desc while wil_pmc_alloc()
	 * allocated the ring with vring_tx_desc — relies on the two
	 * structs having equal size; also num_descriptors is read here
	 * before pmc->lock is taken — confirm both are intended
	 */
	size_t pmc_ring_size =
		sizeof(struct vring_rx_desc) * pmc->num_descriptors;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);

	/* write the whole coherent ring buffer into the seq_file */
	seq_write(s, pmc->pring_va, pmc_ring_size);

	mutex_unlock(&pmc->lock);

	return 0;
}
445