xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/ath/ath11k/ce.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: BSD-3-Clause-Clear
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include "dp_rx.h"
7*4882a593Smuzhiyun #include "debug.h"
8*4882a593Smuzhiyun #include "hif.h"
9*4882a593Smuzhiyun 
/* Host-side Copy Engine (CE) pipe attributes for IPQ8074-family targets.
 *
 * Per entry: .src_nentries is the host->target send ring depth (0 means
 * the pipe has no send direction), .dest_nentries the target->host
 * receive ring depth (0 means no receive direction), .src_sz_max the
 * maximum buffer size, and .recv_cb the handler invoked for completed
 * receive buffers.
 */
const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
111*4882a593Smuzhiyun 
/* Host-side Copy Engine (CE) pipe attributes for QCA6390-family targets.
 * Field semantics match ath11k_host_ce_config_ipq8074; this target only
 * defines CE0-CE8.
 */
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
189*4882a593Smuzhiyun 
/* Return true for the one CE that requires the shadow-register write
 * workaround: CE4, the interrupt-disabled host->target HTT pipe.
 */
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	return ce_id == 4;
}
197*4882a593Smuzhiyun 
ath11k_ce_stop_shadow_timers(struct ath11k_base * ab)198*4882a593Smuzhiyun static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun 	int i;
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	if (!ab->hw_params.supports_shadow_regs)
203*4882a593Smuzhiyun 		return;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++)
206*4882a593Smuzhiyun 		if (ath11k_ce_need_shadow_fix(i))
207*4882a593Smuzhiyun 			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun 
/* Post a single rx buffer to the destination SRNG of @pipe.
 *
 * @skb must already be DMA-mapped; @paddr is its bus address.  On success
 * the skb is recorded at the ring's current write index and
 * pipe->rx_buf_needed is decremented.  Returns 0 on success or -ENOSPC
 * when the destination ring has no free entry.
 *
 * Caller must hold ab->ce.ce_lock (asserted below).
 */
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* Need at least one free entry in the HAL ring */
	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	/* Track the skb so the completion path can find and unmap it */
	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	/* Runs on error paths too: the HAL access must always be closed
	 * before dropping srng->lock.
	 */
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
258*4882a593Smuzhiyun 
ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe * pipe)259*4882a593Smuzhiyun static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	struct ath11k_base *ab = pipe->ab;
262*4882a593Smuzhiyun 	struct sk_buff *skb;
263*4882a593Smuzhiyun 	dma_addr_t paddr;
264*4882a593Smuzhiyun 	int ret = 0;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	if (!(pipe->dest_ring || pipe->status_ring))
267*4882a593Smuzhiyun 		return 0;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	spin_lock_bh(&ab->ce.ce_lock);
270*4882a593Smuzhiyun 	while (pipe->rx_buf_needed) {
271*4882a593Smuzhiyun 		skb = dev_alloc_skb(pipe->buf_sz);
272*4882a593Smuzhiyun 		if (!skb) {
273*4882a593Smuzhiyun 			ret = -ENOMEM;
274*4882a593Smuzhiyun 			goto exit;
275*4882a593Smuzhiyun 		}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 		paddr = dma_map_single(ab->dev, skb->data,
280*4882a593Smuzhiyun 				       skb->len + skb_tailroom(skb),
281*4882a593Smuzhiyun 				       DMA_FROM_DEVICE);
282*4882a593Smuzhiyun 		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
283*4882a593Smuzhiyun 			ath11k_warn(ab, "failed to dma map ce rx buf\n");
284*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
285*4882a593Smuzhiyun 			ret = -EIO;
286*4882a593Smuzhiyun 			goto exit;
287*4882a593Smuzhiyun 		}
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 		ATH11K_SKB_RXCB(skb)->paddr = paddr;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 		if (ret) {
294*4882a593Smuzhiyun 			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
295*4882a593Smuzhiyun 			dma_unmap_single(ab->dev, paddr,
296*4882a593Smuzhiyun 					 skb->len + skb_tailroom(skb),
297*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
298*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
299*4882a593Smuzhiyun 			goto exit;
300*4882a593Smuzhiyun 		}
301*4882a593Smuzhiyun 	}
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun exit:
304*4882a593Smuzhiyun 	spin_unlock_bh(&ab->ce.ce_lock);
305*4882a593Smuzhiyun 	return ret;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun 
/* Reap one completed rx buffer from @pipe.
 *
 * On success *skb is the received buffer (still DMA-mapped) and *nbytes
 * the length reported by the destination status descriptor;
 * pipe->rx_buf_needed is incremented so the refill path can replace it.
 * Returns 0 on success, or -EIO when no completion is pending or the
 * reported length is zero.
 */
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are read from the status ring; the skb itself is
	 * tracked in the dest ring at the same sw_index.
	 */
	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	/* The success path falls through here too: the HAL access must be
	 * closed before either lock is released.
	 */
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
357*4882a593Smuzhiyun 
ath11k_ce_recv_process_cb(struct ath11k_ce_pipe * pipe)358*4882a593Smuzhiyun static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
359*4882a593Smuzhiyun {
360*4882a593Smuzhiyun 	struct ath11k_base *ab = pipe->ab;
361*4882a593Smuzhiyun 	struct sk_buff *skb;
362*4882a593Smuzhiyun 	struct sk_buff_head list;
363*4882a593Smuzhiyun 	unsigned int nbytes, max_nbytes;
364*4882a593Smuzhiyun 	int ret;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	__skb_queue_head_init(&list);
367*4882a593Smuzhiyun 	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
368*4882a593Smuzhiyun 		max_nbytes = skb->len + skb_tailroom(skb);
369*4882a593Smuzhiyun 		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
370*4882a593Smuzhiyun 				 max_nbytes, DMA_FROM_DEVICE);
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 		if (unlikely(max_nbytes < nbytes)) {
373*4882a593Smuzhiyun 			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
374*4882a593Smuzhiyun 				    nbytes, max_nbytes);
375*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
376*4882a593Smuzhiyun 			continue;
377*4882a593Smuzhiyun 		}
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 		skb_put(skb, nbytes);
380*4882a593Smuzhiyun 		__skb_queue_tail(&list, skb);
381*4882a593Smuzhiyun 	}
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	while ((skb = __skb_dequeue(&list))) {
384*4882a593Smuzhiyun 		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
385*4882a593Smuzhiyun 			   pipe->pipe_num, skb->len);
386*4882a593Smuzhiyun 		pipe->recv_cb(ab, skb);
387*4882a593Smuzhiyun 	}
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	ret = ath11k_ce_rx_post_pipe(pipe);
390*4882a593Smuzhiyun 	if (ret && ret != -ENOSPC) {
391*4882a593Smuzhiyun 		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
392*4882a593Smuzhiyun 			    pipe->pipe_num, ret);
393*4882a593Smuzhiyun 		mod_timer(&ab->rx_replenish_retry,
394*4882a593Smuzhiyun 			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun 
/* Reap one completed tx descriptor from the source ring of @pipe.
 *
 * Returns the skb recorded at the reaped sw_index (may be NULL for
 * entries submitted without an skb), or ERR_PTR(-EIO) when nothing is
 * ready.  A returned skb is still DMA-mapped; the caller unmaps and
 * frees it.
 */
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

	/* NOTE(review): unlike the other SRNG paths, no
	 * ath11k_hal_srng_access_end() is called here — the reap path
	 * appears not to require it; confirm against the HAL API.
	 */
err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
438*4882a593Smuzhiyun 
ath11k_ce_send_done_cb(struct ath11k_ce_pipe * pipe)439*4882a593Smuzhiyun static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun 	struct ath11k_base *ab = pipe->ab;
442*4882a593Smuzhiyun 	struct sk_buff *skb;
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun 	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
445*4882a593Smuzhiyun 		if (!skb)
446*4882a593Smuzhiyun 			continue;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
449*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
450*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
451*4882a593Smuzhiyun 	}
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun 
/* Fill in the MSI address/data fields of @ring_params for CE @ce_id and
 * flag the ring for MSI interrupts.  Silently leaves @ring_params
 * untouched when no user MSI vector is assigned for "CE".
 */
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start, msi_data_count, msi_irq_start;
	u32 addr_lo, addr_hi;

	if (ath11k_get_user_msi_vector(ab, "CE",
				       &msi_data_count, &msi_data_start,
				       &msi_irq_start))
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	/* Combine the 32-bit halves into the MSI target address */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	/* Distribute CEs over the available MSI data values */
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
478*4882a593Smuzhiyun 
/* Set up the HAL SRNG backing a CE ring.
 *
 * @type selects source, destination, or destination-status semantics.
 * MSI and interrupt-threshold parameters are only programmed for CEs
 * that do not carry CE_ATTR_DIS_INTR.  On success the HAL ring id is
 * stored in ce_ring->hal_ring_id and, for CEs needing the shadow
 * workaround, the shadow flush timer is initialized.
 * Returns 0 on success or a negative errno.
 */
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			/* Fire the low-threshold interrupt when almost all
			 * entries are outstanding.
			 */
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	/* A non-negative return from srng_setup is the HAL ring id */
	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base * ab,int nentries,int desc_sz)538*4882a593Smuzhiyun ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun 	struct ath11k_ce_ring *ce_ring;
541*4882a593Smuzhiyun 	dma_addr_t base_addr;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
544*4882a593Smuzhiyun 	if (ce_ring == NULL)
545*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	ce_ring->nentries = nentries;
548*4882a593Smuzhiyun 	ce_ring->nentries_mask = nentries - 1;
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	/* Legacy platforms that do not support cache
551*4882a593Smuzhiyun 	 * coherent DMA are unsupported
552*4882a593Smuzhiyun 	 */
553*4882a593Smuzhiyun 	ce_ring->base_addr_owner_space_unaligned =
554*4882a593Smuzhiyun 		dma_alloc_coherent(ab->dev,
555*4882a593Smuzhiyun 				   nentries * desc_sz + CE_DESC_RING_ALIGN,
556*4882a593Smuzhiyun 				   &base_addr, GFP_KERNEL);
557*4882a593Smuzhiyun 	if (!ce_ring->base_addr_owner_space_unaligned) {
558*4882a593Smuzhiyun 		kfree(ce_ring);
559*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
560*4882a593Smuzhiyun 	}
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	ce_ring->base_addr_ce_space_unaligned = base_addr;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	ce_ring->base_addr_owner_space = PTR_ALIGN(
565*4882a593Smuzhiyun 			ce_ring->base_addr_owner_space_unaligned,
566*4882a593Smuzhiyun 			CE_DESC_RING_ALIGN);
567*4882a593Smuzhiyun 	ce_ring->base_addr_ce_space = ALIGN(
568*4882a593Smuzhiyun 			ce_ring->base_addr_ce_space_unaligned,
569*4882a593Smuzhiyun 			CE_DESC_RING_ALIGN);
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	return ce_ring;
572*4882a593Smuzhiyun }
573*4882a593Smuzhiyun 
ath11k_ce_alloc_pipe(struct ath11k_base * ab,int ce_id)574*4882a593Smuzhiyun static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
575*4882a593Smuzhiyun {
576*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
577*4882a593Smuzhiyun 	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
578*4882a593Smuzhiyun 	struct ath11k_ce_ring *ring;
579*4882a593Smuzhiyun 	int nentries;
580*4882a593Smuzhiyun 	int desc_sz;
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	pipe->attr_flags = attr->flags;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	if (attr->src_nentries) {
585*4882a593Smuzhiyun 		pipe->send_cb = ath11k_ce_send_done_cb;
586*4882a593Smuzhiyun 		nentries = roundup_pow_of_two(attr->src_nentries);
587*4882a593Smuzhiyun 		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
588*4882a593Smuzhiyun 		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
589*4882a593Smuzhiyun 		if (IS_ERR(ring))
590*4882a593Smuzhiyun 			return PTR_ERR(ring);
591*4882a593Smuzhiyun 		pipe->src_ring = ring;
592*4882a593Smuzhiyun 	}
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	if (attr->dest_nentries) {
595*4882a593Smuzhiyun 		pipe->recv_cb = attr->recv_cb;
596*4882a593Smuzhiyun 		nentries = roundup_pow_of_two(attr->dest_nentries);
597*4882a593Smuzhiyun 		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
598*4882a593Smuzhiyun 		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
599*4882a593Smuzhiyun 		if (IS_ERR(ring))
600*4882a593Smuzhiyun 			return PTR_ERR(ring);
601*4882a593Smuzhiyun 		pipe->dest_ring = ring;
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
604*4882a593Smuzhiyun 		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
605*4882a593Smuzhiyun 		if (IS_ERR(ring))
606*4882a593Smuzhiyun 			return PTR_ERR(ring);
607*4882a593Smuzhiyun 		pipe->status_ring = ring;
608*4882a593Smuzhiyun 	}
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	return 0;
611*4882a593Smuzhiyun }
612*4882a593Smuzhiyun 
/* Service CE @ce_id: reap tx completions, then process rx completions,
 * invoking whichever callbacks the pipe has registered.
 */
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[ce_id];

	if (ce_pipe->send_cb)
		ce_pipe->send_cb(ce_pipe);

	if (ce_pipe->recv_cb)
		ath11k_ce_recv_process_cb(ce_pipe);
}
623*4882a593Smuzhiyun 
/* Poll tx completions on @pipe_id.  Only acts on pipes that run with
 * interrupts disabled (CE_ATTR_DIS_INTR) and have a send callback.
 */
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[pipe_id];

	if ((ce_pipe->attr_flags & CE_ATTR_DIS_INTR) && ce_pipe->send_cb)
		ce_pipe->send_cb(ce_pipe);
}
/* NOTE(review): this EXPORT_SYMBOL names ath11k_ce_per_engine_service yet
 * follows ath11k_ce_poll_send_completed — confirm no export was intended
 * for the poll helper as well.
 */
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
632*4882a593Smuzhiyun 
/* Submit @skb for transmission on CE pipe @pipe_id.
 *
 * @skb must already be DMA-mapped (ATH11K_SKB_CB(skb)->paddr set);
 * @transfer_id is carried in the source descriptor.  Returns 0 on
 * success, -ENOBUFS when the source ring is full, or -ESHUTDOWN after a
 * crash flush has been flagged.
 */
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Used-entry count with wraparound handling */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	/* Refuse new submissions once a crash flush is in progress */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	/* Re-read indices under the lock: they may have moved above */
	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	/* Track the skb so the completion path can unmap and free it */
	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	/* Arm the shadow-register flush timer for CEs that need it */
	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
722*4882a593Smuzhiyun 
ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe * pipe)723*4882a593Smuzhiyun static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
724*4882a593Smuzhiyun {
725*4882a593Smuzhiyun 	struct ath11k_base *ab = pipe->ab;
726*4882a593Smuzhiyun 	struct ath11k_ce_ring *ring = pipe->dest_ring;
727*4882a593Smuzhiyun 	struct sk_buff *skb;
728*4882a593Smuzhiyun 	int i;
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	if (!(ring && pipe->buf_sz))
731*4882a593Smuzhiyun 		return;
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	for (i = 0; i < ring->nentries; i++) {
734*4882a593Smuzhiyun 		skb = ring->skb[i];
735*4882a593Smuzhiyun 		if (!skb)
736*4882a593Smuzhiyun 			continue;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 		ring->skb[i] = NULL;
739*4882a593Smuzhiyun 		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
740*4882a593Smuzhiyun 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
741*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
742*4882a593Smuzhiyun 	}
743*4882a593Smuzhiyun }
744*4882a593Smuzhiyun 
ath11k_ce_shadow_config(struct ath11k_base * ab)745*4882a593Smuzhiyun static void ath11k_ce_shadow_config(struct ath11k_base *ab)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun 	int i;
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++) {
750*4882a593Smuzhiyun 		if (ab->hw_params.host_ce_config[i].src_nentries)
751*4882a593Smuzhiyun 			ath11k_hal_srng_update_shadow_config(ab,
752*4882a593Smuzhiyun 							     HAL_CE_SRC, i);
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 		if (ab->hw_params.host_ce_config[i].dest_nentries) {
755*4882a593Smuzhiyun 			ath11k_hal_srng_update_shadow_config(ab,
756*4882a593Smuzhiyun 							     HAL_CE_DST, i);
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 			ath11k_hal_srng_update_shadow_config(ab,
759*4882a593Smuzhiyun 							     HAL_CE_DST_STATUS, i);
760*4882a593Smuzhiyun 		}
761*4882a593Smuzhiyun 	}
762*4882a593Smuzhiyun }
763*4882a593Smuzhiyun 
/* Fetch the shadow register configuration (address list and length) that
 * is later handed to the firmware, building it on first call.
 *
 * No-op on hardware without shadow register support; *shadow_cfg and
 * *shadow_cfg_len are then left untouched by this function.
 */
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* a non-zero length means shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured firstly, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* re-read to pick up the configuration just built */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
787*4882a593Smuzhiyun 
ath11k_ce_cleanup_pipes(struct ath11k_base * ab)788*4882a593Smuzhiyun void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
789*4882a593Smuzhiyun {
790*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe;
791*4882a593Smuzhiyun 	int pipe_num;
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 	ath11k_ce_stop_shadow_timers(ab);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
796*4882a593Smuzhiyun 		pipe = &ab->ce.ce_pipe[pipe_num];
797*4882a593Smuzhiyun 		ath11k_ce_rx_pipe_cleanup(pipe);
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 		/* Cleanup any src CE's which have interrupts disabled */
800*4882a593Smuzhiyun 		ath11k_ce_poll_send_completed(ab, pipe_num);
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun 		/* NOTE: Should we also clean up tx buffer in all pipes? */
803*4882a593Smuzhiyun 	}
804*4882a593Smuzhiyun }
805*4882a593Smuzhiyun EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
806*4882a593Smuzhiyun 
ath11k_ce_rx_post_buf(struct ath11k_base * ab)807*4882a593Smuzhiyun void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
808*4882a593Smuzhiyun {
809*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe;
810*4882a593Smuzhiyun 	int i;
811*4882a593Smuzhiyun 	int ret;
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++) {
814*4882a593Smuzhiyun 		pipe = &ab->ce.ce_pipe[i];
815*4882a593Smuzhiyun 		ret = ath11k_ce_rx_post_pipe(pipe);
816*4882a593Smuzhiyun 		if (ret) {
817*4882a593Smuzhiyun 			if (ret == -ENOSPC)
818*4882a593Smuzhiyun 				continue;
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
821*4882a593Smuzhiyun 				    i, ret);
822*4882a593Smuzhiyun 			mod_timer(&ab->rx_replenish_retry,
823*4882a593Smuzhiyun 				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 			return;
826*4882a593Smuzhiyun 		}
827*4882a593Smuzhiyun 	}
828*4882a593Smuzhiyun }
829*4882a593Smuzhiyun EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
830*4882a593Smuzhiyun 
/* Timer callback for ab->rx_replenish_retry: retry posting rx buffers
 * after an earlier ath11k_ce_rx_post_buf() attempt failed.
 */
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
837*4882a593Smuzhiyun 
ath11k_ce_init_pipes(struct ath11k_base * ab)838*4882a593Smuzhiyun int ath11k_ce_init_pipes(struct ath11k_base *ab)
839*4882a593Smuzhiyun {
840*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe;
841*4882a593Smuzhiyun 	int i;
842*4882a593Smuzhiyun 	int ret;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
845*4882a593Smuzhiyun 				    &ab->qmi.ce_cfg.shadow_reg_v2_len);
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++) {
848*4882a593Smuzhiyun 		pipe = &ab->ce.ce_pipe[i];
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 		if (pipe->src_ring) {
851*4882a593Smuzhiyun 			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
852*4882a593Smuzhiyun 						  HAL_CE_SRC);
853*4882a593Smuzhiyun 			if (ret) {
854*4882a593Smuzhiyun 				ath11k_warn(ab, "failed to init src ring: %d\n",
855*4882a593Smuzhiyun 					    ret);
856*4882a593Smuzhiyun 				/* Should we clear any partial init */
857*4882a593Smuzhiyun 				return ret;
858*4882a593Smuzhiyun 			}
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 			pipe->src_ring->write_index = 0;
861*4882a593Smuzhiyun 			pipe->src_ring->sw_index = 0;
862*4882a593Smuzhiyun 		}
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 		if (pipe->dest_ring) {
865*4882a593Smuzhiyun 			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
866*4882a593Smuzhiyun 						  HAL_CE_DST);
867*4882a593Smuzhiyun 			if (ret) {
868*4882a593Smuzhiyun 				ath11k_warn(ab, "failed to init dest ring: %d\n",
869*4882a593Smuzhiyun 					    ret);
870*4882a593Smuzhiyun 				/* Should we clear any partial init */
871*4882a593Smuzhiyun 				return ret;
872*4882a593Smuzhiyun 			}
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
875*4882a593Smuzhiyun 					      pipe->dest_ring->nentries - 2 : 0;
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 			pipe->dest_ring->write_index = 0;
878*4882a593Smuzhiyun 			pipe->dest_ring->sw_index = 0;
879*4882a593Smuzhiyun 		}
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 		if (pipe->status_ring) {
882*4882a593Smuzhiyun 			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
883*4882a593Smuzhiyun 						  HAL_CE_DST_STATUS);
884*4882a593Smuzhiyun 			if (ret) {
885*4882a593Smuzhiyun 				ath11k_warn(ab, "failed to init dest status ing: %d\n",
886*4882a593Smuzhiyun 					    ret);
887*4882a593Smuzhiyun 				/* Should we clear any partial init */
888*4882a593Smuzhiyun 				return ret;
889*4882a593Smuzhiyun 			}
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 			pipe->status_ring->write_index = 0;
892*4882a593Smuzhiyun 			pipe->status_ring->sw_index = 0;
893*4882a593Smuzhiyun 		}
894*4882a593Smuzhiyun 	}
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	return 0;
897*4882a593Smuzhiyun }
898*4882a593Smuzhiyun 
ath11k_ce_free_pipes(struct ath11k_base * ab)899*4882a593Smuzhiyun void ath11k_ce_free_pipes(struct ath11k_base *ab)
900*4882a593Smuzhiyun {
901*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe;
902*4882a593Smuzhiyun 	int desc_sz;
903*4882a593Smuzhiyun 	int i;
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++) {
906*4882a593Smuzhiyun 		pipe = &ab->ce.ce_pipe[i];
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 		if (ath11k_ce_need_shadow_fix(i))
909*4882a593Smuzhiyun 			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 		if (pipe->src_ring) {
912*4882a593Smuzhiyun 			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
913*4882a593Smuzhiyun 			dma_free_coherent(ab->dev,
914*4882a593Smuzhiyun 					  pipe->src_ring->nentries * desc_sz +
915*4882a593Smuzhiyun 					  CE_DESC_RING_ALIGN,
916*4882a593Smuzhiyun 					  pipe->src_ring->base_addr_owner_space,
917*4882a593Smuzhiyun 					  pipe->src_ring->base_addr_ce_space);
918*4882a593Smuzhiyun 			kfree(pipe->src_ring);
919*4882a593Smuzhiyun 			pipe->src_ring = NULL;
920*4882a593Smuzhiyun 		}
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 		if (pipe->dest_ring) {
923*4882a593Smuzhiyun 			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
924*4882a593Smuzhiyun 			dma_free_coherent(ab->dev,
925*4882a593Smuzhiyun 					  pipe->dest_ring->nentries * desc_sz +
926*4882a593Smuzhiyun 					  CE_DESC_RING_ALIGN,
927*4882a593Smuzhiyun 					  pipe->dest_ring->base_addr_owner_space,
928*4882a593Smuzhiyun 					  pipe->dest_ring->base_addr_ce_space);
929*4882a593Smuzhiyun 			kfree(pipe->dest_ring);
930*4882a593Smuzhiyun 			pipe->dest_ring = NULL;
931*4882a593Smuzhiyun 		}
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 		if (pipe->status_ring) {
934*4882a593Smuzhiyun 			desc_sz =
935*4882a593Smuzhiyun 			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
936*4882a593Smuzhiyun 			dma_free_coherent(ab->dev,
937*4882a593Smuzhiyun 					  pipe->status_ring->nentries * desc_sz +
938*4882a593Smuzhiyun 					  CE_DESC_RING_ALIGN,
939*4882a593Smuzhiyun 					  pipe->status_ring->base_addr_owner_space,
940*4882a593Smuzhiyun 					  pipe->status_ring->base_addr_ce_space);
941*4882a593Smuzhiyun 			kfree(pipe->status_ring);
942*4882a593Smuzhiyun 			pipe->status_ring = NULL;
943*4882a593Smuzhiyun 		}
944*4882a593Smuzhiyun 	}
945*4882a593Smuzhiyun }
946*4882a593Smuzhiyun EXPORT_SYMBOL(ath11k_ce_free_pipes);
947*4882a593Smuzhiyun 
ath11k_ce_alloc_pipes(struct ath11k_base * ab)948*4882a593Smuzhiyun int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	struct ath11k_ce_pipe *pipe;
951*4882a593Smuzhiyun 	int i;
952*4882a593Smuzhiyun 	int ret;
953*4882a593Smuzhiyun 	const struct ce_attr *attr;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	spin_lock_init(&ab->ce.ce_lock);
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	for (i = 0; i < ab->hw_params.ce_count; i++) {
958*4882a593Smuzhiyun 		attr = &ab->hw_params.host_ce_config[i];
959*4882a593Smuzhiyun 		pipe = &ab->ce.ce_pipe[i];
960*4882a593Smuzhiyun 		pipe->pipe_num = i;
961*4882a593Smuzhiyun 		pipe->ab = ab;
962*4882a593Smuzhiyun 		pipe->buf_sz = attr->src_sz_max;
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun 		ret = ath11k_ce_alloc_pipe(ab, i);
965*4882a593Smuzhiyun 		if (ret) {
966*4882a593Smuzhiyun 			/* Free any parial successful allocation */
967*4882a593Smuzhiyun 			ath11k_ce_free_pipes(ab);
968*4882a593Smuzhiyun 			return ret;
969*4882a593Smuzhiyun 		}
970*4882a593Smuzhiyun 	}
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	return 0;
973*4882a593Smuzhiyun }
974*4882a593Smuzhiyun EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun /* For Big Endian Host, Copy Engine byte_swap is enabled
977*4882a593Smuzhiyun  * When Copy Engine does byte_swap, need to byte swap again for the
978*4882a593Smuzhiyun  * Host to get/put buffer content in the correct byte order
979*4882a593Smuzhiyun  */
ath11k_ce_byte_swap(void * mem,u32 len)980*4882a593Smuzhiyun void ath11k_ce_byte_swap(void *mem, u32 len)
981*4882a593Smuzhiyun {
982*4882a593Smuzhiyun 	int i;
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
985*4882a593Smuzhiyun 		if (!mem)
986*4882a593Smuzhiyun 			return;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 		for (i = 0; i < (len / 4); i++) {
989*4882a593Smuzhiyun 			*(u32 *)mem = swab32(*(u32 *)mem);
990*4882a593Smuzhiyun 			mem += 4;
991*4882a593Smuzhiyun 		}
992*4882a593Smuzhiyun 	}
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun 
ath11k_ce_get_attr_flags(struct ath11k_base * ab,int ce_id)995*4882a593Smuzhiyun int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun 	if (ce_id >= ab->hw_params.ce_count)
998*4882a593Smuzhiyun 		return -EINVAL;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	return ab->hw_params.host_ce_config[ce_id].flags;
1001*4882a593Smuzhiyun }
1002*4882a593Smuzhiyun EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
1003