/* xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/ath/ath6kl/htc_mbox.c
 * (revision 4882a59341e53eb6f0b4789bf948001014eff981)
 */

/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include "trace.h"

#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
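
/*
 * Worked example (hypothetical block size): with a 128-byte mbox block
 * (block_mask 0x7f), CALC_TXRX_PADDED_LEN() rounds a 90-byte transfer up
 * to 128 bytes and a 130-byte transfer up to 256 bytes.
 */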

static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *cred_info,
				       u16 svc_pri_order[], int len);

/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD	1500
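
/*
 * The threshold is compared against a per-AC count of successive TX
 * drain passes that completed without sending a bundle; once reached,
 * the AC's bit is set again in tx_bndl_mask (see
 * ath6kl_htc_tx_from_queue() below).
 */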

/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
				  struct htc_endpoint_credit_dist *ep_dist,
				  int credits)
{
	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
		   ep_dist->endpoint, credits);

	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		}

		/*
		 * Streams have to be created (explicitly or implicitly) for
		 * all kinds of traffic. BE endpoints are also inactive at
		 * the beginning; when BE traffic starts, it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	/*
	 * ath6kl_credit_seek() uses list_for_each_entry_reverse() to walk
	 * the whole ep list, so assign lowestpri_ep_dist only after walking
	 * the ep_list.
	 */
	cred_info->lowestpri_ep_dist = cur_ep_dist->list;

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		} else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg is the same. We use a simple
			 * calculation here: we take the remaining credits,
			 * determine how many whole messages they can
			 * cover and then set each endpoint's normal value
			 * equal to 3/4 of this amount.
			 */
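			/*
			 * Worked example (hypothetical numbers): with 26
			 * free credits and cred_per_msg = 6, count =
			 * (26 / 6) * 6 = 24, then (24 * 3) >> 2 = 18, so
			 * cred_norm = max(18, 6) = 18.
			 */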
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}

/* initialize and setup credit distribution */
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
					struct ath6kl_htc_credit_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);

	return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
		   ep_dist->endpoint, limit);

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}

static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_list;

	list_for_each_entry(cur_list, epdist_list, list) {
		if (cur_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_list->cred_to_dist > 0) {
			cur_list->credits += cur_list->cred_to_dist;
			cur_list->cred_to_dist = 0;

			if (cur_list->credits > cur_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_list,
						     cur_list->cred_assngd);

			if (cur_list->credits > cur_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_list,
						     cur_list->cred_norm);

			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_list, 0);
			}
		}
	}
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 *   1. Only take from lower priority endpoints
	 *   2. Only take what is allocated above the minimum (never
	 *      starve an endpoint completely)
	 *   3. Only take what you need.
	 */
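
	/*
	 * Worked example (hypothetical numbers): seek_cred = 6 with only
	 * 2 free credits leaves need = 4; a lower-priority endpoint with
	 * cred_assngd = 10 and cred_min = 4 is reduced to 6, releasing 4
	 * credits and satisfying the seek.
	 */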

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and has enough credits assigned
			 * above its minimum to fulfill our need; try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked; this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() API.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}
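
/*
 * Example: a buffer starting at ...0x1003 is moved back to ...0x1000;
 * PTR_ALIGN(*buf - 4, 4) yields the previous 4-byte boundary, so up to
 * 3 bytes of headroom must exist in front of the buffer.
 */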

static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
				packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
		   packet->info.tx.seqno);

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
		   send_len, packet->info.tx.seqno, padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_ASYNC_BLOCK_INC, packet);

	trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);

	return status;
}

static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
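	/*
	 * e.g. (hypothetical sizes): a 3000-byte send against a 1664-byte
	 * target credit size needs DIV_ROUND_UP(3000, 1664) = 2 credits.
	 */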

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
		ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}

static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {
		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx got packet 0x%p queue depth %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}
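
/*
 * Example (hypothetical credit size): with cred_sz = 128 and *len = 90,
 * htc_get_credit_padding() grows *len to 128 and returns 38, so the
 * message consumes a whole credit and stays eligible for bundling.
 */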

static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 *    1. scatter resources are exhausted
 *    2. a message that will consume a partial credit will stop the
 *       bundling process early
 *    3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
	struct htc_packet *packet;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}
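
		/*
		 * e.g. for WMM_AC_VI the else branch above computes
		 * txb_mask = (1 << WMM_AC_VI) - 1, i.e. the bits of all
		 * lower-priority ACs.
		 */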

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);

		for (i = 0; i < scat_req->scat_entries; i++) {
			packet = scat_req->scat_list[i].packet;
			trace_ath6kl_htc_tx(packet->status, packet->endpoint,
					    packet->buf, packet->act_len);
		}

		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}

static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;
	int status;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			status = ath6kl_htc_tx_issue(target, packet);

			if (status) {
				packet->status = status;
				packet->completion(packet->context, packet);
			}
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no TX bundling has
		 * occurred continuously for a certain number of TXes,
		 * re-enable TX bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
						target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;

		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);
	htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				struct ath6kl_htc_credit_info *credit_info,
				u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}
1120*4882a593Smuzhiyun 
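/*
 * Queue a single packet for transmission.  If the packet cannot be
 * accepted (no credits, or HTC is stopping) it is completed
 * immediately with -ENOSPC or -ECANCELED rather than being handed
 * back to the caller.  A minimal caller sketch (hypothetical names
 * for pkt/cookie/buf; set_htc_pkt_info() is the real helper used
 * elsewhere in this file):
 *
 *      set_htc_pkt_info(pkt, cookie, buf, len, eid, tag);
 *      status = ath6kl_htc_mbox_tx(target, pkt);
 *      (ownership of pkt passes to HTC; the completion callback
 *       fires even on the early-failure path above)
 */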
static int ath6kl_htc_mbox_tx(struct htc_target *target,
                              struct htc_packet *packet)
{
        struct htc_endpoint *endpoint;
        struct list_head queue;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx ep id %d buf 0x%p len %d\n",
                   packet->endpoint, packet->buf, packet->act_len);

        if (packet->endpoint >= ENDPOINT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }

        endpoint = &target->endpoint[packet->endpoint];

        if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
                packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
                                 -ECANCELED : -ENOSPC;
                INIT_LIST_HEAD(&queue);
                list_add(&packet->list, &queue);
                htc_tx_complete(endpoint, &queue);
        }

        return 0;
}

/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
                           enum htc_endpoint_id eid, u16 tag)
{
        struct htc_packet *packet, *tmp_pkt;
        struct list_head discard_q, container;
        struct htc_endpoint *endpoint = &target->endpoint[eid];

        if (!endpoint->svc_id) {
                WARN_ON(1);
                return;
        }

        /* initialize the discard queue */
        INIT_LIST_HEAD(&discard_q);

        spin_lock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
                if ((tag == HTC_TX_PACKET_TAG_ALL) ||
                    (tag == packet->info.tx.tag))
                        list_move_tail(&packet->list, &discard_q);
        }

        spin_unlock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
                           packet, packet->act_len,
                           packet->endpoint, packet->info.tx.tag);

                INIT_LIST_HEAD(&container);
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }
}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        int i;

        dump_cred_dist_stats(target);

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (endpoint->svc_id == 0)
                        /* not in use.. */
                        continue;
                ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
        }
}

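/*
 * Called when an endpoint transitions between active and inactive.
 * A change in the HTC_EP_ACTIVE flag triggers a credit
 * redistribution so that idle endpoints can donate credits to busy
 * ones; on deactivation the endpoint TX queues are also re-checked.
 */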
static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
                                             enum htc_endpoint_id eid,
                                             bool active)
{
        struct htc_endpoint *endpoint = &target->endpoint[eid];
        bool dist = false;

        if (endpoint->svc_id == 0) {
                WARN_ON(1);
                return;
        }

        spin_lock_bh(&target->tx_lock);

        if (active) {
                if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
                        endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
                        dist = true;
                }
        } else {
                if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
                        endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
                        dist = true;
                }
        }

        if (dist) {
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                ath6kl_dbg(ATH6KL_DBG_HTC,
                           "htc tx activity ctxt 0x%p dist 0x%p\n",
                           target->credit_info, &target->cred_dist_list);

                ath6kl_credit_distribute(target->credit_info,
                                         &target->cred_dist_list,
                                         HTC_CREDIT_DIST_ACTIVITY_CHANGE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (dist && !active)
                htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
                                              int n_look_ahds)
{
        endpoint->ep_st.rx_pkts++;
        if (n_look_ahds == 1)
                endpoint->ep_st.rx_lkahds++;
        else if (n_look_ahds > 1)
                endpoint->ep_st.rx_bundle_lkahd++;
}

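/*
 * Control-endpoint frames must fit in the smaller ATH6KL_BUFFER_SIZE
 * buffers; frames on data endpoints may use the larger A-MSDU sized
 * buffers.
 */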
static inline bool htc_valid_rx_frame_len(struct htc_target *target,
                                          enum htc_endpoint_id eid, int len)
{
        return (eid == target->dev->ar->ctrl_ep) ?
                len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
        struct list_head queue;

        INIT_LIST_HEAD(&queue);
        list_add_tail(&packet->list, &queue);
        return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
                              struct htc_packet *packet,
                              struct htc_endpoint *ep)
{
        if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
                htc_rxpkt_reset(packet);
                packet->status = -ECANCELED;
                ep->ep_cb.rx(ep->target, packet);
        } else {
                htc_rxpkt_reset(packet);
                htc_add_rxbuf((void *)(target), packet);
        }
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
                                struct htc_packet *packet)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
        spin_unlock_bh(&target->htc_lock);
}

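/*
 * Fetch one packet from the mailbox.  The read length is padded up
 * to the device block size by CALC_TXRX_PADDED_LEN(), so the receive
 * buffer must be large enough for the padded length, not just the
 * frame itself.
 */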
static int ath6kl_htc_rx_packet(struct htc_target *target,
                                struct htc_packet *packet,
                                u32 rx_len)
{
        struct ath6kl_device *dev = target->dev;
        u32 padded_len;
        int status;

        padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

        if (padded_len > packet->buf_len) {
                ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
                           padded_len, rx_len, packet->buf_len);
                return -ENOMEM;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
                   packet, packet->info.rx.exp_hdr,
                   padded_len, dev->ar->mbox_info.htc_addr);

        status = hif_read_write_sync(dev->ar,
                                     dev->ar->mbox_info.htc_addr,
                                     packet->buf, padded_len,
                                     HIF_RD_SYNC_BLOCK_FIX);

        packet->status = status;

        return status;
}

/*
 * Optimization for recv packets: we can indicate a "hint" that there
 * are more single packets to fetch on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
                                       struct htc_endpoint *endpoint,
                                       struct htc_packet *packet)
{
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

        if (htc_hdr->eid == packet->endpoint) {
                if (!list_empty(&endpoint->rx_bufq))
                        packet->info.rx.indicat_flags |=
                                        HTC_RX_FLAGS_INDICATE_MORE_PKTS;
        }
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
        struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

        if (ep_cb.rx_refill_thresh > 0) {
                spin_lock_bh(&endpoint->target->rx_lock);
                if (get_queue_depth(&endpoint->rx_bufq)
                    < ep_cb.rx_refill_thresh) {
                        spin_unlock_bh(&endpoint->target->rx_lock);
                        ep_cb.rx_refill(endpoint->target, endpoint->eid);
                        return;
                }
                spin_unlock_bh(&endpoint->target->rx_lock);
        }
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
                               struct htc_endpoint *ep,
                               u32 *lk_ahds, struct list_head *queue, int n_msg)
{
        struct htc_packet *packet;
        /* FIXME: type of lk_ahds can't be right */
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
        struct htc_ep_callbacks ep_cb;
        int status = 0, j, full_len;
        bool no_recycle;

        full_len = CALC_TXRX_PADDED_LEN(target,
                                        le16_to_cpu(htc_hdr->payld_len) +
                                        sizeof(*htc_hdr));

        if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
                ath6kl_warn("Rx buffer requested with invalid length htc_hdr: eid %d, flags 0x%x, len %d\n",
                            htc_hdr->eid, htc_hdr->flags,
                            le16_to_cpu(htc_hdr->payld_len));
                return -EINVAL;
        }

        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {
                /*
                 * Reset the flag; any packets allocated using the
                 * rx_alloc() API cannot be recycled on cleanup,
                 * they must be explicitly returned.
                 */
                no_recycle = false;

                if (ep_cb.rx_allocthresh &&
                    (full_len > ep_cb.rx_alloc_thresh)) {
                        ep->ep_st.rx_alloc_thresh_hit += 1;
                        ep->ep_st.rxalloc_thresh_byte +=
                                le16_to_cpu(htc_hdr->payld_len);

                        spin_unlock_bh(&target->rx_lock);
                        no_recycle = true;

                        packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
                                                      full_len);
                        spin_lock_bh(&target->rx_lock);
                } else {
                        /* refill handler is being used */
                        if (list_empty(&ep->rx_bufq)) {
                                if (ep_cb.rx_refill) {
                                        spin_unlock_bh(&target->rx_lock);
                                        ep_cb.rx_refill(ep->target, ep->eid);
                                        spin_lock_bh(&target->rx_lock);
                                }
                        }

                        if (list_empty(&ep->rx_bufq)) {
                                packet = NULL;
                        } else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
                        }
                }

                if (!packet) {
                        target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ep->eid;
                        return -ENOSPC;
                }

                /* clear flags */
                packet->info.rx.rx_flags = 0;
                packet->info.rx.indicat_flags = 0;
                packet->status = 0;

                if (no_recycle)
                        /*
                         * flag that these packets cannot be
                         * recycled, they have to be returned to
                         * the user
                         */
                        packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

                /* Caller needs to free this upon any failure */
                list_add_tail(&packet->list, queue);

                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        status = -ECANCELED;
                        break;
                }

                if (j) {
                        packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
                        packet->info.rx.exp_hdr = 0xFFFFFFFF;
                } else
                        /* set expected look ahead */
                        packet->info.rx.exp_hdr = *lk_ahds;

                packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
                        HTC_HDR_LENGTH;
        }

        return status;
}

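/*
 * Parse the pending look-aheads and allocate receive buffers for
 * every message they describe, including full bundles advertised via
 * HTC_FLG_RX_BNDL_CNT.  On failure, all buffers queued so far are
 * reclaimed before the error is returned; -ENOSPC from the setup
 * path is deliberately swallowed so partial data can still be
 * received with whatever buffers were obtained.
 */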
static int ath6kl_htc_rx_alloc(struct htc_target *target,
                               u32 lk_ahds[], int msg,
                               struct htc_endpoint *endpoint,
                               struct list_head *queue)
{
        int status = 0;
        struct htc_packet *packet, *tmp_pkt;
        struct htc_frame_hdr *htc_hdr;
        int i, n_msg;

        spin_lock_bh(&target->rx_lock);

        for (i = 0; i < msg; i++) {
                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

                if (htc_hdr->eid >= ENDPOINT_MAX) {
                        ath6kl_err("invalid ep in look-ahead: %d\n",
                                   htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->eid != endpoint->eid) {
                        ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
                                   htc_hdr->eid, endpoint->eid, i);
                        status = -ENOMEM;
                        break;
                }

                if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
                        ath6kl_err("payload len %d exceeds max htc : %d !\n",
                                   htc_hdr->payld_len,
                                   (u32) HTC_MAX_PAYLOAD_LENGTH);
                        status = -ENOMEM;
                        break;
                }

                if (endpoint->svc_id == 0) {
                        ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
                        /*
                         * HTC header indicates that every packet to follow
                         * has the same padded length so that it can be
                         * optimally fetched as a full bundle.
                         */
                        n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
                                HTC_FLG_RX_BNDL_CNT_S;

                        /* the count doesn't include the starter frame */
                        n_msg++;
                        if (n_msg > target->msg_per_bndl_max) {
                                status = -ENOMEM;
                                break;
                        }

                        endpoint->ep_st.rx_bundle_from_hdr += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx bundle pkts %d\n",
                                   n_msg);
                } else
                        /* HTC header only indicates 1 message to fetch */
                        n_msg = 1;

                /* Setup packet buffers for each message */
                status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
                                             queue, n_msg);

                /*
                 * This is due to unavailability of buffers to rx entire data.
                 * Return no error so that free buffers from queue can be used
                 * to receive partial data.
                 */
                if (status == -ENOSPC) {
                        spin_unlock_bh(&target->rx_lock);
                        return 0;
                }

                if (status)
                        break;
        }

        spin_unlock_bh(&target->rx_lock);

        if (status) {
                list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
                        list_del(&packet->list);
                        htc_reclaim_rxbuf(target, packet,
                                          &target->endpoint[packet->endpoint]);
                }
        }

        return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
        if (packets->endpoint != ENDPOINT_0) {
                WARN_ON(1);
                return;
        }

        if (packets->status == -ECANCELED) {
                reclaim_rx_ctrl_buf(context, packets);
                return;
        }

        if (packets->act_len > 0) {
                ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
                           packets->act_len + HTC_HDR_LENGTH);

                ath6kl_dbg_dump(ATH6KL_DBG_HTC,
                                "htc rx unexpected endpoint 0 message", "",
                                packets->buf - HTC_HDR_LENGTH,
                                packets->act_len + HTC_HDR_LENGTH);
        }

        htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

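/*
 * Process a credit report trailer.  Credits returned to ENDPOINT_0
 * are credited back directly; anything else is handed to the credit
 * distribution function, and the endpoint TX queues are kicked if
 * any credits came back at all.
 */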
static void htc_proc_cred_rpt(struct htc_target *target,
                              struct htc_credit_report *rpt,
                              int n_entries,
                              enum htc_endpoint_id from_ep)
{
        struct htc_endpoint *endpoint;
        int tot_credits = 0, i;
        bool dist = false;

        spin_lock_bh(&target->tx_lock);

        for (i = 0; i < n_entries; i++, rpt++) {
                if (rpt->eid >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        spin_unlock_bh(&target->tx_lock);
                        return;
                }

                endpoint = &target->endpoint[rpt->eid];

                ath6kl_dbg(ATH6KL_DBG_CREDIT,
                           "credit report ep %d credits %d\n",
                           rpt->eid, rpt->credits);

                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;

                if (from_ep == rpt->eid) {
                        /*
                         * This credit report arrived on the same endpoint,
                         * indicating it arrived in an RX packet.
                         */
                        endpoint->ep_st.cred_from_rx += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_rx += 1;
                } else if (from_ep == ENDPOINT_0) {
                        /* credit arrived on endpoint 0 as a NULL message */
                        endpoint->ep_st.cred_from_ep0 += rpt->credits;
                        endpoint->ep_st.cred_rpt_ep0 += 1;
                } else {
                        endpoint->ep_st.cred_from_other += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_other += 1;
                }

                if (rpt->eid == ENDPOINT_0)
                        /* always give endpoint 0 credits back */
                        endpoint->cred_dist.credits += rpt->credits;
                else {
                        endpoint->cred_dist.cred_to_dist += rpt->credits;
                        dist = true;
                }

                /*
                 * Refresh tx depth for the distribution function that will
                 * recover these credits.  NOTE: this is only valid when
                 * there are credits to recover!
                 */
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                tot_credits += rpt->credits;
        }

        if (dist) {
                /*
                 * This was a credit return based on completed send
                 * operations.  Note: this is done with the lock held.
                 */
                ath6kl_credit_distribute(target->credit_info,
                                         &target->cred_dist_list,
                                         HTC_CREDIT_DIST_SEND_COMPLETE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (tot_credits)
                htc_chk_ep_txq(target);
}

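/*
 * Dispatch a single trailer record: credit reports, single
 * look-ahead reports (validated by the pre_valid/post_valid guard
 * bytes) and bundled look-ahead reports.  Unknown record IDs are
 * logged and skipped rather than treated as fatal.
 */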
static int htc_parse_trailer(struct htc_target *target,
                             struct htc_record_hdr *record,
                             u8 *record_buf, u32 *next_lk_ahds,
                             enum htc_endpoint_id endpoint,
                             int *n_lk_ahds)
{
        struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
        struct htc_lookahead_report *lk_ahd;
        int len;

        switch (record->rec_id) {
        case HTC_RECORD_CREDITS:
                len = record->len / sizeof(struct htc_credit_report);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                htc_proc_cred_rpt(target,
                                  (struct htc_credit_report *) record_buf,
                                  len, endpoint);
                break;
        case HTC_RECORD_LOOKAHEAD:
                len = record->len / sizeof(*lk_ahd);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                lk_ahd = (struct htc_lookahead_report *) record_buf;
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
                    next_lk_ahds) {
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);

                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC,
                                        "htc rx next look ahead",
                                        "", next_lk_ahds, 4);

                        *n_lk_ahds = 1;
                }
                break;
        case HTC_RECORD_LOOKAHEAD_BUNDLE:
                len = record->len / sizeof(*bundle_lkahd_rpt);
                if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                if (next_lk_ahds) {
                        int i;

                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;

                        ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
                                        "", record_buf, record->len);

                        for (i = 0; i < len; i++) {
                                memcpy((u8 *)&next_lk_ahds[i],
                                       bundle_lkahd_rpt->lk_ahd, 4);
                                bundle_lkahd_rpt++;
                        }

                        *n_lk_ahds = i;
                }
                break;
        default:
                ath6kl_err("unhandled record: id:%d len:%d\n",
                           record->rec_id, record->len);
                break;
        }

        return 0;
}

static int htc_proc_trailer(struct htc_target *target,
                            u8 *buf, int len, u32 *next_lk_ahds,
                            int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
        struct htc_record_hdr *record;
        int orig_len;
        int status;
        u8 *record_buf;
        u8 *orig_buf;

        ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
        ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

        orig_buf = buf;
        orig_len = len;
        status = 0;

        while (len > 0) {
                if (len < sizeof(struct htc_record_hdr)) {
                        status = -ENOMEM;
                        break;
                }
                /* these are byte aligned structs */
                record = (struct htc_record_hdr *) buf;
                len -= sizeof(struct htc_record_hdr);
                buf += sizeof(struct htc_record_hdr);

                if (record->len > len) {
                        ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
                                   record->len, record->rec_id, len);
                        status = -ENOMEM;
                        break;
                }
                record_buf = buf;

                status = htc_parse_trailer(target, record, record_buf,
                                           next_lk_ahds, endpoint, n_lk_ahds);

                if (status)
                        break;

                /* advance buffer past this record for next time around */
                buf += record->len;
                len -= record->len;
        }

        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
                                "", orig_buf, orig_len);

        return status;
}

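/*
 * Validate the HTC header of a received packet against the expected
 * look-ahead, process any trailer it carries, and strip the header
 * so that packet->buf points at the payload on return.
 */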
static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
                                     struct htc_packet *packet,
                                     u32 *next_lkahds, int *n_lkahds)
{
        int status = 0;
        u16 payload_len;
        u32 lk_ahd;
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

        if (n_lkahds != NULL)
                *n_lkahds = 0;

        /*
         * NOTE: we cannot assume the alignment of buf, so we use the safe
         * macros to retrieve 16 bit fields.
         */
        payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

        memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

        if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
                /*
                 * Refresh the expected header and the actual length as it
                 * was unknown when this packet was grabbed as part of the
                 * bundle.
                 */
                packet->info.rx.exp_hdr = lk_ahd;
                packet->act_len = payload_len + HTC_HDR_LENGTH;

                /* validate the actual header that was refreshed */
                if (packet->act_len > packet->buf_len) {
                        ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
                                   payload_len, lk_ahd);
                        /*
                         * Limit this to max buffer just to print out some
                         * of the buffer.
                         */
                        packet->act_len = min(packet->act_len, packet->buf_len);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->endpoint != htc_hdr->eid) {
                        ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
                                   htc_hdr->eid, packet->endpoint);
                        status = -ENOMEM;
                        goto fail_rx;
                }
        }

        if (lk_ahd != packet->info.rx.exp_hdr) {
                ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
                           __func__, packet, packet->info.rx.rx_flags);
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
                                "", &packet->info.rx.exp_hdr, 4);
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
                                "", (u8 *)&lk_ahd, sizeof(lk_ahd));
                status = -ENOMEM;
                goto fail_rx;
        }

        if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
                if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
                    htc_hdr->ctrl[0] > payload_len) {
                        ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
                                   __func__, payload_len, htc_hdr->ctrl[0]);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
                        next_lkahds = NULL;
                        n_lkahds = NULL;
                }

                status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
                                          + payload_len - htc_hdr->ctrl[0],
                                          htc_hdr->ctrl[0], next_lkahds,
                                          n_lkahds, packet->endpoint);

                if (status)
                        goto fail_rx;

                packet->act_len -= htc_hdr->ctrl[0];
        }

        packet->buf += HTC_HDR_LENGTH;
        packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
                                "", packet->buf, packet->act_len);

        return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
                                   struct htc_packet *packet)
{
        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx complete ep %d packet 0x%p\n",
                   endpoint->eid, packet);

        endpoint->ep_cb.rx(endpoint->target, packet);
}

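/*
 * Fetch several packets in one scatter request.  Each HTC packet
 * maps to exactly one scatter-list entry; packets that would
 * overflow max_rx_bndl_sz are left on rxq for a later pass, and
 * anything already fetched moves to sync_compq.
 */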
static int ath6kl_htc_rx_bundle(struct htc_target *target,
                                struct list_head *rxq,
                                struct list_head *sync_compq,
                                int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation;
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
                            __func__, get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc rx bundle depth %d pkts %d\n",
                   get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        if (scat_req == NULL)
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                pad_len = CALC_TXRX_PADDED_LEN(target,
                                               packet->act_len);

                if ((rem_space - pad_len) < 0) {
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packets 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle; the last packet,
                         * however, can have its lookahead used.
                         */
                        packet->info.rx.rx_flags |=
                            HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

        return status;
}

static int ath6kl_htc_rx_process_packets(struct htc_target *target,
                                         struct list_head *comp_pktq,
                                         u32 lk_ahds[],
                                         int *n_lk_ahd)
{
        struct htc_packet *packet, *tmp_pkt;
        struct htc_endpoint *ep;
        int status = 0;

        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
                ep = &target->endpoint[packet->endpoint];

                trace_ath6kl_htc_rx(packet->status, packet->endpoint,
                                    packet->buf, packet->act_len);

                /* process header for each of the recv packet */
                status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
                                                   n_lk_ahd);
                if (status)
                        return status;

                list_del(&packet->list);

                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
                         * based on the lookahead.
                         */
                        if (*n_lk_ahd > 0)
                                ath6kl_htc_rx_set_indicate(lk_ahds[0],
                                                           ep, packet);
                } else
                        /*
                         * Packets in a bundle automatically have
                         * this flag set.
                         */
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;

                ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

                if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
                        ep->ep_st.rx_bundl += 1;

                ath6kl_htc_rx_complete(ep, packet);
        }

        return status;
}

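/*
 * Drain rx_pktq, using bundled scatter reads when bundling is
 * enabled and more than one packet is pending, and falling back to
 * synchronous single-packet reads otherwise.  Fetched packets
 * accumulate on comp_pktq for header processing; on error, unused
 * and partially fetched buffers are reclaimed.
 */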
static int ath6kl_htc_rx_fetch(struct htc_target *target,
                               struct list_head *rx_pktq,
                               struct list_head *comp_pktq)
{
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;
        struct list_head tmp_rxq;
        struct htc_packet *packet, *tmp_pkt;

        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;

                INIT_LIST_HEAD(&tmp_rxq);

                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
                         * bundle transfer and recv bundling is
                         * allowed.
                         */
                        status = ath6kl_htc_rx_bundle(target, rx_pktq,
                                                      &tmp_rxq,
                                                      &fetched_pkts,
                                                      part_bundle);
                        if (status)
                                goto fail_rx;

                        if (!list_empty(rx_pktq))
                                part_bundle = true;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }

                if (!fetched_pkts) {
                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                  list);

                        /* fully synchronous */
                        packet->completion = NULL;

                        if (!list_is_singular(rx_pktq))
                                /*
                                 * look_aheads in all packets
                                 * except the last one in the
                                 * bundle must be ignored
                                 */
                                packet->info.rx.rx_flags |=
                                        HTC_RX_PKT_IGNORE_LOOKAHEAD;

                        /* go fetch the packet */
                        status = ath6kl_htc_rx_packet(target, packet,
                                                      packet->act_len);

                        list_move_tail(&packet->list, &tmp_rxq);

                        if (status)
                                goto fail_rx;

                        list_splice_tail_init(&tmp_rxq, comp_pktq);
                }
        }

        return 0;

fail_rx:

        /*
         * Cleanup any packets we allocated but didn't use to
         * actually fetch any packets.
         */

        list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
                list_del(&packet->list);
                htc_reclaim_rxbuf(target, packet,
                                  &target->endpoint[packet->endpoint]);
        }

        return status;
}

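/*
 * Main receive loop, entered from IRQ processing with the first
 * look-ahead.  Keeps allocating, fetching and processing packets
 * until no further look-ahead is reported or an error occurs, and
 * blocks the receiver if the host runs out of RX buffers.
 */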
ath6kl_htc_rxmsg_pending_handler(struct htc_target * target,u32 msg_look_ahead,int * num_pkts)2141*4882a593Smuzhiyun int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2142*4882a593Smuzhiyun 				     u32 msg_look_ahead, int *num_pkts)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun 	struct htc_packet *packets, *tmp_pkt;
2145*4882a593Smuzhiyun 	struct htc_endpoint *endpoint;
2146*4882a593Smuzhiyun 	struct list_head rx_pktq, comp_pktq;
2147*4882a593Smuzhiyun 	int status = 0;
2148*4882a593Smuzhiyun 	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2149*4882a593Smuzhiyun 	int num_look_ahead = 1;
2150*4882a593Smuzhiyun 	enum htc_endpoint_id id;
2151*4882a593Smuzhiyun 	int n_fetched = 0;
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	INIT_LIST_HEAD(&comp_pktq);
2154*4882a593Smuzhiyun 	*num_pkts = 0;
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	/*
2157*4882a593Smuzhiyun 	 * On first entry copy the look_aheads into our temp array for
2158*4882a593Smuzhiyun 	 * processing
2159*4882a593Smuzhiyun 	 */
2160*4882a593Smuzhiyun 	look_aheads[0] = msg_look_ahead;
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	while (true) {
2163*4882a593Smuzhiyun 		/*
2164*4882a593Smuzhiyun 		 * First lookahead sets the expected endpoint IDs for all
2165*4882a593Smuzhiyun 		 * packets in a bundle.
2166*4882a593Smuzhiyun 		 */
2167*4882a593Smuzhiyun 		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2168*4882a593Smuzhiyun 		endpoint = &target->endpoint[id];
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 		if (id >= ENDPOINT_MAX) {
2171*4882a593Smuzhiyun 			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2172*4882a593Smuzhiyun 				   id);
2173*4882a593Smuzhiyun 			status = -ENOMEM;
2174*4882a593Smuzhiyun 			break;
2175*4882a593Smuzhiyun 		}
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 		INIT_LIST_HEAD(&rx_pktq);
2178*4882a593Smuzhiyun 		INIT_LIST_HEAD(&comp_pktq);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 		/*
2181*4882a593Smuzhiyun 		 * Try to allocate as many HTC RX packets indicated by the
2182*4882a593Smuzhiyun 		 * look_aheads.
2183*4882a593Smuzhiyun 		 */
2184*4882a593Smuzhiyun 		status = ath6kl_htc_rx_alloc(target, look_aheads,
2185*4882a593Smuzhiyun 					     num_look_ahead, endpoint,
2186*4882a593Smuzhiyun 					     &rx_pktq);
2187*4882a593Smuzhiyun 		if (status)
2188*4882a593Smuzhiyun 			break;
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 		if (get_queue_depth(&rx_pktq) >= 2)
2191*4882a593Smuzhiyun 			/*
2192*4882a593Smuzhiyun 			 * A recv bundle was detected, force IRQ status
2193*4882a593Smuzhiyun 			 * re-check again
2194*4882a593Smuzhiyun 			 */
2195*4882a593Smuzhiyun 			target->chk_irq_status_cnt = 1;
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 		n_fetched += get_queue_depth(&rx_pktq);
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 		num_look_ahead = 0;
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun 		if (!status)
2204*4882a593Smuzhiyun 			ath6kl_htc_rx_chk_water_mark(endpoint);
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 		/* Process fetched packets */
2207*4882a593Smuzhiyun 		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2208*4882a593Smuzhiyun 						       look_aheads,
2209*4882a593Smuzhiyun 						       &num_look_ahead);
2210*4882a593Smuzhiyun 
2211*4882a593Smuzhiyun 		if (!num_look_ahead || status)
2212*4882a593Smuzhiyun 			break;
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 		/*
2215*4882a593Smuzhiyun 		 * For SYNCH processing, if we get here, we are running
2216*4882a593Smuzhiyun 		 * through the loop again due to a detected lookahead. Set
2217*4882a593Smuzhiyun 		 * flag that we should re-check IRQ status registers again
2218*4882a593Smuzhiyun 		 * before leaving IRQ processing, this can net better
2219*4882a593Smuzhiyun 		 * performance in high throughput situations.
2220*4882a593Smuzhiyun 		 */
2221*4882a593Smuzhiyun 		target->chk_irq_status_cnt = 1;
2222*4882a593Smuzhiyun 	}
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 	if (status) {
2225*4882a593Smuzhiyun 		if (status != -ECANCELED)
2226*4882a593Smuzhiyun 			ath6kl_err("failed to get pending recv messages: %d\n",
2227*4882a593Smuzhiyun 				   status);
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 		/* cleanup any packets in sync completion queue */
2230*4882a593Smuzhiyun 		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2231*4882a593Smuzhiyun 			list_del(&packets->list);
2232*4882a593Smuzhiyun 			htc_reclaim_rxbuf(target, packets,
2233*4882a593Smuzhiyun 					  &target->endpoint[packets->endpoint]);
2234*4882a593Smuzhiyun 		}
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2237*4882a593Smuzhiyun 			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
2238*4882a593Smuzhiyun 			ath6kl_hif_rx_control(target->dev, false);
2239*4882a593Smuzhiyun 		}
2240*4882a593Smuzhiyun 	}
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	/*
2243*4882a593Smuzhiyun 	 * Before leaving, check to see if host ran out of buffers and
2244*4882a593Smuzhiyun 	 * needs to stop the receiver.
2245*4882a593Smuzhiyun 	 */
2246*4882a593Smuzhiyun 	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2247*4882a593Smuzhiyun 		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
2248*4882a593Smuzhiyun 		ath6kl_hif_rx_control(target->dev, false);
2249*4882a593Smuzhiyun 	}
2250*4882a593Smuzhiyun 	*num_pkts = n_fetched;
2251*4882a593Smuzhiyun 
2252*4882a593Smuzhiyun 	return status;
2253*4882a593Smuzhiyun }
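
/*
 * The loop above is the lookahead-driven receive cycle. A minimal
 * pseudocode sketch of its contract (names mirror the real code; this
 * is not a drop-in implementation):
 *
 *	while (lookaheads remain && !status) {
 *		allocate one htc_packet per lookahead;
 *		fetch the packets from the mbox (bundled when >= 2);
 *		process headers; lookahead trailers may yield fresh
 *		lookaheads, which restart the loop;
 *	}
 *
 * Every extra pass also sets target->chk_irq_status_cnt so the IRQ
 * handler re-reads the status registers before returning.
 */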
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun /*
2256*4882a593Smuzhiyun  * Synchronously wait for a control message from the target.
2257*4882a593Smuzhiyun  * This function is used at initialization time ONLY.  At init,
2258*4882a593Smuzhiyun  * messages on ENDPOINT 0 are expected.
2259*4882a593Smuzhiyun  */
2260*4882a593Smuzhiyun static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2261*4882a593Smuzhiyun {
2262*4882a593Smuzhiyun 	struct htc_packet *packet = NULL;
2263*4882a593Smuzhiyun 	struct htc_frame_hdr *htc_hdr;
2264*4882a593Smuzhiyun 	u32 look_ahead;
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
2267*4882a593Smuzhiyun 				       HTC_TARGET_RESPONSE_TIMEOUT))
2268*4882a593Smuzhiyun 		return NULL;
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_HTC,
2271*4882a593Smuzhiyun 		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	htc_hdr = (struct htc_frame_hdr *)&look_ahead;
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	if (htc_hdr->eid != ENDPOINT_0)
2276*4882a593Smuzhiyun 		return NULL;
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	packet = htc_get_control_buf(target, false);
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	if (!packet)
2281*4882a593Smuzhiyun 		return NULL;
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	packet->info.rx.rx_flags = 0;
2284*4882a593Smuzhiyun 	packet->info.rx.exp_hdr = look_ahead;
2285*4882a593Smuzhiyun 	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if (packet->act_len > packet->buf_len)
2288*4882a593Smuzhiyun 		goto fail_ctrl_rx;
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	/* we want synchronous operation */
2291*4882a593Smuzhiyun 	packet->completion = NULL;
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	/* get the message from the device, this will block */
2294*4882a593Smuzhiyun 	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2295*4882a593Smuzhiyun 		goto fail_ctrl_rx;
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2298*4882a593Smuzhiyun 			    packet->buf, packet->act_len);
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	/* process receive header */
2301*4882a593Smuzhiyun 	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	if (packet->status) {
2304*4882a593Smuzhiyun 		ath6kl_err("htc_wait_for_ctrl_msg: ath6kl_htc_rx_process_hdr failed (status = %d)\n",
2305*4882a593Smuzhiyun 			   packet->status);
2306*4882a593Smuzhiyun 		goto fail_ctrl_rx;
2307*4882a593Smuzhiyun 	}
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	return packet;
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun fail_ctrl_rx:
2312*4882a593Smuzhiyun 	if (packet != NULL) {
2313*4882a593Smuzhiyun 		htc_rxpkt_reset(packet);
2314*4882a593Smuzhiyun 		reclaim_rx_ctrl_buf(target, packet);
2315*4882a593Smuzhiyun 	}
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun 	return NULL;
2318*4882a593Smuzhiyun }
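
/*
 * Note on the cast above: the 4-byte lookahead register snapshot is
 * reinterpreted in place as the start of an HTC frame header, so only
 * the fields that fit in those 4 bytes (eid, flags, payld_len) may be
 * inspected before the full message is fetched. The same pattern in
 * isolation (illustrative sketch):
 *
 *	u32 look_ahead;				(mbox lookahead snapshot)
 *	struct htc_frame_hdr *hdr = (struct htc_frame_hdr *)&look_ahead;
 *	u16 len = le16_to_cpu(hdr->payld_len) + HTC_HDR_LENGTH;
 */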
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
2321*4882a593Smuzhiyun 				  struct list_head *pkt_queue)
2322*4882a593Smuzhiyun {
2323*4882a593Smuzhiyun 	struct htc_endpoint *endpoint;
2324*4882a593Smuzhiyun 	struct htc_packet *first_pkt;
2325*4882a593Smuzhiyun 	bool rx_unblock = false;
2326*4882a593Smuzhiyun 	int status = 0, depth;
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	if (list_empty(pkt_queue))
2329*4882a593Smuzhiyun 		return -ENOMEM;
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	if (first_pkt->endpoint >= ENDPOINT_MAX)
2334*4882a593Smuzhiyun 		return status;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	depth = get_queue_depth(pkt_queue);
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_HTC,
2339*4882a593Smuzhiyun 		   "htc rx add multiple ep id %d cnt %d len %d\n",
2340*4882a593Smuzhiyun 		first_pkt->endpoint, depth, first_pkt->buf_len);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	endpoint = &target->endpoint[first_pkt->endpoint];
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2345*4882a593Smuzhiyun 		struct htc_packet *packet, *tmp_pkt;
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 		/* walk through queue and mark each one canceled */
2348*4882a593Smuzhiyun 		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
2349*4882a593Smuzhiyun 			packet->status = -ECANCELED;
2350*4882a593Smuzhiyun 			list_del(&packet->list);
2351*4882a593Smuzhiyun 			ath6kl_htc_rx_complete(endpoint, packet);
2352*4882a593Smuzhiyun 		}
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 		return status;
2355*4882a593Smuzhiyun 	}
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 	spin_lock_bh(&target->rx_lock);
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun 	/* check if we are blocked waiting for a new buffer */
2362*4882a593Smuzhiyun 	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2363*4882a593Smuzhiyun 		if (target->ep_waiting == first_pkt->endpoint) {
2364*4882a593Smuzhiyun 			ath6kl_dbg(ATH6KL_DBG_HTC,
2365*4882a593Smuzhiyun 				   "htc rx blocked on ep %d, unblocking\n",
2366*4882a593Smuzhiyun 				   target->ep_waiting);
2367*4882a593Smuzhiyun 			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
2368*4882a593Smuzhiyun 			target->ep_waiting = ENDPOINT_MAX;
2369*4882a593Smuzhiyun 			rx_unblock = true;
2370*4882a593Smuzhiyun 		}
2371*4882a593Smuzhiyun 	}
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 	spin_unlock_bh(&target->rx_lock);
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
2376*4882a593Smuzhiyun 		/* TODO : implement a buffer threshold count? */
2377*4882a593Smuzhiyun 		ath6kl_hif_rx_control(target->dev, true);
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	return status;
2380*4882a593Smuzhiyun }
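
/*
 * Sketch of how a caller might batch-refill a data endpoint through
 * this op, modelled on the driver's rx-refill path (buffer size and
 * count are illustrative, and the htc-ops.h wrapper is assumed):
 *
 *	struct sk_buff *skb;
 *	struct htc_packet *packet;
 *	LIST_HEAD(queue);
 *	int i;
 *
 *	for (i = 0; i < n; i++) {
 *		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
 *		if (!skb)
 *			break;
 *		packet = (struct htc_packet *) skb->head;
 *		set_htc_rxpkt_info(packet, skb, skb->data,
 *				   ATH6KL_BUFFER_SIZE, endpoint);
 *		list_add_tail(&packet->list, &queue);
 *	}
 *
 *	ath6kl_htc_add_rxbuf_multiple(ar, &queue);
 */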
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
2383*4882a593Smuzhiyun {
2384*4882a593Smuzhiyun 	struct htc_endpoint *endpoint;
2385*4882a593Smuzhiyun 	struct htc_packet *packet, *tmp_pkt;
2386*4882a593Smuzhiyun 	int i;
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun 	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2389*4882a593Smuzhiyun 		endpoint = &target->endpoint[i];
2390*4882a593Smuzhiyun 		if (!endpoint->svc_id)
2391*4882a593Smuzhiyun 			/* not in use */
2392*4882a593Smuzhiyun 			continue;
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 		spin_lock_bh(&target->rx_lock);
2395*4882a593Smuzhiyun 		list_for_each_entry_safe(packet, tmp_pkt,
2396*4882a593Smuzhiyun 					 &endpoint->rx_bufq, list) {
2397*4882a593Smuzhiyun 			list_del(&packet->list);
2398*4882a593Smuzhiyun 			spin_unlock_bh(&target->rx_lock);
2399*4882a593Smuzhiyun 			ath6kl_dbg(ATH6KL_DBG_HTC,
2400*4882a593Smuzhiyun 				   "htc rx flush pkt 0x%p  len %d  ep %d\n",
2401*4882a593Smuzhiyun 				   packet, packet->buf_len,
2402*4882a593Smuzhiyun 				   packet->endpoint);
2403*4882a593Smuzhiyun 			/*
2404*4882a593Smuzhiyun 			 * Packets in the endpoint 0 rx_bufq were
2405*4882a593Smuzhiyun 			 * originally queued from target->free_ctrl_rxbuf,
2406*4882a593Smuzhiyun 			 * where the packet and packet->buf_start are
2407*4882a593Smuzhiyun 			 * allocated separately with kmalloc(). For the
2408*4882a593Smuzhiyun 			 * other endpoints, the buffer is allocated as an
2409*4882a593Smuzhiyun 			 * skb and the packet lives at skb->head. Take care
2410*4882a593Smuzhiyun 			 * of this difference when freeing the memory.
2411*4882a593Smuzhiyun 			 */
2412*4882a593Smuzhiyun 			if (packet->endpoint == ENDPOINT_0) {
2413*4882a593Smuzhiyun 				kfree(packet->buf_start);
2414*4882a593Smuzhiyun 				kfree(packet);
2415*4882a593Smuzhiyun 			} else {
2416*4882a593Smuzhiyun 				dev_kfree_skb(packet->pkt_cntxt);
2417*4882a593Smuzhiyun 			}
2418*4882a593Smuzhiyun 			spin_lock_bh(&target->rx_lock);
2419*4882a593Smuzhiyun 		}
2420*4882a593Smuzhiyun 		spin_unlock_bh(&target->rx_lock);
2421*4882a593Smuzhiyun 	}
2422*4882a593Smuzhiyun }
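
/*
 * The free path above mirrors two allocation schemes: EP0 control
 * buffers come from ath6kl_htc_reset() below, where the packet and its
 * buffer are two separate kmalloc()s,
 *
 *	packet = kzalloc(sizeof(*packet), GFP_KERNEL);
 *	packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
 *
 * while data endpoints embed the htc_packet at skb->head and keep the
 * skb in packet->pkt_cntxt, so a single dev_kfree_skb() releases both.
 */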
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
2425*4882a593Smuzhiyun 			    struct htc_service_connect_req *conn_req,
2426*4882a593Smuzhiyun 			    struct htc_service_connect_resp *conn_resp)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun 	struct htc_packet *rx_pkt = NULL;
2429*4882a593Smuzhiyun 	struct htc_packet *tx_pkt = NULL;
2430*4882a593Smuzhiyun 	struct htc_conn_service_resp *resp_msg;
2431*4882a593Smuzhiyun 	struct htc_conn_service_msg *conn_msg;
2432*4882a593Smuzhiyun 	struct htc_endpoint *endpoint;
2433*4882a593Smuzhiyun 	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2434*4882a593Smuzhiyun 	unsigned int max_msg_sz = 0;
2435*4882a593Smuzhiyun 	int status = 0;
2436*4882a593Smuzhiyun 	u16 msg_id;
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_HTC,
2439*4882a593Smuzhiyun 		   "htc connect service target 0x%p service id 0x%x\n",
2440*4882a593Smuzhiyun 		   target, conn_req->svc_id);
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2443*4882a593Smuzhiyun 		/* special case for pseudo control service */
2444*4882a593Smuzhiyun 		assigned_ep = ENDPOINT_0;
2445*4882a593Smuzhiyun 		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2446*4882a593Smuzhiyun 	} else {
2447*4882a593Smuzhiyun 		/* allocate a packet to send to the target */
2448*4882a593Smuzhiyun 		tx_pkt = htc_get_control_buf(target, true);
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 		if (!tx_pkt)
2451*4882a593Smuzhiyun 			return -ENOMEM;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2454*4882a593Smuzhiyun 		memset(conn_msg, 0, sizeof(*conn_msg));
2455*4882a593Smuzhiyun 		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2456*4882a593Smuzhiyun 		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2457*4882a593Smuzhiyun 		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2460*4882a593Smuzhiyun 				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
2461*4882a593Smuzhiyun 				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun 		/* we want synchronous operation */
2464*4882a593Smuzhiyun 		tx_pkt->completion = NULL;
2465*4882a593Smuzhiyun 		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
2466*4882a593Smuzhiyun 		status = ath6kl_htc_tx_issue(target, tx_pkt);
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun 		if (status)
2469*4882a593Smuzhiyun 			goto fail_tx;
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 		/* wait for response */
2472*4882a593Smuzhiyun 		rx_pkt = htc_wait_for_ctrl_msg(target);
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 		if (!rx_pkt) {
2475*4882a593Smuzhiyun 			status = -ENOMEM;
2476*4882a593Smuzhiyun 			goto fail_tx;
2477*4882a593Smuzhiyun 		}
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2480*4882a593Smuzhiyun 		msg_id = le16_to_cpu(resp_msg->msg_id);
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 		if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
2483*4882a593Smuzhiyun 		    (rx_pkt->act_len < sizeof(*resp_msg))) {
2484*4882a593Smuzhiyun 			status = -ENOMEM;
2485*4882a593Smuzhiyun 			goto fail_tx;
2486*4882a593Smuzhiyun 		}
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 		conn_resp->resp_code = resp_msg->status;
2489*4882a593Smuzhiyun 		/* check response status */
2490*4882a593Smuzhiyun 		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2491*4882a593Smuzhiyun 			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2492*4882a593Smuzhiyun 				   resp_msg->svc_id, resp_msg->status);
2493*4882a593Smuzhiyun 			status = -ENOMEM;
2494*4882a593Smuzhiyun 			goto fail_tx;
2495*4882a593Smuzhiyun 		}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2498*4882a593Smuzhiyun 		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2499*4882a593Smuzhiyun 	}
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
2502*4882a593Smuzhiyun 			 assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
2503*4882a593Smuzhiyun 		status = -ENOMEM;
2504*4882a593Smuzhiyun 		goto fail_tx;
2505*4882a593Smuzhiyun 	}
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun 	endpoint = &target->endpoint[assigned_ep];
2508*4882a593Smuzhiyun 	endpoint->eid = assigned_ep;
2509*4882a593Smuzhiyun 	if (endpoint->svc_id) {
2510*4882a593Smuzhiyun 		status = -ENOMEM;
2511*4882a593Smuzhiyun 		goto fail_tx;
2512*4882a593Smuzhiyun 	}
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun 	/* return assigned endpoint to caller */
2515*4882a593Smuzhiyun 	conn_resp->endpoint = assigned_ep;
2516*4882a593Smuzhiyun 	conn_resp->len_max = max_msg_sz;
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun 	/* setup the endpoint */
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	/* this marks the endpoint in use */
2521*4882a593Smuzhiyun 	endpoint->svc_id = conn_req->svc_id;
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	endpoint->max_txq_depth = conn_req->max_txq_depth;
2524*4882a593Smuzhiyun 	endpoint->len_max = max_msg_sz;
2525*4882a593Smuzhiyun 	endpoint->ep_cb = conn_req->ep_cb;
2526*4882a593Smuzhiyun 	endpoint->cred_dist.svc_id = conn_req->svc_id;
2527*4882a593Smuzhiyun 	endpoint->cred_dist.htc_ep = endpoint;
2528*4882a593Smuzhiyun 	endpoint->cred_dist.endpoint = assigned_ep;
2529*4882a593Smuzhiyun 	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	switch (endpoint->svc_id) {
2532*4882a593Smuzhiyun 	case WMI_DATA_BK_SVC:
2533*4882a593Smuzhiyun 		endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
2534*4882a593Smuzhiyun 		break;
2535*4882a593Smuzhiyun 	default:
2536*4882a593Smuzhiyun 		endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
2537*4882a593Smuzhiyun 		break;
2538*4882a593Smuzhiyun 	}
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	if (conn_req->max_rxmsg_sz) {
2541*4882a593Smuzhiyun 		/*
2542*4882a593Smuzhiyun 		 * Override the cred_per_msg calculation; this optimizes
2543*4882a593Smuzhiyun 		 * the credit-low indications since the host will actually
2544*4882a593Smuzhiyun 		 * issue smaller messages in the send path.
2545*4882a593Smuzhiyun 		 */
2546*4882a593Smuzhiyun 		if (conn_req->max_rxmsg_sz > max_msg_sz) {
2547*4882a593Smuzhiyun 			status = -ENOMEM;
2548*4882a593Smuzhiyun 			goto fail_tx;
2549*4882a593Smuzhiyun 		}
2550*4882a593Smuzhiyun 		endpoint->cred_dist.cred_per_msg =
2551*4882a593Smuzhiyun 		    conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2552*4882a593Smuzhiyun 	} else
2553*4882a593Smuzhiyun 		endpoint->cred_dist.cred_per_msg =
2554*4882a593Smuzhiyun 		    max_msg_sz / target->tgt_cred_sz;
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun 	if (!endpoint->cred_dist.cred_per_msg)
2557*4882a593Smuzhiyun 		endpoint->cred_dist.cred_per_msg = 1;
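
	/*
	 * Worked example with illustrative numbers: for tgt_cred_sz =
	 * 256 and max_msg_sz = 1664, cred_per_msg = 1664 / 256 = 6, so
	 * one maximum-sized message consumes six credits. A service
	 * that caps rx messages at 512 bytes instead gets cred_per_msg
	 * = 2, and the clamp above guarantees at least one credit per
	 * message when messages are smaller than a single credit.
	 */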
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun 	/* save local connection flags */
2560*4882a593Smuzhiyun 	endpoint->conn_flags = conn_req->flags;
2561*4882a593Smuzhiyun 
2562*4882a593Smuzhiyun fail_tx:
2563*4882a593Smuzhiyun 	if (tx_pkt)
2564*4882a593Smuzhiyun 		htc_reclaim_txctrl_buf(target, tx_pkt);
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	if (rx_pkt) {
2567*4882a593Smuzhiyun 		htc_rxpkt_reset(rx_pkt);
2568*4882a593Smuzhiyun 		reclaim_rx_ctrl_buf(target, rx_pkt);
2569*4882a593Smuzhiyun 	}
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	return status;
2572*4882a593Smuzhiyun }
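
/*
 * Sketch of a hypothetical caller connecting a WMI service through
 * this op via the htc-ops.h wrappers (field values are illustrative;
 * my_rx_handler is a made-up callback name):
 *
 *	struct htc_service_connect_req req;
 *	struct htc_service_connect_resp resp;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&resp, 0, sizeof(resp));
 *	req.svc_id = WMI_CONTROL_SVC;
 *	req.ep_cb.rx = my_rx_handler;
 *	req.max_txq_depth = 16;
 *
 *	if (ath6kl_htc_conn_service(ar, &req, &resp))
 *		goto err;
 *	eid = resp.endpoint;			(use for subsequent tx)
 */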
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun static void reset_ep_state(struct htc_target *target)
2575*4882a593Smuzhiyun {
2576*4882a593Smuzhiyun 	struct htc_endpoint *endpoint;
2577*4882a593Smuzhiyun 	int i;
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2580*4882a593Smuzhiyun 		endpoint = &target->endpoint[i];
2581*4882a593Smuzhiyun 		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2582*4882a593Smuzhiyun 		endpoint->svc_id = 0;
2583*4882a593Smuzhiyun 		endpoint->len_max = 0;
2584*4882a593Smuzhiyun 		endpoint->max_txq_depth = 0;
2585*4882a593Smuzhiyun 		memset(&endpoint->ep_st, 0,
2586*4882a593Smuzhiyun 		       sizeof(endpoint->ep_st));
2587*4882a593Smuzhiyun 		INIT_LIST_HEAD(&endpoint->rx_bufq);
2588*4882a593Smuzhiyun 		INIT_LIST_HEAD(&endpoint->txq);
2589*4882a593Smuzhiyun 		endpoint->target = target;
2590*4882a593Smuzhiyun 	}
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 	/* reset distribution list */
2593*4882a593Smuzhiyun 	/* FIXME: free existing entries */
2594*4882a593Smuzhiyun 	INIT_LIST_HEAD(&target->cred_dist_list);
2595*4882a593Smuzhiyun }
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
2598*4882a593Smuzhiyun 			     enum htc_endpoint_id endpoint)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun 	int num;
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	spin_lock_bh(&target->rx_lock);
2603*4882a593Smuzhiyun 	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2604*4882a593Smuzhiyun 	spin_unlock_bh(&target->rx_lock);
2605*4882a593Smuzhiyun 	return num;
2606*4882a593Smuzhiyun }
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun static void htc_setup_msg_bndl(struct htc_target *target)
2609*4882a593Smuzhiyun {
2610*4882a593Smuzhiyun 	/* limit what HTC can handle */
2611*4882a593Smuzhiyun 	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2612*4882a593Smuzhiyun 				       target->msg_per_bndl_max);
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2615*4882a593Smuzhiyun 		target->msg_per_bndl_max = 0;
2616*4882a593Smuzhiyun 		return;
2617*4882a593Smuzhiyun 	}
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 	/* limit the bundle to what the device layer can handle */
2620*4882a593Smuzhiyun 	target->msg_per_bndl_max = min(target->max_scat_entries,
2621*4882a593Smuzhiyun 				       target->msg_per_bndl_max);
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_BOOT,
2624*4882a593Smuzhiyun 		   "htc bundling allowed msg_per_bndl_max %d\n",
2625*4882a593Smuzhiyun 		   target->msg_per_bndl_max);
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 	/* Max rx bundle size is limited by the max tx bundle size */
2628*4882a593Smuzhiyun 	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2629*4882a593Smuzhiyun 	/* Max tx bundle size is limited by the extended mbox address range */
2630*4882a593Smuzhiyun 	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2631*4882a593Smuzhiyun 				     target->max_xfer_szper_scatreq);
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
2634*4882a593Smuzhiyun 		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	if (target->max_tx_bndl_sz)
2637*4882a593Smuzhiyun 		/* tx_bndl_mask is enabled per AC, each has 1 bit */
2638*4882a593Smuzhiyun 		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	if (target->max_rx_bndl_sz)
2641*4882a593Smuzhiyun 		target->rx_bndl_enable = true;
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun 	if ((target->tgt_cred_sz % target->block_sz) != 0) {
2644*4882a593Smuzhiyun 		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2645*4882a593Smuzhiyun 			    target->tgt_cred_sz);
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 		/*
2648*4882a593Smuzhiyun 		 * Disallow send bundling since the credit size is
2649*4882a593Smuzhiyun 		 * not aligned to a block size; the I/O block
2650*4882a593Smuzhiyun 		 * padding would spill into the next credit buffer,
2651*4882a593Smuzhiyun 		 * which is fatal.
2652*4882a593Smuzhiyun 		 */
2653*4882a593Smuzhiyun 		target->tx_bndl_mask = 0;
2654*4882a593Smuzhiyun 	}
2655*4882a593Smuzhiyun }
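
/*
 * Worked example of the clamping above (illustrative numbers): with
 * HTC_HOST_MAX_MSG_PER_BUNDLE = 8 and a HIF reporting
 * max_scat_entries = 16, msg_per_bndl_max stays at 8. If the HIF's
 * max_xfer_szper_scatreq is 32 KiB while the extended mbox window
 * (HIF_MBOX0_EXT_WIDTH) is smaller, tx bundles are capped by the mbox
 * window while rx bundles may use the full scatter request size.
 */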
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
2658*4882a593Smuzhiyun {
2659*4882a593Smuzhiyun 	struct htc_packet *packet = NULL;
2660*4882a593Smuzhiyun 	struct htc_ready_ext_msg *rdy_msg;
2661*4882a593Smuzhiyun 	struct htc_service_connect_req connect;
2662*4882a593Smuzhiyun 	struct htc_service_connect_resp resp;
2663*4882a593Smuzhiyun 	int status;
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	/* we should get one control message indicating the target is ready */
2666*4882a593Smuzhiyun 	packet = htc_wait_for_ctrl_msg(target);
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	if (!packet)
2669*4882a593Smuzhiyun 		return -ENOMEM;
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	/* we controlled the buffer creation so it's properly aligned */
2672*4882a593Smuzhiyun 	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2675*4882a593Smuzhiyun 	    (packet->act_len < sizeof(struct htc_ready_msg))) {
2676*4882a593Smuzhiyun 		status = -ENOMEM;
2677*4882a593Smuzhiyun 		goto fail_wait_target;
2678*4882a593Smuzhiyun 	}
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun 	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2681*4882a593Smuzhiyun 		status = -ENOMEM;
2682*4882a593Smuzhiyun 		goto fail_wait_target;
2683*4882a593Smuzhiyun 	}
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2686*4882a593Smuzhiyun 	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_BOOT,
2689*4882a593Smuzhiyun 		   "htc target ready credits %d size %d\n",
2690*4882a593Smuzhiyun 		   target->tgt_creds, target->tgt_cred_sz);
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun 	/* check if this is an extended ready message */
2693*4882a593Smuzhiyun 	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2694*4882a593Smuzhiyun 		/* this is an extended message */
2695*4882a593Smuzhiyun 		target->htc_tgt_ver = rdy_msg->htc_ver;
2696*4882a593Smuzhiyun 		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2697*4882a593Smuzhiyun 	} else {
2698*4882a593Smuzhiyun 		/* legacy */
2699*4882a593Smuzhiyun 		target->htc_tgt_ver = HTC_VERSION_2P0;
2700*4882a593Smuzhiyun 		target->msg_per_bndl_max = 0;
2701*4882a593Smuzhiyun 	}
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun 	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
2704*4882a593Smuzhiyun 		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2705*4882a593Smuzhiyun 		   target->htc_tgt_ver);
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	if (target->msg_per_bndl_max > 0)
2708*4882a593Smuzhiyun 		htc_setup_msg_bndl(target);
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	/* setup our pseudo HTC control endpoint connection */
2711*4882a593Smuzhiyun 	memset(&connect, 0, sizeof(connect));
2712*4882a593Smuzhiyun 	memset(&resp, 0, sizeof(resp));
2713*4882a593Smuzhiyun 	connect.ep_cb.rx = htc_ctrl_rx;
2714*4882a593Smuzhiyun 	connect.ep_cb.rx_refill = NULL;
2715*4882a593Smuzhiyun 	connect.ep_cb.tx_full = NULL;
2716*4882a593Smuzhiyun 	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2717*4882a593Smuzhiyun 	connect.svc_id = HTC_CTRL_RSVD_SVC;
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun 	/* connect fake service */
2720*4882a593Smuzhiyun 	status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	if (status)
2723*4882a593Smuzhiyun 		/*
2724*4882a593Smuzhiyun 		 * FIXME: this call doesn't make sense; the caller should
2725*4882a593Smuzhiyun 		 * call ath6kl_htc_mbox_cleanup() when it wants to remove htc.
2726*4882a593Smuzhiyun 		 */
2727*4882a593Smuzhiyun 		ath6kl_hif_cleanup_scatter(target->dev->ar);
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun fail_wait_target:
2730*4882a593Smuzhiyun 	if (packet) {
2731*4882a593Smuzhiyun 		htc_rxpkt_reset(packet);
2732*4882a593Smuzhiyun 		reclaim_rx_ctrl_buf(target, packet);
2733*4882a593Smuzhiyun 	}
2734*4882a593Smuzhiyun 
2735*4882a593Smuzhiyun 	return status;
2736*4882a593Smuzhiyun }
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun /*
2739*4882a593Smuzhiyun  * Start HTC, enable interrupts and let the target know
2740*4882a593Smuzhiyun  * host has finished setup.
2741*4882a593Smuzhiyun  */
2742*4882a593Smuzhiyun static int ath6kl_htc_mbox_start(struct htc_target *target)
2743*4882a593Smuzhiyun {
2744*4882a593Smuzhiyun 	struct htc_packet *packet;
2745*4882a593Smuzhiyun 	int status;
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	memset(&target->dev->irq_proc_reg, 0,
2748*4882a593Smuzhiyun 	       sizeof(target->dev->irq_proc_reg));
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	/* Disable interrupts at the chip level */
2751*4882a593Smuzhiyun 	ath6kl_hif_disable_intrs(target->dev);
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 	target->htc_flags = 0;
2754*4882a593Smuzhiyun 	target->rx_st_flags = 0;
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 	/* Push control receive buffers into htc control endpoint */
2757*4882a593Smuzhiyun 	while ((packet = htc_get_control_buf(target, false)) != NULL) {
2758*4882a593Smuzhiyun 		status = htc_add_rxbuf(target, packet);
2759*4882a593Smuzhiyun 		if (status)
2760*4882a593Smuzhiyun 			return status;
2761*4882a593Smuzhiyun 	}
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun 	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2764*4882a593Smuzhiyun 	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
2765*4882a593Smuzhiyun 			   target->tgt_creds);
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	dump_cred_dist_stats(target);
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	/* Indicate setup completion to the target */
2770*4882a593Smuzhiyun 	status = htc_setup_tx_complete(target);
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun 	if (status)
2773*4882a593Smuzhiyun 		return status;
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 	/* unmask interrupts */
2776*4882a593Smuzhiyun 	status = ath6kl_hif_unmask_intrs(target->dev);
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	if (status)
2779*4882a593Smuzhiyun 		ath6kl_htc_mbox_stop(target);
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun 	return status;
2782*4882a593Smuzhiyun }
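
/*
 * Bring-up order as the core drives these ops through the htc-ops.h
 * wrappers (sketch; error handling elided):
 *
 *	ar->htc_target = ath6kl_htc_create(ar);
 *	ath6kl_htc_wait_target(ar);	(HTC_MSG_READY handshake)
 *	... connect services, post rx buffers ...
 *	ath6kl_htc_start(ar);		(unmask IRQs, send setup-complete)
 */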
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun static int ath6kl_htc_reset(struct htc_target *target)
2785*4882a593Smuzhiyun {
2786*4882a593Smuzhiyun 	u32 block_size, ctrl_bufsz;
2787*4882a593Smuzhiyun 	struct htc_packet *packet;
2788*4882a593Smuzhiyun 	int i;
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 	reset_ep_state(target);
2791*4882a593Smuzhiyun 
2792*4882a593Smuzhiyun 	block_size = target->dev->ar->mbox_info.block_size;
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun 	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2795*4882a593Smuzhiyun 		      (block_size + HTC_HDR_LENGTH) :
2796*4882a593Smuzhiyun 		      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2797*4882a593Smuzhiyun 
2798*4882a593Smuzhiyun 	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2799*4882a593Smuzhiyun 		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2800*4882a593Smuzhiyun 		if (!packet)
2801*4882a593Smuzhiyun 			return -ENOMEM;
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2804*4882a593Smuzhiyun 		if (!packet->buf_start) {
2805*4882a593Smuzhiyun 			kfree(packet);
2806*4882a593Smuzhiyun 			return -ENOMEM;
2807*4882a593Smuzhiyun 		}
2808*4882a593Smuzhiyun 
2809*4882a593Smuzhiyun 		packet->buf_len = ctrl_bufsz;
2810*4882a593Smuzhiyun 		if (i < NUM_CONTROL_RX_BUFFERS) {
2811*4882a593Smuzhiyun 			packet->act_len = 0;
2812*4882a593Smuzhiyun 			packet->buf = packet->buf_start;
2813*4882a593Smuzhiyun 			packet->endpoint = ENDPOINT_0;
2814*4882a593Smuzhiyun 			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2815*4882a593Smuzhiyun 		} else {
2816*4882a593Smuzhiyun 			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2817*4882a593Smuzhiyun 		}
2818*4882a593Smuzhiyun 	}
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	return 0;
2821*4882a593Smuzhiyun }
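
/*
 * Example of the control-buffer sizing above (illustrative numbers):
 * with a 128-byte I/O block size and an HTC_MAX_CTRL_MSG_LEN of 256,
 * ctrl_bufsz = HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH; only a block
 * size larger than the maximum control message would make the block
 * size dictate the allocation. Either way each control buffer holds
 * one maximal message plus its HTC header.
 */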
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun /* htc_stop: stop interrupt reception and flush all queued buffers */
2824*4882a593Smuzhiyun static void ath6kl_htc_mbox_stop(struct htc_target *target)
2825*4882a593Smuzhiyun {
2826*4882a593Smuzhiyun 	spin_lock_bh(&target->htc_lock);
2827*4882a593Smuzhiyun 	target->htc_flags |= HTC_OP_STATE_STOPPING;
2828*4882a593Smuzhiyun 	spin_unlock_bh(&target->htc_lock);
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 	/*
2831*4882a593Smuzhiyun 	 * Masking interrupts is a synchronous operation; when this
2832*4882a593Smuzhiyun 	 * function returns, all pending HIF I/O has completed and we
2833*4882a593Smuzhiyun 	 * can safely flush the queues.
2834*4882a593Smuzhiyun 	 */
2835*4882a593Smuzhiyun 	ath6kl_hif_mask_intrs(target->dev);
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	ath6kl_htc_flush_txep_all(target);
2838*4882a593Smuzhiyun 
2839*4882a593Smuzhiyun 	ath6kl_htc_mbox_flush_rx_buf(target);
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 	ath6kl_htc_reset(target);
2842*4882a593Smuzhiyun }
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
2845*4882a593Smuzhiyun {
2846*4882a593Smuzhiyun 	struct htc_target *target = NULL;
2847*4882a593Smuzhiyun 	int status = 0;
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun 	target = kzalloc(sizeof(*target), GFP_KERNEL);
2850*4882a593Smuzhiyun 	if (!target) {
2851*4882a593Smuzhiyun 		ath6kl_err("unable to allocate memory\n");
2852*4882a593Smuzhiyun 		return NULL;
2853*4882a593Smuzhiyun 	}
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2856*4882a593Smuzhiyun 	if (!target->dev) {
2857*4882a593Smuzhiyun 		ath6kl_err("unable to allocate memory\n");
2858*4882a593Smuzhiyun 		kfree(target);
2859*4882a593Smuzhiyun 		return NULL;
2860*4882a593Smuzhiyun 	}
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	spin_lock_init(&target->htc_lock);
2863*4882a593Smuzhiyun 	spin_lock_init(&target->rx_lock);
2864*4882a593Smuzhiyun 	spin_lock_init(&target->tx_lock);
2865*4882a593Smuzhiyun 
2866*4882a593Smuzhiyun 	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2867*4882a593Smuzhiyun 	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2868*4882a593Smuzhiyun 	INIT_LIST_HEAD(&target->cred_dist_list);
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun 	target->dev->ar = ar;
2871*4882a593Smuzhiyun 	target->dev->htc_cnxt = target;
2872*4882a593Smuzhiyun 	target->ep_waiting = ENDPOINT_MAX;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 	status = ath6kl_hif_setup(target->dev);
2875*4882a593Smuzhiyun 	if (status)
2876*4882a593Smuzhiyun 		goto err_htc_cleanup;
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun 	status = ath6kl_htc_reset(target);
2879*4882a593Smuzhiyun 	if (status)
2880*4882a593Smuzhiyun 		goto err_htc_cleanup;
2881*4882a593Smuzhiyun 
2882*4882a593Smuzhiyun 	return target;
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun err_htc_cleanup:
2885*4882a593Smuzhiyun 	ath6kl_htc_mbox_cleanup(target);
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	return NULL;
2888*4882a593Smuzhiyun }
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun /* cleanup the HTC instance */
2891*4882a593Smuzhiyun static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2892*4882a593Smuzhiyun {
2893*4882a593Smuzhiyun 	struct htc_packet *packet, *tmp_packet;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	ath6kl_hif_cleanup_scatter(target->dev->ar);
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 	list_for_each_entry_safe(packet, tmp_packet,
2898*4882a593Smuzhiyun 				 &target->free_ctrl_txbuf, list) {
2899*4882a593Smuzhiyun 		list_del(&packet->list);
2900*4882a593Smuzhiyun 		kfree(packet->buf_start);
2901*4882a593Smuzhiyun 		kfree(packet);
2902*4882a593Smuzhiyun 	}
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun 	list_for_each_entry_safe(packet, tmp_packet,
2905*4882a593Smuzhiyun 				 &target->free_ctrl_rxbuf, list) {
2906*4882a593Smuzhiyun 		list_del(&packet->list);
2907*4882a593Smuzhiyun 		kfree(packet->buf_start);
2908*4882a593Smuzhiyun 		kfree(packet);
2909*4882a593Smuzhiyun 	}
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	kfree(target->dev);
2912*4882a593Smuzhiyun 	kfree(target);
2913*4882a593Smuzhiyun }
2914*4882a593Smuzhiyun 
2915*4882a593Smuzhiyun static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
2916*4882a593Smuzhiyun 	.create = ath6kl_htc_mbox_create,
2917*4882a593Smuzhiyun 	.wait_target = ath6kl_htc_mbox_wait_target,
2918*4882a593Smuzhiyun 	.start = ath6kl_htc_mbox_start,
2919*4882a593Smuzhiyun 	.conn_service = ath6kl_htc_mbox_conn_service,
2920*4882a593Smuzhiyun 	.tx = ath6kl_htc_mbox_tx,
2921*4882a593Smuzhiyun 	.stop = ath6kl_htc_mbox_stop,
2922*4882a593Smuzhiyun 	.cleanup = ath6kl_htc_mbox_cleanup,
2923*4882a593Smuzhiyun 	.flush_txep = ath6kl_htc_mbox_flush_txep,
2924*4882a593Smuzhiyun 	.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
2925*4882a593Smuzhiyun 	.activity_changed = ath6kl_htc_mbox_activity_changed,
2926*4882a593Smuzhiyun 	.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
2927*4882a593Smuzhiyun 	.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
2928*4882a593Smuzhiyun 	.credit_setup = ath6kl_htc_mbox_credit_setup,
2929*4882a593Smuzhiyun };
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun void ath6kl_htc_mbox_attach(struct ath6kl *ar)
2932*4882a593Smuzhiyun {
2933*4882a593Smuzhiyun 	ar->htc_ops = &ath6kl_htc_mbox_ops;
2934*4882a593Smuzhiyun }
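
/*
 * The core selects this transport at probe time; after the attach,
 * all HTC traffic flows through ar->htc_ops. Hypothetical call order
 * (sketch):
 *
 *	ath6kl_htc_mbox_attach(ar);
 *	ar->htc_target = ath6kl_htc_create(ar);
 */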
2935