// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

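/* Each TX queue entry cycles through three lists: free_pool (unused),
 * queue (waiting for transmission) and pending (handed to firmware,
 * awaiting confirmation).  The generation counter lets stale packet
 * IDs be detected after an item has been reused.
 */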
/* private */ struct cw1200_queue_item
{
	struct list_head head;
	struct sk_buff *skb;
	u32 packet_id;
	unsigned long queue_timestamp;
	unsigned long xmit_timestamp;
	struct cw1200_txpriv txpriv;
	u8 generation;
};

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

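/* A packet ID encodes the full location and generation of an item:
 * bits [7:0] item index, [15:8] item generation, [23:16] queue ID,
 * [31:24] queue generation.
 */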
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id = (packet_id >> 0) & 0xFF;
	*item_generation = (packet_id >> 8) & 0xFF;
	*queue_id = (packet_id >> 16) & 0xFF;
	*queue_generation = (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

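/* Destroy garbage-collected items.  Called after queue->lock has been
 * dropped, so that skb_dtor() never runs under the spinlock.
 */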
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;

	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			  GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}

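/* Expire items whose TTL has elapsed, moving copies onto @head for
 * deferred destruction.  Clears the overfull condition once the queue
 * drains to half capacity, otherwise re-arms the GC timer for the next
 * expiry.  Must be called with queue->lock held.
 */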
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

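/* Flush every queued and pending frame.  Bumping queue->generation
 * invalidates all outstanding packet IDs for this queue.
 */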
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

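/* Enqueue an skb for transmission, drawing an item from the free pool.
 * When the queue approaches capacity it is marked overfull and the
 * corresponding mac80211 queue is stopped until it drains.
 */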
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (!queue->overfull &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

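/* Fetch the first queued frame whose link ID matches @link_id_map and
 * move it to the pending list.  The caller transmits it and later
 * confirms it with cw1200_queue_remove() or returns it with
 * cw1200_queue_requeue().
 */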
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

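/* Return an in-flight frame to the TX queue.  The item generation is
 * bumped, so the old packet ID can no longer address this item.
 */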
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

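/* Release a frame that the firmware has confirmed.  The item returns
 * to the free pool and the skb destructor is invoked after
 * queue->lock has been dropped.
 */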
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

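/* Look up the skb and txpriv for a packet ID without changing the
 * item's state.
 */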
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

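/* Report whether any frames are pending and lower *timestamp to the
 * oldest xmit_timestamp found, skipping @pending_frame_id.
 */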
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;

		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}