/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef __MT76_UTIL_H
#define __MT76_UTIL_H

#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <net/mac80211.h>

/* Kthread-backed deferred-work context used by the mt76 helpers below. */
struct mt76_worker
{
	struct task_struct *task;		/* worker kthread; NULL until setup succeeds */
	void (*fn)(struct mt76_worker *);	/* callback invoked by the worker thread */
	unsigned long state;			/* MT76_WORKER_* bit flags */
};
/* Bit positions for mt76_worker::state. */
enum {
	MT76_WORKER_SCHEDULED,	/* set by mt76_worker_schedule(); work is pending */
	MT76_WORKER_RUNNING,	/* presumably set while fn runs (set in __mt76_worker_fn — defined elsewhere) */
};
/*
 * Advance _var by one, wrapping to 0 at _size (ring-buffer index step).
 * Note: _var is evaluated more than once — do not pass expressions with
 * side effects.
 */
#define MT76_INCR(_var, _size) \
	(_var = (((_var) + 1) % (_size)))

/* Find and claim a free index in the wcid bitmask; returns index or -1 style error (defined elsewhere). */
int mt76_wcid_alloc(u32 *mask, int size);

32*4882a593Smuzhiyun static inline bool
mt76_wcid_mask_test(u32 * mask,int idx)33*4882a593Smuzhiyun mt76_wcid_mask_test(u32 *mask, int idx)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun return mask[idx / 32] & BIT(idx % 32);
36*4882a593Smuzhiyun }
38*4882a593Smuzhiyun static inline void
mt76_wcid_mask_set(u32 * mask,int idx)39*4882a593Smuzhiyun mt76_wcid_mask_set(u32 *mask, int idx)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun mask[idx / 32] |= BIT(idx % 32);
42*4882a593Smuzhiyun }
44*4882a593Smuzhiyun static inline void
mt76_wcid_mask_clear(u32 * mask,int idx)45*4882a593Smuzhiyun mt76_wcid_mask_clear(u32 *mask, int idx)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun mask[idx / 32] &= ~BIT(idx % 32);
48*4882a593Smuzhiyun }
50*4882a593Smuzhiyun static inline void
mt76_skb_set_moredata(struct sk_buff * skb,bool enable)51*4882a593Smuzhiyun mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun if (enable)
56*4882a593Smuzhiyun hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
57*4882a593Smuzhiyun else
58*4882a593Smuzhiyun hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
59*4882a593Smuzhiyun }

/* Worker thread main loop; runs until kthread_stop() (defined elsewhere). */
int __mt76_worker_fn(void *ptr);

63*4882a593Smuzhiyun static inline int
mt76_worker_setup(struct ieee80211_hw * hw,struct mt76_worker * w,void (* fn)(struct mt76_worker *),const char * name)64*4882a593Smuzhiyun mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
65*4882a593Smuzhiyun void (*fn)(struct mt76_worker *),
66*4882a593Smuzhiyun const char *name)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun const char *dev_name = wiphy_name(hw->wiphy);
69*4882a593Smuzhiyun int ret;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun if (fn)
72*4882a593Smuzhiyun w->fn = fn;
73*4882a593Smuzhiyun w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
74*4882a593Smuzhiyun name, dev_name);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun ret = PTR_ERR_OR_ZERO(w->task);
77*4882a593Smuzhiyun if (ret) {
78*4882a593Smuzhiyun w->task = NULL;
79*4882a593Smuzhiyun return ret;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun wake_up_process(w->task);
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun return 0;
85*4882a593Smuzhiyun }
mt76_worker_schedule(struct mt76_worker * w)87*4882a593Smuzhiyun static inline void mt76_worker_schedule(struct mt76_worker *w)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun if (!w->task)
90*4882a593Smuzhiyun return;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
93*4882a593Smuzhiyun !test_bit(MT76_WORKER_RUNNING, &w->state))
94*4882a593Smuzhiyun wake_up_process(w->task);
95*4882a593Smuzhiyun }
mt76_worker_disable(struct mt76_worker * w)97*4882a593Smuzhiyun static inline void mt76_worker_disable(struct mt76_worker *w)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun if (!w->task)
100*4882a593Smuzhiyun return;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun kthread_park(w->task);
103*4882a593Smuzhiyun WRITE_ONCE(w->state, 0);
104*4882a593Smuzhiyun }
mt76_worker_enable(struct mt76_worker * w)106*4882a593Smuzhiyun static inline void mt76_worker_enable(struct mt76_worker *w)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun if (!w->task)
109*4882a593Smuzhiyun return;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun kthread_unpark(w->task);
112*4882a593Smuzhiyun mt76_worker_schedule(w);
113*4882a593Smuzhiyun }
mt76_worker_teardown(struct mt76_worker * w)115*4882a593Smuzhiyun static inline void mt76_worker_teardown(struct mt76_worker *w)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun if (!w->task)
118*4882a593Smuzhiyun return;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun kthread_stop(w->task);
121*4882a593Smuzhiyun w->task = NULL;
122*4882a593Smuzhiyun }

#endif