1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
4*4882a593Smuzhiyun * Copyright (C) 2019-2020 Intel Corporation
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun #include <linux/netdevice.h>
7*4882a593Smuzhiyun #include <linux/types.h>
8*4882a593Smuzhiyun #include <linux/skbuff.h>
9*4882a593Smuzhiyun #include <linux/debugfs.h>
10*4882a593Smuzhiyun #include <linux/random.h>
11*4882a593Smuzhiyun #include <linux/moduleparam.h>
12*4882a593Smuzhiyun #include <linux/ieee80211.h>
13*4882a593Smuzhiyun #include <net/mac80211.h>
14*4882a593Smuzhiyun #include "rate.h"
15*4882a593Smuzhiyun #include "sta_info.h"
16*4882a593Smuzhiyun #include "rc80211_minstrel.h"
17*4882a593Smuzhiyun #include "rc80211_minstrel_ht.h"
18*4882a593Smuzhiyun
/* Average A-MPDU length assumed when amortizing airtime over an aggregate
 * (MCS_DURATION divides by this to get a per-packet duration)
 */
#define AVG_AMPDU_SIZE	16
/* Average packet size (bytes) used for all transmit-duration estimates */
#define AVG_PKT_SIZE	1200

/* NOTE(review): not referenced in this chunk - presumably used by the
 * sample-switch logic further down in the file; confirm before removing.
 */
#define SAMPLE_SWITCH_THR	100

/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)

/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))

/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms)					\
	(sgi ?								\
	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
	  ((syms) * 1000) << 2		/* syms * 4 us */		\
	)

/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
	(MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)

/* Channel bandwidth indices; used as the bandwidth key in the group macros */
#define BW_20			0
#define BW_40			1
#define BW_80			2

/*
 * Define group sort order: HT40 -> SGI -> #streams
 */
#define GROUP_IDX(_streams, _sgi, _ht40)				\
	MINSTREL_HT_GROUP_0 +						\
	MINSTREL_MAX_STREAMS * 2 * _ht40 +				\
	MINSTREL_MAX_STREAMS * _sgi +					\
	_streams - 1

#define _MAX(a, b) (((a)>(b))?(a):(b))

/* Number of bits to right-shift a group's durations so the largest one
 * fits in 16 bits (mcs_group.duration storage); 0 if it already fits.
 */
#define GROUP_SHIFT(duration)						\
	_MAX(0, 16 - __builtin_clz(duration))
58*4882a593Smuzhiyun
/* MCS rate information for an MCS group
 * (_s is the precomputed duration right-shift for the group, see GROUP_SHIFT;
 * stored durations must be shifted back left by .shift when used)
 */
#define __MCS_GROUP(_streams, _sgi, _ht40, _s)				\
	[GROUP_IDX(_streams, _sgi, _ht40)] = {				\
	.streams = _streams,						\
	.shift = _s,							\
	.bw = _ht40,							\
	.flags =							\
		IEEE80211_TX_RC_MCS |					\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),		\
	.duration = {							\
		MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s,	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s	\
	}								\
}

/* Group shift derived from the group's longest (lowest MCS) duration */
#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40)				\
	GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))

#define MCS_GROUP(_streams, _sgi, _ht40)				\
	__MCS_GROUP(_streams, _sgi, _ht40,				\
		    MCS_GROUP_SHIFT(_streams, _sgi, _ht40))

/* VHT group index; sort order is fixed: BW -> SGI -> #streams */
#define VHT_GROUP_IDX(_streams, _sgi, _bw)				\
	(MINSTREL_VHT_GROUP_0 +						\
	 MINSTREL_MAX_STREAMS * 2 * (_bw) +				\
	 MINSTREL_MAX_STREAMS * (_sgi) +				\
	 (_streams) - 1)

/* Pick the bits-per-symbol value for 80/40/20 MHz respectively */
#define BW2VBPS(_bw, r3, r2, r1)					\
	(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
96*4882a593Smuzhiyun
/* VHT rate information for a group; same layout as __MCS_GROUP but with
 * ten MCS entries (MCS0-9) and a 20/40/80 MHz bandwidth selector
 */
#define __VHT_GROUP(_streams, _sgi, _bw, _s)				\
	[VHT_GROUP_IDX(_streams, _sgi, _bw)] = {			\
	.streams = _streams,						\
	.shift = _s,							\
	.bw = _bw,							\
	.flags =							\
		IEEE80211_TX_RC_VHT_MCS |				\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH :		\
		 _bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),	\
	.duration = {							\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  117,  54,  26)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  234, 108,  52)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  351, 162,  78)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  468, 216, 104)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  702, 324, 156)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  936, 432, 208)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1053, 486, 234)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1170, 540, 260)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1404, 648, 312)) >> _s,	\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1560, 720, 346)) >> _s	\
	}								\
}

/* Group shift derived from the group's longest (MCS0) duration */
#define VHT_GROUP_SHIFT(_streams, _sgi, _bw)				\
	GROUP_SHIFT(MCS_DURATION(_streams, _sgi,			\
				 BW2VBPS(_bw, 117, 54, 26)))

#define VHT_GROUP(_streams, _sgi, _bw)					\
	__VHT_GROUP(_streams, _sgi, _bw,				\
		    VHT_GROUP_SHIFT(_streams, _sgi, _bw))
138*4882a593Smuzhiyun
/* Airtime (ns) of a (_len)-byte CCK frame at (_bitrate) (units of 100 kbps),
 * including SIFS and the long or short preamble+PLCP header
 */
#define CCK_DURATION(_bitrate, _short, _len)		\
	(1000 * (10 /* SIFS */ +			\
	 (_short ? 72 + 24 : 144 + 48) +		\
	 (8 * (_len + 4) * 10) / (_bitrate)))

/* Frame airtime plus the ACK, sent at the 1 or 2 Mbps basic rate */
#define CCK_ACK_DURATION(_bitrate, _short)			\
	(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +	\
	 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))

/* Durations for the 1/2/5.5/11 Mbps rates, pre-shifted by _s */
#define CCK_DURATION_LIST(_short, _s)			\
	CCK_ACK_DURATION(10, _short) >> _s,		\
	CCK_ACK_DURATION(20, _short) >> _s,		\
	CCK_ACK_DURATION(55, _short) >> _s,		\
	CCK_ACK_DURATION(110, _short) >> _s

/* CCK group: long preamble rates at index 0-3, short preamble at 4-7 */
#define __CCK_GROUP(_s)					\
	[MINSTREL_CCK_GROUP] = {			\
		.streams = 1,				\
		.flags = 0,				\
		.shift = _s,				\
		.duration = {				\
			CCK_DURATION_LIST(false, _s),	\
			CCK_DURATION_LIST(true, _s)	\
		}					\
	}

/* Shift derived from the longest duration (1 Mbps, long preamble) */
#define CCK_GROUP_SHIFT					\
	GROUP_SHIFT(CCK_ACK_DURATION(10, false))

#define CCK_GROUP __CCK_GROUP(CCK_GROUP_SHIFT)
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun
/* When set (the default), VHT-capable stations are restricted to VHT rates
 * only; writable at runtime via the 0644 module parameter.
 */
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
		 "Use only VHT rates when VHT is supported by sta.");
175*4882a593Smuzhiyun
/*
 * To enable sufficiently targeted rate sampling, MCS rates are divided into
 * groups, based on the number of streams and flags (HT40, SGI) that they
 * use.
 *
 * Sortorder has to be fixed for GROUP_IDX macro to be applicable:
 * BW -> SGI -> #streams
 */
const struct mcs_group minstrel_mcs_groups[] = {
	MCS_GROUP(1, 0, BW_20),
	MCS_GROUP(2, 0, BW_20),
	MCS_GROUP(3, 0, BW_20),
	MCS_GROUP(4, 0, BW_20),

	MCS_GROUP(1, 1, BW_20),
	MCS_GROUP(2, 1, BW_20),
	MCS_GROUP(3, 1, BW_20),
	MCS_GROUP(4, 1, BW_20),

	MCS_GROUP(1, 0, BW_40),
	MCS_GROUP(2, 0, BW_40),
	MCS_GROUP(3, 0, BW_40),
	MCS_GROUP(4, 0, BW_40),

	MCS_GROUP(1, 1, BW_40),
	MCS_GROUP(2, 1, BW_40),
	MCS_GROUP(3, 1, BW_40),
	MCS_GROUP(4, 1, BW_40),

	CCK_GROUP,

	VHT_GROUP(1, 0, BW_20),
	VHT_GROUP(2, 0, BW_20),
	VHT_GROUP(3, 0, BW_20),
	VHT_GROUP(4, 0, BW_20),

	VHT_GROUP(1, 1, BW_20),
	VHT_GROUP(2, 1, BW_20),
	VHT_GROUP(3, 1, BW_20),
	VHT_GROUP(4, 1, BW_20),

	VHT_GROUP(1, 0, BW_40),
	VHT_GROUP(2, 0, BW_40),
	VHT_GROUP(3, 0, BW_40),
	VHT_GROUP(4, 0, BW_40),

	VHT_GROUP(1, 1, BW_40),
	VHT_GROUP(2, 1, BW_40),
	VHT_GROUP(3, 1, BW_40),
	VHT_GROUP(4, 1, BW_40),

	VHT_GROUP(1, 0, BW_80),
	VHT_GROUP(2, 0, BW_80),
	VHT_GROUP(3, 0, BW_80),
	VHT_GROUP(4, 0, BW_80),

	VHT_GROUP(1, 1, BW_80),
	VHT_GROUP(2, 1, BW_80),
	VHT_GROUP(3, 1, BW_80),
	VHT_GROUP(4, 1, BW_80),
};

/* Per-column sample order table; contents are filled in elsewhere
 * (not visible in this chunk).
 */
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;

static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
245*4882a593Smuzhiyun * e.g for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * Returns the valid mcs map for struct minstrel_mcs_group_data.supported
248*4882a593Smuzhiyun */
249*4882a593Smuzhiyun static u16
minstrel_get_valid_vht_rates(int bw,int nss,__le16 mcs_map)250*4882a593Smuzhiyun minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
251*4882a593Smuzhiyun {
252*4882a593Smuzhiyun u16 mask = 0;
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun if (bw == BW_20) {
255*4882a593Smuzhiyun if (nss != 3 && nss != 6)
256*4882a593Smuzhiyun mask = BIT(9);
257*4882a593Smuzhiyun } else if (bw == BW_80) {
258*4882a593Smuzhiyun if (nss == 3 || nss == 7)
259*4882a593Smuzhiyun mask = BIT(6);
260*4882a593Smuzhiyun else if (nss == 6)
261*4882a593Smuzhiyun mask = BIT(9);
262*4882a593Smuzhiyun } else {
263*4882a593Smuzhiyun WARN_ON(bw != BW_40);
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) {
267*4882a593Smuzhiyun case IEEE80211_VHT_MCS_SUPPORT_0_7:
268*4882a593Smuzhiyun mask |= 0x300;
269*4882a593Smuzhiyun break;
270*4882a593Smuzhiyun case IEEE80211_VHT_MCS_SUPPORT_0_8:
271*4882a593Smuzhiyun mask |= 0x200;
272*4882a593Smuzhiyun break;
273*4882a593Smuzhiyun case IEEE80211_VHT_MCS_SUPPORT_0_9:
274*4882a593Smuzhiyun break;
275*4882a593Smuzhiyun default:
276*4882a593Smuzhiyun mask = 0x3ff;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun return 0x3ff & ~mask;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun /*
283*4882a593Smuzhiyun * Look up an MCS group index based on mac80211 rate information
284*4882a593Smuzhiyun */
285*4882a593Smuzhiyun static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate * rate)286*4882a593Smuzhiyun minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun return GROUP_IDX((rate->idx / 8) + 1,
289*4882a593Smuzhiyun !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
290*4882a593Smuzhiyun !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun static int
minstrel_vht_get_group_idx(struct ieee80211_tx_rate * rate)294*4882a593Smuzhiyun minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate),
297*4882a593Smuzhiyun !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
298*4882a593Smuzhiyun !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) +
299*4882a593Smuzhiyun 2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH));
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv * mp,struct minstrel_ht_sta * mi,struct ieee80211_tx_rate * rate)303*4882a593Smuzhiyun minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
304*4882a593Smuzhiyun struct ieee80211_tx_rate *rate)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun int group, idx;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun if (rate->flags & IEEE80211_TX_RC_MCS) {
309*4882a593Smuzhiyun group = minstrel_ht_get_group_idx(rate);
310*4882a593Smuzhiyun idx = rate->idx % 8;
311*4882a593Smuzhiyun } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
312*4882a593Smuzhiyun group = minstrel_vht_get_group_idx(rate);
313*4882a593Smuzhiyun idx = ieee80211_rate_get_vht_mcs(rate);
314*4882a593Smuzhiyun } else {
315*4882a593Smuzhiyun group = MINSTREL_CCK_GROUP;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
318*4882a593Smuzhiyun if (rate->idx == mp->cck_rates[idx])
319*4882a593Smuzhiyun break;
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /* short preamble */
322*4882a593Smuzhiyun if ((mi->supported[group] & BIT(idx + 4)) &&
323*4882a593Smuzhiyun (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
324*4882a593Smuzhiyun idx += 4;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun return &mi->groups[group].rates[idx];
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta * mi,int index)330*4882a593Smuzhiyun minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun static unsigned int
minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta * mi)336*4882a593Smuzhiyun minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi)
337*4882a593Smuzhiyun {
338*4882a593Smuzhiyun if (!mi->avg_ampdu_len)
339*4882a593Smuzhiyun return AVG_AMPDU_SIZE;
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun return MINSTREL_TRUNC(mi->avg_ampdu_len);
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun /*
345*4882a593Smuzhiyun * Return current throughput based on the average A-MPDU length, taking into
346*4882a593Smuzhiyun * account the expected number of retransmissions and their expected length
347*4882a593Smuzhiyun */
348*4882a593Smuzhiyun int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta * mi,int group,int rate,int prob_avg)349*4882a593Smuzhiyun minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
350*4882a593Smuzhiyun int prob_avg)
351*4882a593Smuzhiyun {
352*4882a593Smuzhiyun unsigned int nsecs = 0;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun /* do not account throughput if sucess prob is below 10% */
355*4882a593Smuzhiyun if (prob_avg < MINSTREL_FRAC(10, 100))
356*4882a593Smuzhiyun return 0;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun if (group != MINSTREL_CCK_GROUP)
359*4882a593Smuzhiyun nsecs = 1000 * mi->overhead / minstrel_ht_avg_ampdu_len(mi);
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun nsecs += minstrel_mcs_groups[group].duration[rate] <<
362*4882a593Smuzhiyun minstrel_mcs_groups[group].shift;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun /*
365*4882a593Smuzhiyun * For the throughput calculation, limit the probability value to 90% to
366*4882a593Smuzhiyun * account for collision related packet error rate fluctuation
367*4882a593Smuzhiyun * (prob is scaled - see MINSTREL_FRAC above)
368*4882a593Smuzhiyun */
369*4882a593Smuzhiyun if (prob_avg > MINSTREL_FRAC(90, 100))
370*4882a593Smuzhiyun return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
371*4882a593Smuzhiyun / nsecs));
372*4882a593Smuzhiyun else
373*4882a593Smuzhiyun return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun
/*
 * Find & sort topmost throughput rates
 *
 * If multiple rates provide equal throughput the sorting is based on their
 * current success probability. Higher success probability is preferred among
 * MCS groups, CCK rates do not provide aggregation and are therefore at last.
 */
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
			       u16 *tp_list)
{
	int cur_group, cur_idx, cur_tp_avg, cur_prob;
	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
	int j = MAX_THR_RATES;

	/* throughput/probability of the candidate rate */
	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index % MCS_GROUP_RATES;
	cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
	cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);

	/* scan tp_list upwards from the worst slot; stop at the first entry
	 * that is at least as good as the candidate (ties broken by prob)
	 */
	do {
		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
		tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
						    tmp_prob);
		if (cur_tp_avg < tmp_tp_avg ||
		    (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
			break;
		j--;
	} while (j > 0);

	/* shift the worse entries down one slot and insert the candidate;
	 * if j == MAX_THR_RATES the candidate did not make the list
	 */
	if (j < MAX_THR_RATES - 1) {
		memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
		       (MAX_THR_RATES - (j + 1))));
	}
	if (j < MAX_THR_RATES)
		tp_list[j] = index;
}
415*4882a593Smuzhiyun
/*
 * Find and set the topmost probability rate per sta and per group
 */
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
	int max_tp_group, cur_tp_avg, cur_group, cur_idx;
	int max_gpr_group, max_gpr_idx;
	int max_gpr_tp_avg, max_gpr_prob;

	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index % MCS_GROUP_RATES;
	mg = &mi->groups[index / MCS_GROUP_RATES];
	mrs = &mg->rates[index % MCS_GROUP_RATES];

	/* stats of the current sta-wide max_prob_rate, for comparison */
	tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
	tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
	tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
	 * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */
	max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
	    (max_tp_group != MINSTREL_CCK_GROUP))
		return;

	/* stats of this group's current best-probability rate */
	max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
	max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;

	if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
		/* above 75% delivery: compare by throughput instead of
		 * raw probability */
		cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
						    mrs->prob_avg);
		if (cur_tp_avg > tmp_tp_avg)
			mi->max_prob_rate = index;

		max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
							max_gpr_idx,
							max_gpr_prob);
		if (cur_tp_avg > max_gpr_tp_avg)
			mg->max_group_prob_rate = index;
	} else {
		/* otherwise compare by probability only */
		if (mrs->prob_avg > tmp_prob)
			mi->max_prob_rate = index;
		if (mrs->prob_avg > max_gpr_prob)
			mg->max_group_prob_rate = index;
	}
}
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun
/*
 * Assign new rate set per sta and use CCK rates only if the fastest
 * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted
 * rate sets where MCS and CCK rates are mixed, because CCK rates can
 * not use aggregation.
 */
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
				 u16 tmp_mcs_tp_rate[MAX_THR_RATES],
				 u16 tmp_cck_tp_rate[MAX_THR_RATES])
{
	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
	int i;

	/* throughput of the best CCK candidate */
	tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
	tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* throughput of the best MCS candidate */
	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
	tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* only if CCK beats MCS outright, merge the CCK candidates into the
	 * MCS list (minstrel_ht_sort_best_tp_rates keeps it sorted)
	 */
	if (tmp_cck_tp > tmp_mcs_tp) {
		for(i = 0; i < MAX_THR_RATES; i++) {
			minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
						       tmp_mcs_tp_rate);
		}
	}

}
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun /*
504*4882a593Smuzhiyun * Try to increase robustness of max_prob rate by decrease number of
505*4882a593Smuzhiyun * streams if possible.
506*4882a593Smuzhiyun */
507*4882a593Smuzhiyun static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta * mi)508*4882a593Smuzhiyun minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun struct minstrel_mcs_group_data *mg;
511*4882a593Smuzhiyun int tmp_max_streams, group, tmp_idx, tmp_prob;
512*4882a593Smuzhiyun int tmp_tp = 0;
513*4882a593Smuzhiyun
514*4882a593Smuzhiyun tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
515*4882a593Smuzhiyun MCS_GROUP_RATES].streams;
516*4882a593Smuzhiyun for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
517*4882a593Smuzhiyun mg = &mi->groups[group];
518*4882a593Smuzhiyun if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
519*4882a593Smuzhiyun continue;
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
522*4882a593Smuzhiyun tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
525*4882a593Smuzhiyun (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
526*4882a593Smuzhiyun mi->max_prob_rate = mg->max_group_prob_rate;
527*4882a593Smuzhiyun tmp_tp = minstrel_ht_get_tp_avg(mi, group,
528*4882a593Smuzhiyun tmp_idx,
529*4882a593Smuzhiyun tmp_prob);
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun static inline int
minstrel_get_duration(int index)535*4882a593Smuzhiyun minstrel_get_duration(int index)
536*4882a593Smuzhiyun {
537*4882a593Smuzhiyun const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
538*4882a593Smuzhiyun unsigned int duration = group->duration[index % MCS_GROUP_RATES];
539*4882a593Smuzhiyun return duration << group->shift;
540*4882a593Smuzhiyun }
541*4882a593Smuzhiyun
542*4882a593Smuzhiyun static bool
minstrel_ht_probe_group(struct minstrel_ht_sta * mi,const struct mcs_group * tp_group,int tp_idx,const struct mcs_group * group)543*4882a593Smuzhiyun minstrel_ht_probe_group(struct minstrel_ht_sta *mi, const struct mcs_group *tp_group,
544*4882a593Smuzhiyun int tp_idx, const struct mcs_group *group)
545*4882a593Smuzhiyun {
546*4882a593Smuzhiyun if (group->bw < tp_group->bw)
547*4882a593Smuzhiyun return false;
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun if (group->streams == tp_group->streams)
550*4882a593Smuzhiyun return true;
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun if (tp_idx < 4 && group->streams == tp_group->streams - 1)
553*4882a593Smuzhiyun return true;
554*4882a593Smuzhiyun
555*4882a593Smuzhiyun return group->streams == tp_group->streams + 1;
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun
/* Collect one candidate probe rate per eligible group: the first supported
 * rate in the group that is no slower than the current max throughput rate
 * (or 6.25% faster than it when faster_rate is set).
 */
static void
minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rates,
			     bool faster_rate)
{
	const struct mcs_group *group, *tp_group;
	int i, g, max_dur;
	int tp_idx;

	tp_group = &minstrel_mcs_groups[mi->max_tp_rate[0] / MCS_GROUP_RATES];
	tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES;

	/* duration budget; shrink by 1/16 (6.25%) to demand a faster rate */
	max_dur = minstrel_get_duration(mi->max_tp_rate[0]);
	if (faster_rate)
		max_dur -= max_dur / 16;

	for (g = 0; g < MINSTREL_GROUPS_NB; g++) {
		u16 supported = mi->supported[g];

		if (!supported)
			continue;

		group = &minstrel_mcs_groups[g];
		if (!minstrel_ht_probe_group(mi, tp_group, tp_idx, group))
			continue;

		/* walk the supported-rate bitmap; bit i = rate i in group g */
		for (i = 0; supported; supported >>= 1, i++) {
			int idx;

			if (!(supported & 1))
				continue;

			/* skip rates slower than the duration budget */
			if ((group->duration[i] << group->shift) > max_dur)
				continue;

			idx = g * MCS_GROUP_RATES + i;
			if (idx == mi->max_tp_rate[0])
				continue;

			/* at most one candidate per group */
			rates[(*n_rates)++] = idx;
			break;
		}
	}
}
601*4882a593Smuzhiyun
/* Pick a probe rate and switch the station into active sampling mode.
 * Only used for hardware that supports a single tx rate (max_rates == 1),
 * where per-packet probing with retry fallback is not available.
 */
static void
minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
			       struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mrs;
	u16 rates[MINSTREL_GROUPS_NB];
	int n_rates = 0;
	int probe_rate = 0;
	bool faster_rate;
	int i;
	u8 random;

	/*
	 * Use rate switching instead of probing packets for devices with
	 * little control over retry fallback behavior
	 */
	if (mp->hw->max_rates > 1)
		return;

	/*
	 * If the current EWMA prob is >75%, look for a rate that's 6.25%
	 * faster than the max tp rate.
	 * If that fails, look again for a rate that is at least as fast
	 */
	mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
	faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
	minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
	if (!n_rates && faster_rate)
		minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);

	/* If no suitable rate was found, try to pick the next one in the group */
	if (!n_rates) {
		int g_idx = mi->max_tp_rate[0] / MCS_GROUP_RATES;
		u16 supported = mi->supported[g_idx];

		/* scan the supported bitmap upwards from the max tp rate */
		supported >>= mi->max_tp_rate[0] % MCS_GROUP_RATES;
		for (i = 0; supported; supported >>= 1, i++) {
			if (!(supported & 1))
				continue;

			probe_rate = mi->max_tp_rate[0] + i;
			goto out;
		}

		return;
	}

	/* choose uniformly among the collected candidates */
	i = 0;
	if (n_rates > 1) {
		random = prandom_u32();
		i = random % n_rates;
	}
	probe_rate = rates[i];

out:
	mi->sample_rate = probe_rate;
	mi->sample_mode = MINSTREL_SAMPLE_ACTIVE;
}
660*4882a593Smuzhiyun
/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 * - max_prob_rate must use only one stream, as a tradeoff between delivery
 *   probability and throughput during strong fluctuations
 * - as long as the max prob rate has a probability of more than 75%, pick
 *   higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
			 bool sample)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int group, i, j, cur_prob;
	u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
	u16 tmp_cck_tp_rate[MAX_THR_RATES], index;

	mi->sample_mode = MINSTREL_SAMPLE_IDLE;

	/* Refresh the per-interval packet counter used below and by the
	 * sample-switch heuristics in minstrel_get_sample_rate() */
	if (sample) {
		mi->total_packets_cur = mi->total_packets -
					mi->total_packets_last;
		mi->total_packets_last = mi->total_packets;
	}
	/* Rate-switch sampling only when enabled; with little traffic it is
	 * skipped unless sample_switch is forced on (== 1) */
	if (!mp->sample_switch)
		sample = false;
	if (mi->total_packets_cur < SAMPLE_SWITCH_THR && mp->sample_switch != 1)
		sample = false;

	/* Fold this interval's A-MPDU length into the moving average, unless
	 * the driver reports no A-MPDU length info */
	if (mi->ampdu_packets > 0) {
		if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
			mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
				MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets),
				EWMA_LEVEL);
		else
			mi->avg_ampdu_len = 0;
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;

	/* Seed the temporary best-rate lists with the lowest supported
	 * CCK/OFDM index so the sort below always has a valid baseline */
	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
	memset(tmp_cck_tp_rate, 0, sizeof(tmp_cck_tp_rate));
	if (mi->supported[MINSTREL_CCK_GROUP])
		for (j = 0; j < ARRAY_SIZE(tmp_cck_tp_rate); j++)
			tmp_cck_tp_rate[j] = MINSTREL_CCK_GROUP * MCS_GROUP_RATES;

	if (mi->supported[MINSTREL_VHT_GROUP_0])
		index = MINSTREL_VHT_GROUP_0 * MCS_GROUP_RATES;
	else
		index = MINSTREL_HT_GROUP_0 * MCS_GROUP_RATES;

	for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++)
		tmp_mcs_tp_rate[j] = index;

	/* Find best rate sets within all MCS groups*/
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {

		mg = &mi->groups[group];
		if (!mi->supported[group])
			continue;

		/* sample_count here counts supported groups; it is scaled
		 * into a sampling budget after the loop */
		mi->sample_count++;

		/* (re)Initialize group rate indexes */
		for(j = 0; j < MAX_THR_RATES; j++)
			tmp_group_tp_rate[j] = MCS_GROUP_RATES * group;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mi->supported[group] & BIT(i)))
				continue;

			index = MCS_GROUP_RATES * group + i;

			/* recalculate EWMA success probability for this rate */
			mrs = &mg->rates[i];
			mrs->retry_updated = false;
			minstrel_calc_rate_stats(mp, mrs);
			cur_prob = mrs->prob_avg;

			/* skip rates with no estimated throughput */
			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
				continue;

			/* Find max throughput rate set; CCK rates are sorted
			 * into a separate list and merged afterwards */
			if (group != MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_mcs_tp_rate);
			} else if (group == MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_cck_tp_rate);
			}

			/* Find max throughput rate set within a group */
			minstrel_ht_sort_best_tp_rates(mi, index,
						       tmp_group_tp_rate);

			/* Find max probability rate per group and global */
			minstrel_ht_set_best_prob_rate(mi, index);
		}

		memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
		       sizeof(mg->max_group_tp_rate));
	}

	/* Assign new rate set per sta */
	minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
	memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));

	/* Try to increase robustness of max_prob_rate*/
	minstrel_ht_prob_rate_reduce_streams(mi);

	/* try to sample all available rates during each interval */
	mi->sample_count *= 8;
	if (mp->new_avg)
		mi->sample_count /= 2;

	if (sample)
		minstrel_ht_rate_sample_switch(mp, mi);

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		for (i = 0; i < 4; i++)
			mi->max_tp_rate[i] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
		mi->sample_mode = MINSTREL_SAMPLE_IDLE;
	}
#endif

	/* Reset update timer */
	mi->last_stats_update = jiffies;
}
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun static bool
minstrel_ht_txstat_valid(struct minstrel_priv * mp,struct ieee80211_tx_rate * rate)798*4882a593Smuzhiyun minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun if (rate->idx < 0)
801*4882a593Smuzhiyun return false;
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun if (!rate->count)
804*4882a593Smuzhiyun return false;
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun if (rate->flags & IEEE80211_TX_RC_MCS ||
807*4882a593Smuzhiyun rate->flags & IEEE80211_TX_RC_VHT_MCS)
808*4882a593Smuzhiyun return true;
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun return rate->idx == mp->cck_rates[0] ||
811*4882a593Smuzhiyun rate->idx == mp->cck_rates[1] ||
812*4882a593Smuzhiyun rate->idx == mp->cck_rates[2] ||
813*4882a593Smuzhiyun rate->idx == mp->cck_rates[3];
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun static void
minstrel_set_next_sample_idx(struct minstrel_ht_sta * mi)817*4882a593Smuzhiyun minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
818*4882a593Smuzhiyun {
819*4882a593Smuzhiyun struct minstrel_mcs_group_data *mg;
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun for (;;) {
822*4882a593Smuzhiyun mi->sample_group++;
823*4882a593Smuzhiyun mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
824*4882a593Smuzhiyun mg = &mi->groups[mi->sample_group];
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun if (!mi->supported[mi->sample_group])
827*4882a593Smuzhiyun continue;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun if (++mg->index >= MCS_GROUP_RATES) {
830*4882a593Smuzhiyun mg->index = 0;
831*4882a593Smuzhiyun if (++mg->column >= ARRAY_SIZE(sample_table))
832*4882a593Smuzhiyun mg->column = 0;
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun break;
835*4882a593Smuzhiyun }
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun static void
minstrel_downgrade_rate(struct minstrel_ht_sta * mi,u16 * idx,bool primary)839*4882a593Smuzhiyun minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
840*4882a593Smuzhiyun {
841*4882a593Smuzhiyun int group, orig_group;
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun orig_group = group = *idx / MCS_GROUP_RATES;
844*4882a593Smuzhiyun while (group > 0) {
845*4882a593Smuzhiyun group--;
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun if (!mi->supported[group])
848*4882a593Smuzhiyun continue;
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun if (minstrel_mcs_groups[group].streams >
851*4882a593Smuzhiyun minstrel_mcs_groups[orig_group].streams)
852*4882a593Smuzhiyun continue;
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun if (primary)
855*4882a593Smuzhiyun *idx = mi->groups[group].max_group_tp_rate[0];
856*4882a593Smuzhiyun else
857*4882a593Smuzhiyun *idx = mi->groups[group].max_group_tp_rate[1];
858*4882a593Smuzhiyun break;
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun static void
minstrel_aggr_check(struct ieee80211_sta * pubsta,struct sk_buff * skb)863*4882a593Smuzhiyun minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
864*4882a593Smuzhiyun {
865*4882a593Smuzhiyun struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
866*4882a593Smuzhiyun struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
867*4882a593Smuzhiyun u16 tid;
868*4882a593Smuzhiyun
869*4882a593Smuzhiyun if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
870*4882a593Smuzhiyun return;
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
873*4882a593Smuzhiyun return;
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
876*4882a593Smuzhiyun return;
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun tid = ieee80211_get_tid(hdr);
879*4882a593Smuzhiyun if (likely(sta->ampdu_mlme.tid_tx[tid]))
880*4882a593Smuzhiyun return;
881*4882a593Smuzhiyun
882*4882a593Smuzhiyun ieee80211_start_tx_ba_session(pubsta, tid, 0);
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
/*
 * Process tx status feedback for one (possibly aggregated) frame:
 * update per-rate success/attempt counters, drive the single-rate
 * sample-switch state machine, and trigger a stats update / rate
 * table refresh when needed.
 */
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                      void *priv_sta, struct ieee80211_tx_status *st)
{
	struct ieee80211_tx_info *info = st->info;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
	struct minstrel_priv *mp = priv;
	u32 update_interval = mp->update_interval / 2;
	bool last, update = false;
	bool sample_status = false;
	int i;

	/* non-HT stations are handled by the legacy minstrel algorithm */
	if (!msp->is_ht)
		return mac80211_minstrel.tx_status_ext(priv, sband,
						       &msp->legacy, st);


	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/* treat a non-aggregated frame as an A-MPDU of length 1 */
	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
		info->status.ampdu_ack_len =
			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
		info->status.ampdu_len = 1;
	}

	mi->ampdu_packets++;
	mi->ampdu_len += info->status.ampdu_len;

	/* replenish the sampling budget once the previous wait expired */
	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
		int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi);

		mi->sample_wait = 16 + 2 * avg_ampdu_len;
		mi->sample_tries = 1;
		mi->sample_count--;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		mi->sample_packets += info->status.ampdu_len;

	/* remember the stats entry of the active sample-switch rate so we
	 * can tell below whether this status report belongs to it */
	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);

	/* walk the MRR chain; only the last tried rate gets the successes,
	 * every tried rate gets the attempts */
	last = !minstrel_ht_txstat_valid(mp, &ar[0]);
	for (i = 0; !last; i++) {
		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
		       !minstrel_ht_txstat_valid(mp, &ar[i + 1]);

		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
		if (rate == rate_sample)
			sample_status = true;

		if (last)
			rate->success += info->status.ampdu_ack_len;

		rate->attempts += ar[i].count * info->status.ampdu_len;
	}

	/* sample-switch state machine (single-rate hw only):
	 * ACTIVE -> PENDING once the sample rate shows up in tx status,
	 * PENDING -> stats update once a non-sample status arrives */
	switch (mi->sample_mode) {
	case MINSTREL_SAMPLE_IDLE:
		if (mp->new_avg &&
		    (mp->hw->max_rates > 1 ||
		     mi->total_packets_cur < SAMPLE_SWITCH_THR))
			update_interval /= 2;
		break;

	case MINSTREL_SAMPLE_ACTIVE:
		if (!sample_status)
			break;

		mi->sample_mode = MINSTREL_SAMPLE_PENDING;
		update = true;
		break;

	case MINSTREL_SAMPLE_PENDING:
		if (sample_status)
			break;

		update = true;
		minstrel_ht_update_stats(mp, mi, false);
		break;
	}


	if (mp->hw->max_rates > 1) {
		/*
		 * check for sudden death of spatial multiplexing,
		 * downgrade to a lower number of streams if necessary.
		 */
		rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
		if (rate->attempts > 30 &&
		    rate->success < rate->attempts / 4) {
			minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
			update = true;
		}

		rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
		if (rate2->attempts > 30 &&
		    rate2->success < rate2->attempts / 4) {
			minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
			update = true;
		}
	}

	/* periodic stats refresh */
	if (time_after(jiffies, mi->last_stats_update + update_interval)) {
		update = true;
		minstrel_ht_update_stats(mp, mi, true);
	}

	/* push the (possibly changed) rate table to mac80211 */
	if (update)
		minstrel_ht_update_rates(mp, mi);
}
1002*4882a593Smuzhiyun
/*
 * Compute how many transmission attempts fit into the configured tx
 * segment size for the given rate, with and without RTS/CTS protection.
 * Results are cached in mrs->retry_count / mrs->retry_count_rtscts.
 */
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
			 int index)
{
	struct minstrel_rate_stats *mrs;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int ctime = 0;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = minstrel_ht_avg_ampdu_len(mi);
	unsigned int overhead = 0, overhead_rtscts = 0;

	/* very lossy rate: don't waste airtime on retries */
	mrs = minstrel_get_ratestats(mi, index);
	if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
		mrs->retry_count = 1;
		mrs->retry_count_rtscts = 1;
		return;
	}

	/* start from 2 tries; the loop below may add more */
	mrs->retry_count = 2;
	mrs->retry_count_rtscts = 2;
	mrs->retry_updated = true;

	/* expected airtime (us) of the data portion for an average A-MPDU */
	tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000;

	/* Contention time for first 2 tries */
	ctime = (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);
	ctime += (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);

	/* CCK rates carry no per-frame HT overhead here */
	if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
		overhead = mi->overhead;
		overhead_rtscts = mi->overhead_rtscts;
	}

	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + overhead + tx_time_data;
		tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mrs->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mrs->retry_count < mp->max_retry));
}
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun
/*
 * Fill one slot of the per-sta rate table: translate the internal rate
 * index into the hardware MCS/legacy index, set retry counts and add
 * RTS/CTS where needed.
 */
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		     struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	struct minstrel_rate_stats *mrs;
	u8 idx;
	u16 flags = group->flags;

	mrs = minstrel_get_ratestats(mi, index);
	if (!mrs->retry_updated)
		minstrel_calc_retransmit(mp, mi, index);

	/* unreliable rate: cap to 2 tries regardless of computed counts */
	if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
		ratetbl->rate[offset].count = 2;
		ratetbl->rate[offset].count_rts = 2;
		ratetbl->rate[offset].count_cts = 2;
	} else {
		ratetbl->rate[offset].count = mrs->retry_count;
		ratetbl->rate[offset].count_cts = mrs->retry_count;
		ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
	}

	/* map internal index to the hw rate index:
	 * CCK -> legacy table entry, VHT -> (NSS-1)<<4 | MCS,
	 * HT  -> MCS + (streams-1)*8 */
	if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP)
		idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
	else if (flags & IEEE80211_TX_RC_VHT_MCS)
		idx = ((group->streams - 1) << 4) |
		      ((index % MCS_GROUP_RATES) & 0xF);
	else
		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;

	/* enable RTS/CTS if needed:
	 *  - if station is in dynamic SMPS (and streams > 1)
	 *  - for fallback rates, to increase chances of getting through
	 */
	if (offset > 0 ||
	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
	     group->streams > 1)) {
		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	}

	ratetbl->rate[offset].idx = idx;
	ratetbl->rate[offset].flags = flags;
}
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun static inline int
minstrel_ht_get_prob_avg(struct minstrel_ht_sta * mi,int rate)1107*4882a593Smuzhiyun minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun int group = rate / MCS_GROUP_RATES;
1110*4882a593Smuzhiyun rate %= MCS_GROUP_RATES;
1111*4882a593Smuzhiyun return mi->groups[group].rates[rate].prob_avg;
1112*4882a593Smuzhiyun }
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun static int
minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta * mi)1115*4882a593Smuzhiyun minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun int group = mi->max_prob_rate / MCS_GROUP_RATES;
1118*4882a593Smuzhiyun const struct mcs_group *g = &minstrel_mcs_groups[group];
1119*4882a593Smuzhiyun int rate = mi->max_prob_rate % MCS_GROUP_RATES;
1120*4882a593Smuzhiyun unsigned int duration;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun /* Disable A-MSDU if max_prob_rate is bad */
1123*4882a593Smuzhiyun if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
1124*4882a593Smuzhiyun return 1;
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun duration = g->duration[rate];
1127*4882a593Smuzhiyun duration <<= g->shift;
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
1130*4882a593Smuzhiyun if (duration > MCS_DURATION(1, 0, 52))
1131*4882a593Smuzhiyun return 500;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun /*
1134*4882a593Smuzhiyun * If the rate is slower than single-stream MCS4, limit A-MSDU to usual
1135*4882a593Smuzhiyun * data packet size
1136*4882a593Smuzhiyun */
1137*4882a593Smuzhiyun if (duration > MCS_DURATION(1, 0, 104))
1138*4882a593Smuzhiyun return 1600;
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun /*
1141*4882a593Smuzhiyun * If the rate is slower than single-stream MCS7, or if the max throughput
1142*4882a593Smuzhiyun * rate success probability is less than 75%, limit A-MSDU to twice the usual
1143*4882a593Smuzhiyun * data packet size
1144*4882a593Smuzhiyun */
1145*4882a593Smuzhiyun if (duration > MCS_DURATION(1, 0, 260) ||
1146*4882a593Smuzhiyun (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
1147*4882a593Smuzhiyun MINSTREL_FRAC(75, 100)))
1148*4882a593Smuzhiyun return 3200;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun /*
1151*4882a593Smuzhiyun * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
1152*4882a593Smuzhiyun * Since aggregation sessions are started/stopped without txq flush, use
1153*4882a593Smuzhiyun * the limit here to avoid the complexity of having to de-aggregate
1154*4882a593Smuzhiyun * packets in the queue.
1155*4882a593Smuzhiyun */
1156*4882a593Smuzhiyun if (!mi->sta->vht_cap.vht_supported)
1157*4882a593Smuzhiyun return IEEE80211_MAX_MPDU_LEN_HT_BA;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun /* unlimited */
1160*4882a593Smuzhiyun return 0;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun static void
minstrel_ht_update_rates(struct minstrel_priv * mp,struct minstrel_ht_sta * mi)1164*4882a593Smuzhiyun minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
1165*4882a593Smuzhiyun {
1166*4882a593Smuzhiyun struct ieee80211_sta_rates *rates;
1167*4882a593Smuzhiyun u16 first_rate = mi->max_tp_rate[0];
1168*4882a593Smuzhiyun int i = 0;
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun if (mi->sample_mode == MINSTREL_SAMPLE_ACTIVE)
1171*4882a593Smuzhiyun first_rate = mi->sample_rate;
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
1174*4882a593Smuzhiyun if (!rates)
1175*4882a593Smuzhiyun return;
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun /* Start with max_tp_rate[0] */
1178*4882a593Smuzhiyun minstrel_ht_set_rate(mp, mi, rates, i++, first_rate);
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun if (mp->hw->max_rates >= 3) {
1181*4882a593Smuzhiyun /* At least 3 tx rates supported, use max_tp_rate[1] next */
1182*4882a593Smuzhiyun minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun if (mp->hw->max_rates >= 2) {
1186*4882a593Smuzhiyun minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
1187*4882a593Smuzhiyun }
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
1190*4882a593Smuzhiyun rates->rate[i].idx = -1;
1191*4882a593Smuzhiyun rate_control_set_rates(mp->hw, mi->sta, rates);
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun
/*
 * Decide whether the next frame should probe a non-primary rate, and if
 * so which one. Returns the internal rate index to sample, or -1 when
 * no sampling should happen for this frame.
 */
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mrs;
	struct minstrel_mcs_group_data *mg;
	unsigned int sample_dur, sample_group, cur_max_tp_streams;
	int tp_rate1, tp_rate2;
	int sample_idx = 0;

	/* single-rate hw with sample-switch active uses rate switching
	 * instead of probe packets (see minstrel_ht_rate_sample_switch) */
	if (mp->hw->max_rates == 1 && mp->sample_switch &&
	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
	     mp->sample_switch == 1))
		return -1;

	/* back-off counter between sampling attempts */
	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

	/* pick the next candidate from the sample table and advance the
	 * cursor regardless of whether this candidate is usable */
	sample_group = mi->sample_group;
	mg = &mi->groups[sample_group];
	sample_idx = sample_table[mg->column][mg->index];
	minstrel_set_next_sample_idx(mi);

	if (!(mi->supported[sample_group] & BIT(sample_idx)))
		return -1;

	mrs = &mg->rates[sample_idx];
	sample_idx += sample_group * MCS_GROUP_RATES;

	/* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */
	if (minstrel_get_duration(mi->max_tp_rate[0]) >
	    minstrel_get_duration(mi->max_tp_rate[1])) {
		tp_rate1 = mi->max_tp_rate[1];
		tp_rate2 = mi->max_tp_rate[0];
	} else {
		tp_rate1 = mi->max_tp_rate[0];
		tp_rate2 = mi->max_tp_rate[1];
	}

	/*
	 * Sampling might add some overhead (RTS, no aggregation)
	 * to the frame. Hence, don't use sampling for the highest currently
	 * used highest throughput or probability rate.
	 */
	if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
		return -1;

	/*
	 * Do not sample if the probability is already higher than 95%,
	 * or if the rate is 3 times slower than the current max probability
	 * rate, to avoid wasting airtime.
	 */
	sample_dur = minstrel_get_duration(sample_idx);
	if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
	    minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
		return -1;


	/*
	 * For devices with no configurable multi-rate retry, skip sampling
	 * below the per-group max throughput rate, and only use one sampling
	 * attempt per rate
	 */
	if (mp->hw->max_rates == 1 &&
	    (minstrel_get_duration(mg->max_group_tp_rate[0]) < sample_dur ||
	     mrs->attempts))
		return -1;

	/* Skip already sampled slow rates */
	if (sample_dur >= minstrel_get_duration(tp_rate1) && mrs->attempts)
		return -1;

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */

	cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
		MCS_GROUP_RATES].streams;
	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
	    (cur_max_tp_streams - 1 <
	     minstrel_mcs_groups[sample_group].streams ||
	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
		/* rate-limit slow samples: require prior skips and cap the
		 * number of slow samples per stats interval */
		if (mrs->sample_skipped < 20)
			return -1;

		if (mi->sample_slow++ > 2)
			return -1;
	}
	mi->sample_tries--;

	return sample_idx;
}
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun static void
minstrel_ht_get_rate(void * priv,struct ieee80211_sta * sta,void * priv_sta,struct ieee80211_tx_rate_control * txrc)1293*4882a593Smuzhiyun minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
1294*4882a593Smuzhiyun struct ieee80211_tx_rate_control *txrc)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun const struct mcs_group *sample_group;
1297*4882a593Smuzhiyun struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
1298*4882a593Smuzhiyun struct ieee80211_tx_rate *rate = &info->status.rates[0];
1299*4882a593Smuzhiyun struct minstrel_ht_sta_priv *msp = priv_sta;
1300*4882a593Smuzhiyun struct minstrel_ht_sta *mi = &msp->ht;
1301*4882a593Smuzhiyun struct minstrel_priv *mp = priv;
1302*4882a593Smuzhiyun int sample_idx;
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun if (!msp->is_ht)
1305*4882a593Smuzhiyun return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
1308*4882a593Smuzhiyun mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
1309*4882a593Smuzhiyun minstrel_aggr_check(sta, txrc->skb);
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun info->flags |= mi->tx_flags;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_DEBUGFS
1314*4882a593Smuzhiyun if (mp->fixed_rate_idx != -1)
1315*4882a593Smuzhiyun return;
1316*4882a593Smuzhiyun #endif
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun /* Don't use EAPOL frames for sampling on non-mrr hw */
1319*4882a593Smuzhiyun if (mp->hw->max_rates == 1 &&
1320*4882a593Smuzhiyun (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
1321*4882a593Smuzhiyun sample_idx = -1;
1322*4882a593Smuzhiyun else
1323*4882a593Smuzhiyun sample_idx = minstrel_get_sample_rate(mp, mi);
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun mi->total_packets++;
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun /* wraparound */
1328*4882a593Smuzhiyun if (mi->total_packets == ~0) {
1329*4882a593Smuzhiyun mi->total_packets = 0;
1330*4882a593Smuzhiyun mi->sample_packets = 0;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun if (sample_idx < 0)
1334*4882a593Smuzhiyun return;
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
1337*4882a593Smuzhiyun sample_idx %= MCS_GROUP_RATES;
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
1340*4882a593Smuzhiyun (sample_idx >= 4) != txrc->short_preamble)
1341*4882a593Smuzhiyun return;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1344*4882a593Smuzhiyun rate->count = 1;
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
1347*4882a593Smuzhiyun int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
1348*4882a593Smuzhiyun rate->idx = mp->cck_rates[idx];
1349*4882a593Smuzhiyun } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
1350*4882a593Smuzhiyun ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
1351*4882a593Smuzhiyun sample_group->streams);
1352*4882a593Smuzhiyun } else {
1353*4882a593Smuzhiyun rate->idx = sample_idx + (sample_group->streams - 1) * 8;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun rate->flags = sample_group->flags;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun static void
minstrel_ht_update_cck(struct minstrel_priv * mp,struct minstrel_ht_sta * mi,struct ieee80211_supported_band * sband,struct ieee80211_sta * sta)1360*4882a593Smuzhiyun minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
1361*4882a593Smuzhiyun struct ieee80211_supported_band *sband,
1362*4882a593Smuzhiyun struct ieee80211_sta *sta)
1363*4882a593Smuzhiyun {
1364*4882a593Smuzhiyun int i;
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun if (sband->band != NL80211_BAND_2GHZ)
1367*4882a593Smuzhiyun return;
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
1370*4882a593Smuzhiyun return;
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun mi->cck_supported = 0;
1373*4882a593Smuzhiyun mi->cck_supported_short = 0;
1374*4882a593Smuzhiyun for (i = 0; i < 4; i++) {
1375*4882a593Smuzhiyun if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
1376*4882a593Smuzhiyun continue;
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun mi->cck_supported |= BIT(i);
1379*4882a593Smuzhiyun if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
1380*4882a593Smuzhiyun mi->cck_supported_short |= BIT(i);
1381*4882a593Smuzhiyun }
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun static void
minstrel_ht_update_caps(void * priv,struct ieee80211_supported_band * sband,struct cfg80211_chan_def * chandef,struct ieee80211_sta * sta,void * priv_sta)1387*4882a593Smuzhiyun minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
1388*4882a593Smuzhiyun struct cfg80211_chan_def *chandef,
1389*4882a593Smuzhiyun struct ieee80211_sta *sta, void *priv_sta)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun struct minstrel_priv *mp = priv;
1392*4882a593Smuzhiyun struct minstrel_ht_sta_priv *msp = priv_sta;
1393*4882a593Smuzhiyun struct minstrel_ht_sta *mi = &msp->ht;
1394*4882a593Smuzhiyun struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
1395*4882a593Smuzhiyun u16 ht_cap = sta->ht_cap.cap;
1396*4882a593Smuzhiyun struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
1397*4882a593Smuzhiyun int use_vht;
1398*4882a593Smuzhiyun int n_supported = 0;
1399*4882a593Smuzhiyun int ack_dur;
1400*4882a593Smuzhiyun int stbc;
1401*4882a593Smuzhiyun int i;
1402*4882a593Smuzhiyun bool ldpc;
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun /* fall back to the old minstrel for legacy stations */
1405*4882a593Smuzhiyun if (!sta->ht_cap.ht_supported)
1406*4882a593Smuzhiyun goto use_legacy;
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun if (vht_cap->vht_supported)
1411*4882a593Smuzhiyun use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
1412*4882a593Smuzhiyun else
1413*4882a593Smuzhiyun use_vht = 0;
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun msp->is_ht = true;
1416*4882a593Smuzhiyun memset(mi, 0, sizeof(*mi));
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun mi->sta = sta;
1419*4882a593Smuzhiyun mi->last_stats_update = jiffies;
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
1422*4882a593Smuzhiyun mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
1423*4882a593Smuzhiyun mi->overhead += ack_dur;
1424*4882a593Smuzhiyun mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun /* When using MRR, sample more on the first attempt, without delay */
1429*4882a593Smuzhiyun if (mp->has_mrr) {
1430*4882a593Smuzhiyun mi->sample_count = 16;
1431*4882a593Smuzhiyun mi->sample_wait = 0;
1432*4882a593Smuzhiyun } else {
1433*4882a593Smuzhiyun mi->sample_count = 8;
1434*4882a593Smuzhiyun mi->sample_wait = 8;
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun mi->sample_tries = 4;
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun if (!use_vht) {
1439*4882a593Smuzhiyun stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
1440*4882a593Smuzhiyun IEEE80211_HT_CAP_RX_STBC_SHIFT;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
1443*4882a593Smuzhiyun } else {
1444*4882a593Smuzhiyun stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
1445*4882a593Smuzhiyun IEEE80211_VHT_CAP_RXSTBC_SHIFT;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
1451*4882a593Smuzhiyun if (ldpc)
1452*4882a593Smuzhiyun mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
1453*4882a593Smuzhiyun
1454*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
1455*4882a593Smuzhiyun u32 gflags = minstrel_mcs_groups[i].flags;
1456*4882a593Smuzhiyun int bw, nss;
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun mi->supported[i] = 0;
1459*4882a593Smuzhiyun if (i == MINSTREL_CCK_GROUP) {
1460*4882a593Smuzhiyun minstrel_ht_update_cck(mp, mi, sband, sta);
1461*4882a593Smuzhiyun continue;
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_SHORT_GI) {
1465*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
1466*4882a593Smuzhiyun if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
1467*4882a593Smuzhiyun continue;
1468*4882a593Smuzhiyun } else {
1469*4882a593Smuzhiyun if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
1470*4882a593Smuzhiyun continue;
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
1475*4882a593Smuzhiyun sta->bandwidth < IEEE80211_STA_RX_BW_40)
1476*4882a593Smuzhiyun continue;
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun nss = minstrel_mcs_groups[i].streams;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
1481*4882a593Smuzhiyun if (sta->smps_mode == IEEE80211_SMPS_STATIC && nss > 1)
1482*4882a593Smuzhiyun continue;
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun /* HT rate */
1485*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_MCS) {
1486*4882a593Smuzhiyun if (use_vht && minstrel_vht_only)
1487*4882a593Smuzhiyun continue;
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun mi->supported[i] = mcs->rx_mask[nss - 1];
1490*4882a593Smuzhiyun if (mi->supported[i])
1491*4882a593Smuzhiyun n_supported++;
1492*4882a593Smuzhiyun continue;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun /* VHT rate */
1496*4882a593Smuzhiyun if (!vht_cap->vht_supported ||
1497*4882a593Smuzhiyun WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) ||
1498*4882a593Smuzhiyun WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH))
1499*4882a593Smuzhiyun continue;
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
1502*4882a593Smuzhiyun if (sta->bandwidth < IEEE80211_STA_RX_BW_80 ||
1503*4882a593Smuzhiyun ((gflags & IEEE80211_TX_RC_SHORT_GI) &&
1504*4882a593Smuzhiyun !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
1505*4882a593Smuzhiyun continue;
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1510*4882a593Smuzhiyun bw = BW_40;
1511*4882a593Smuzhiyun else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
1512*4882a593Smuzhiyun bw = BW_80;
1513*4882a593Smuzhiyun else
1514*4882a593Smuzhiyun bw = BW_20;
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
1517*4882a593Smuzhiyun vht_cap->vht_mcs.tx_mcs_map);
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun if (mi->supported[i])
1520*4882a593Smuzhiyun n_supported++;
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun if (!n_supported)
1524*4882a593Smuzhiyun goto use_legacy;
1525*4882a593Smuzhiyun
1526*4882a593Smuzhiyun mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4;
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun /* create an initial rate table with the lowest supported rates */
1529*4882a593Smuzhiyun minstrel_ht_update_stats(mp, mi, true);
1530*4882a593Smuzhiyun minstrel_ht_update_rates(mp, mi);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun return;
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun use_legacy:
1535*4882a593Smuzhiyun msp->is_ht = false;
1536*4882a593Smuzhiyun memset(&msp->legacy, 0, sizeof(msp->legacy));
1537*4882a593Smuzhiyun msp->legacy.r = msp->ratelist;
1538*4882a593Smuzhiyun msp->legacy.sample_table = msp->sample_table;
1539*4882a593Smuzhiyun return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
1540*4882a593Smuzhiyun &msp->legacy);
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun static void
minstrel_ht_rate_init(void * priv,struct ieee80211_supported_band * sband,struct cfg80211_chan_def * chandef,struct ieee80211_sta * sta,void * priv_sta)1544*4882a593Smuzhiyun minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
1545*4882a593Smuzhiyun struct cfg80211_chan_def *chandef,
1546*4882a593Smuzhiyun struct ieee80211_sta *sta, void *priv_sta)
1547*4882a593Smuzhiyun {
1548*4882a593Smuzhiyun minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun static void
minstrel_ht_rate_update(void * priv,struct ieee80211_supported_band * sband,struct cfg80211_chan_def * chandef,struct ieee80211_sta * sta,void * priv_sta,u32 changed)1552*4882a593Smuzhiyun minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
1553*4882a593Smuzhiyun struct cfg80211_chan_def *chandef,
1554*4882a593Smuzhiyun struct ieee80211_sta *sta, void *priv_sta,
1555*4882a593Smuzhiyun u32 changed)
1556*4882a593Smuzhiyun {
1557*4882a593Smuzhiyun minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun static void *
minstrel_ht_alloc_sta(void * priv,struct ieee80211_sta * sta,gfp_t gfp)1561*4882a593Smuzhiyun minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
1564*4882a593Smuzhiyun struct minstrel_ht_sta_priv *msp;
1565*4882a593Smuzhiyun struct minstrel_priv *mp = priv;
1566*4882a593Smuzhiyun struct ieee80211_hw *hw = mp->hw;
1567*4882a593Smuzhiyun int max_rates = 0;
1568*4882a593Smuzhiyun int i;
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun for (i = 0; i < NUM_NL80211_BANDS; i++) {
1571*4882a593Smuzhiyun sband = hw->wiphy->bands[i];
1572*4882a593Smuzhiyun if (sband && sband->n_bitrates > max_rates)
1573*4882a593Smuzhiyun max_rates = sband->n_bitrates;
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun msp = kzalloc(sizeof(*msp), gfp);
1577*4882a593Smuzhiyun if (!msp)
1578*4882a593Smuzhiyun return NULL;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
1581*4882a593Smuzhiyun if (!msp->ratelist)
1582*4882a593Smuzhiyun goto error;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun msp->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
1585*4882a593Smuzhiyun if (!msp->sample_table)
1586*4882a593Smuzhiyun goto error1;
1587*4882a593Smuzhiyun
1588*4882a593Smuzhiyun return msp;
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun error1:
1591*4882a593Smuzhiyun kfree(msp->ratelist);
1592*4882a593Smuzhiyun error:
1593*4882a593Smuzhiyun kfree(msp);
1594*4882a593Smuzhiyun return NULL;
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun static void
minstrel_ht_free_sta(void * priv,struct ieee80211_sta * sta,void * priv_sta)1598*4882a593Smuzhiyun minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
1599*4882a593Smuzhiyun {
1600*4882a593Smuzhiyun struct minstrel_ht_sta_priv *msp = priv_sta;
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun kfree(msp->sample_table);
1603*4882a593Smuzhiyun kfree(msp->ratelist);
1604*4882a593Smuzhiyun kfree(msp);
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun static void
minstrel_ht_init_cck_rates(struct minstrel_priv * mp)1608*4882a593Smuzhiyun minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
1609*4882a593Smuzhiyun {
1610*4882a593Smuzhiyun static const int bitrates[4] = { 10, 20, 55, 110 };
1611*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
1612*4882a593Smuzhiyun u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
1613*4882a593Smuzhiyun int i, j;
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
1616*4882a593Smuzhiyun if (!sband)
1617*4882a593Smuzhiyun return;
1618*4882a593Smuzhiyun
1619*4882a593Smuzhiyun for (i = 0; i < sband->n_bitrates; i++) {
1620*4882a593Smuzhiyun struct ieee80211_rate *rate = &sband->bitrates[i];
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun if (rate->flags & IEEE80211_RATE_ERP_G)
1623*4882a593Smuzhiyun continue;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
1626*4882a593Smuzhiyun continue;
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
1629*4882a593Smuzhiyun if (rate->bitrate != bitrates[j])
1630*4882a593Smuzhiyun continue;
1631*4882a593Smuzhiyun
1632*4882a593Smuzhiyun mp->cck_rates[j] = i;
1633*4882a593Smuzhiyun break;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun static void *
minstrel_ht_alloc(struct ieee80211_hw * hw)1639*4882a593Smuzhiyun minstrel_ht_alloc(struct ieee80211_hw *hw)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun struct minstrel_priv *mp;
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
1644*4882a593Smuzhiyun if (!mp)
1645*4882a593Smuzhiyun return NULL;
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun mp->sample_switch = -1;
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun /* contention window settings
1650*4882a593Smuzhiyun * Just an approximation. Using the per-queue values would complicate
1651*4882a593Smuzhiyun * the calculations and is probably unnecessary */
1652*4882a593Smuzhiyun mp->cw_min = 15;
1653*4882a593Smuzhiyun mp->cw_max = 1023;
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun /* number of packets (in %) to use for sampling other rates
1656*4882a593Smuzhiyun * sample less often for non-mrr packets, because the overhead
1657*4882a593Smuzhiyun * is much higher than with mrr */
1658*4882a593Smuzhiyun mp->lookaround_rate = 5;
1659*4882a593Smuzhiyun mp->lookaround_rate_mrr = 10;
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun /* maximum time that the hw is allowed to stay in one MRR segment */
1662*4882a593Smuzhiyun mp->segment_size = 6000;
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun if (hw->max_rate_tries > 0)
1665*4882a593Smuzhiyun mp->max_retry = hw->max_rate_tries;
1666*4882a593Smuzhiyun else
1667*4882a593Smuzhiyun /* safe default, does not necessarily have to match hw properties */
1668*4882a593Smuzhiyun mp->max_retry = 7;
1669*4882a593Smuzhiyun
1670*4882a593Smuzhiyun if (hw->max_rates >= 4)
1671*4882a593Smuzhiyun mp->has_mrr = true;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun mp->hw = hw;
1674*4882a593Smuzhiyun mp->update_interval = HZ / 10;
1675*4882a593Smuzhiyun mp->new_avg = true;
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun minstrel_ht_init_cck_rates(mp);
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun return mp;
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_DEBUGFS
minstrel_ht_add_debugfs(struct ieee80211_hw * hw,void * priv,struct dentry * debugfsdir)1683*4882a593Smuzhiyun static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv,
1684*4882a593Smuzhiyun struct dentry *debugfsdir)
1685*4882a593Smuzhiyun {
1686*4882a593Smuzhiyun struct minstrel_priv *mp = priv;
1687*4882a593Smuzhiyun
1688*4882a593Smuzhiyun mp->fixed_rate_idx = (u32) -1;
1689*4882a593Smuzhiyun debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
1690*4882a593Smuzhiyun &mp->fixed_rate_idx);
1691*4882a593Smuzhiyun debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
1692*4882a593Smuzhiyun &mp->sample_switch);
1693*4882a593Smuzhiyun debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
1694*4882a593Smuzhiyun &mp->new_avg);
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun #endif
1697*4882a593Smuzhiyun
1698*4882a593Smuzhiyun static void
minstrel_ht_free(void * priv)1699*4882a593Smuzhiyun minstrel_ht_free(void *priv)
1700*4882a593Smuzhiyun {
1701*4882a593Smuzhiyun kfree(priv);
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun
minstrel_ht_get_expected_throughput(void * priv_sta)1704*4882a593Smuzhiyun static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun struct minstrel_ht_sta_priv *msp = priv_sta;
1707*4882a593Smuzhiyun struct minstrel_ht_sta *mi = &msp->ht;
1708*4882a593Smuzhiyun int i, j, prob, tp_avg;
1709*4882a593Smuzhiyun
1710*4882a593Smuzhiyun if (!msp->is_ht)
1711*4882a593Smuzhiyun return mac80211_minstrel.get_expected_throughput(priv_sta);
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
1714*4882a593Smuzhiyun j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
1715*4882a593Smuzhiyun prob = mi->groups[i].rates[j].prob_avg;
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun /* convert tp_avg from pkt per second in kbps */
1718*4882a593Smuzhiyun tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
1719*4882a593Smuzhiyun tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun return tp_avg;
1722*4882a593Smuzhiyun }
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun static const struct rate_control_ops mac80211_minstrel_ht = {
1725*4882a593Smuzhiyun .name = "minstrel_ht",
1726*4882a593Smuzhiyun .tx_status_ext = minstrel_ht_tx_status,
1727*4882a593Smuzhiyun .get_rate = minstrel_ht_get_rate,
1728*4882a593Smuzhiyun .rate_init = minstrel_ht_rate_init,
1729*4882a593Smuzhiyun .rate_update = minstrel_ht_rate_update,
1730*4882a593Smuzhiyun .alloc_sta = minstrel_ht_alloc_sta,
1731*4882a593Smuzhiyun .free_sta = minstrel_ht_free_sta,
1732*4882a593Smuzhiyun .alloc = minstrel_ht_alloc,
1733*4882a593Smuzhiyun .free = minstrel_ht_free,
1734*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_DEBUGFS
1735*4882a593Smuzhiyun .add_debugfs = minstrel_ht_add_debugfs,
1736*4882a593Smuzhiyun .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
1737*4882a593Smuzhiyun #endif
1738*4882a593Smuzhiyun .get_expected_throughput = minstrel_ht_get_expected_throughput,
1739*4882a593Smuzhiyun };
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun
init_sample_table(void)1742*4882a593Smuzhiyun static void __init init_sample_table(void)
1743*4882a593Smuzhiyun {
1744*4882a593Smuzhiyun int col, i, new_idx;
1745*4882a593Smuzhiyun u8 rnd[MCS_GROUP_RATES];
1746*4882a593Smuzhiyun
1747*4882a593Smuzhiyun memset(sample_table, 0xff, sizeof(sample_table));
1748*4882a593Smuzhiyun for (col = 0; col < SAMPLE_COLUMNS; col++) {
1749*4882a593Smuzhiyun prandom_bytes(rnd, sizeof(rnd));
1750*4882a593Smuzhiyun for (i = 0; i < MCS_GROUP_RATES; i++) {
1751*4882a593Smuzhiyun new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
1752*4882a593Smuzhiyun while (sample_table[col][new_idx] != 0xff)
1753*4882a593Smuzhiyun new_idx = (new_idx + 1) % MCS_GROUP_RATES;
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun sample_table[col][new_idx] = i;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun }
1758*4882a593Smuzhiyun }
1759*4882a593Smuzhiyun
1760*4882a593Smuzhiyun int __init
rc80211_minstrel_init(void)1761*4882a593Smuzhiyun rc80211_minstrel_init(void)
1762*4882a593Smuzhiyun {
1763*4882a593Smuzhiyun init_sample_table();
1764*4882a593Smuzhiyun return ieee80211_rate_control_register(&mac80211_minstrel_ht);
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun void
rc80211_minstrel_exit(void)1768*4882a593Smuzhiyun rc80211_minstrel_exit(void)
1769*4882a593Smuzhiyun {
1770*4882a593Smuzhiyun ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
1771*4882a593Smuzhiyun }
1772