/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2019 Intel Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


/**
 * DOC: Wireless regulatory infrastructure
 *
 * The usual implementation is for a driver to read a device EEPROM to
 * determine which regulatory domain it should be operating under, then
 * look up the allowable channels in a driver-local table and finally
 * register those channels in the wiphy structure.
 *
 * Another form of compliance enforcement is for drivers to use their
 * own compliance limits, which can be stored in the EEPROM. The host
 * driver or firmware may ensure these are used.
 *
 * In addition to all this we provide an extra layer of regulatory
 * conformance. For drivers which do not have any regulatory
 * information CRDA provides the complete regulatory solution.
 * For others it provides a community effort on further restrictions
 * to enhance compliance.
 *
 * Note: as the number of rules grows toward infinity we will no longer
 * be able to index on alpha2; instead we will probably have to rely on
 * something like a SHA-1 checksum of the regdomain.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
#include "rdev-ops.h"
#include "nl80211.h"

/*
 * Grace period we give before making sure all current interfaces reside on
 * channels allowed by the current regulatory domain.
 */
#define REG_ENFORCE_GRACE_MS 60000

/**
 * enum reg_request_treatment - regulatory request treatment
 *
 * @REG_REQ_OK: continue processing the regulatory request
 * @REG_REQ_IGNORE: ignore the regulatory request
 * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
 *	be intersected with the current one.
 * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
 *	regulatory settings, and no further processing is required.
 */
enum reg_request_treatment {
	REG_REQ_OK,
	REG_REQ_IGNORE,
	REG_REQ_INTERSECT,
	REG_REQ_ALREADY_SET,
};

static struct regulatory_request core_request_world = {
	.initiator = NL80211_REGDOM_SET_BY_CORE,
	.alpha2[0] = '0',
	.alpha2[1] = '0',
	.intersect = false,
	.processed = true,
	.country_ie_env = ENVIRON_ANY,
};

/*
 * Receipt of information from last regulatory request,
 * protected by RTNL (and can be accessed with RCU protection)
 */
static struct regulatory_request __rcu *last_request =
	(void __force __rcu *)&core_request_world;

/* To trigger userspace events and load firmware */
static struct platform_device *reg_pdev;

/*
 * Central wireless core regulatory domains, we only need two,
 * the current one and a world regulatory domain in case we have no
 * information to give us an alpha2.
 * (protected by RTNL, can be read under RCU)
 */
const struct ieee80211_regdomain __rcu *cfg80211_regdomain;

/*
 * Number of devices that registered to the core
 * that support cellular base station regulatory hints
 * (protected by RTNL)
 */
static int reg_num_devs_support_basehint;

/*
 * State variable indicating if the platform on which the devices
 * are attached is operating in an indoor environment. The state variable
 * is relevant for all registered devices.
 */
static bool reg_is_indoor;
static spinlock_t reg_indoor_lock;

/* Used to track the userspace process controlling the indoor setting */
static u32 reg_is_indoor_portid;

static void restore_regulatory_settings(bool reset_user, bool cached);
static void print_regdomain(const struct ieee80211_regdomain *rd);

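/*
 * Accessors for the RCU-protected regulatory domain pointers; callers
 * must hold the RTNL or be in an RCU read-side critical section.
 */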
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
	return rcu_dereference_rtnl(cfg80211_regdomain);
}

const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
{
	return rcu_dereference_rtnl(wiphy->regd);
}

static const char *reg_dfs_region_str(enum nl80211_dfs_regions dfs_region)
{
	switch (dfs_region) {
	case NL80211_DFS_UNSET:
		return "unset";
	case NL80211_DFS_FCC:
		return "FCC";
	case NL80211_DFS_ETSI:
		return "ETSI";
	case NL80211_DFS_JP:
		return "JP";
	}
	return "Unknown";
}

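/*
 * Return the DFS region of the central regdomain, logging a debug
 * message if a wiphy-specific regdomain disagrees with it.
 */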
enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy)
{
	const struct ieee80211_regdomain *regd = NULL;
	const struct ieee80211_regdomain *wiphy_regd = NULL;

	regd = get_cfg80211_regdom();
	if (!wiphy)
		goto out;

	wiphy_regd = get_wiphy_regdom(wiphy);
	if (!wiphy_regd)
		goto out;

	if (wiphy_regd->dfs_region == regd->dfs_region)
		goto out;

	pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n",
		 dev_name(&wiphy->dev),
		 reg_dfs_region_str(wiphy_regd->dfs_region),
		 reg_dfs_region_str(regd->dfs_region));

out:
	return regd->dfs_region;
}

static void rcu_free_regdom(const struct ieee80211_regdomain *r)
{
	if (!r)
		return;
	kfree_rcu((struct ieee80211_regdomain *)r, rcu_head);
}

static struct regulatory_request *get_last_request(void)
{
	return rcu_dereference_rtnl(last_request);
}

/* Used to queue up regulatory hints */
static LIST_HEAD(reg_requests_list);
static spinlock_t reg_requests_lock;

/* Used to queue up beacon hints for review */
static LIST_HEAD(reg_pending_beacons);
static spinlock_t reg_pending_beacons_lock;

/* Used to keep track of processed beacon hints */
static LIST_HEAD(reg_beacon_list);

struct reg_beacon {
	struct list_head list;
	struct ieee80211_channel chan;
};

static void reg_check_chans_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work);

static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);

/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
	.n_reg_rules = 8,
	.alpha2 = "00",
	.reg_rules = {
		/* IEEE 802.11b/g, channels 1..11 */
		REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
		/* IEEE 802.11b/g, channels 12..13. */
		REG_RULE(2467-10, 2472+10, 20, 6, 20,
			 NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW),
		/* IEEE 802.11 channel 14 - Only JP enables
		 * this and for 802.11b only */
		REG_RULE(2484-10, 2484+10, 20, 6, 20,
			 NL80211_RRF_NO_IR |
			 NL80211_RRF_NO_OFDM),
		/* IEEE 802.11a, channel 36..48 */
		REG_RULE(5180-10, 5240+10, 80, 6, 20,
			 NL80211_RRF_NO_IR |
			 NL80211_RRF_AUTO_BW),

		/* IEEE 802.11a, channel 52..64 - DFS required */
		REG_RULE(5260-10, 5320+10, 80, 6, 20,
			 NL80211_RRF_NO_IR |
			 NL80211_RRF_AUTO_BW |
			 NL80211_RRF_DFS),

		/* IEEE 802.11a, channel 100..144 - DFS required */
		REG_RULE(5500-10, 5720+10, 160, 6, 20,
			 NL80211_RRF_NO_IR |
			 NL80211_RRF_DFS),

		/* IEEE 802.11a, channel 149..165 */
		REG_RULE(5745-10, 5825+10, 80, 6, 20,
			 NL80211_RRF_NO_IR),

		/* IEEE 802.11ad (60GHz), channels 1..3 */
		REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
	}
};

/* protected by RTNL */
static const struct ieee80211_regdomain *cfg80211_world_regdom =
	&world_regdom;

static char *ieee80211_regdom = "00";
static char user_alpha2[2];
static const struct ieee80211_regdomain *cfg80211_user_regdom;

module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");

static void reg_free_request(struct regulatory_request *request)
{
	if (request == &core_request_world)
		return;

	if (request != get_last_request())
		kfree(request);
}

static void reg_free_last_request(void)
{
	struct regulatory_request *lr = get_last_request();

	if (lr != &core_request_world && lr)
		kfree_rcu(lr, rcu_head);
}

static void reg_update_last_request(struct regulatory_request *request)
{
	struct regulatory_request *lr;

	lr = get_last_request();
	if (lr == request)
		return;

	reg_free_last_request();
	rcu_assign_pointer(last_request, request);
}

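/*
 * Release the current and world regdomains (unless they point at the
 * static world_regdom) and install @new_regdom as the current one. A
 * full reset also points the last request back at the core world
 * request.
 */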
static void reset_regdomains(bool full_reset,
			     const struct ieee80211_regdomain *new_regdom)
{
	const struct ieee80211_regdomain *r;

	ASSERT_RTNL();

	r = get_cfg80211_regdom();

	/* avoid freeing static information or freeing something twice */
	if (r == cfg80211_world_regdom)
		r = NULL;
	if (cfg80211_world_regdom == &world_regdom)
		cfg80211_world_regdom = NULL;
	if (r == &world_regdom)
		r = NULL;

	rcu_free_regdom(r);
	rcu_free_regdom(cfg80211_world_regdom);

	cfg80211_world_regdom = &world_regdom;
	rcu_assign_pointer(cfg80211_regdomain, new_regdom);

	if (!full_reset)
		return;

	reg_update_last_request(&core_request_world);
}

/*
 * Dynamic world regulatory domain requested by the wireless
 * core upon initialization
 */
static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
	struct regulatory_request *lr;

	lr = get_last_request();

	WARN_ON(!lr);

	reset_regdomains(false, rd);

	cfg80211_world_regdom = rd;
}

bool is_world_regdom(const char *alpha2)
{
	if (!alpha2)
		return false;
	return alpha2[0] == '0' && alpha2[1] == '0';
}

static bool is_alpha2_set(const char *alpha2)
{
	if (!alpha2)
		return false;
	return alpha2[0] && alpha2[1];
}

static bool is_unknown_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	/*
	 * Special case where regulatory domain was built by driver
	 * but a specific alpha2 cannot be determined
	 */
	return alpha2[0] == '9' && alpha2[1] == '9';
}

static bool is_intersected_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	/*
	 * Special case where regulatory domain is the
	 * result of an intersection between two regulatory domain
	 * structures
	 */
	return alpha2[0] == '9' && alpha2[1] == '8';
}

static bool is_an_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	return isalpha(alpha2[0]) && isalpha(alpha2[1]);
}

static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
{
	if (!alpha2_x || !alpha2_y)
		return false;
	return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1];
}

static bool regdom_changes(const char *alpha2)
{
	const struct ieee80211_regdomain *r = get_cfg80211_regdom();

	if (!r)
		return true;
	return !alpha2_equal(r->alpha2, alpha2);
}

/*
 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
 * has ever been issued.
 */
static bool is_user_regdom_saved(void)
{
	if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
		return false;

	/* This would indicate a mistake on the design */
	if (WARN(!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2),
		 "Unexpected user alpha2: %c%c\n",
		 user_alpha2[0], user_alpha2[1]))
		return false;

	return true;
}

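/* Return a kzalloc'ed deep copy of @src_regd, or an ERR_PTR on failure */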
static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
	struct ieee80211_regdomain *regd;
	unsigned int i;

	regd = kzalloc(struct_size(regd, reg_rules, src_regd->n_reg_rules),
		       GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));

	for (i = 0; i < src_regd->n_reg_rules; i++)
		memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
		       sizeof(struct ieee80211_reg_rule));

	return regd;
}

static void cfg80211_save_user_regdom(const struct ieee80211_regdomain *rd)
{
	ASSERT_RTNL();

	if (!IS_ERR(cfg80211_user_regdom))
		kfree(cfg80211_user_regdom);
	cfg80211_user_regdom = reg_copy_regd(rd);
}

struct reg_regdb_apply_request {
	struct list_head list;
	const struct ieee80211_regdomain *regdom;
};

static LIST_HEAD(reg_regdb_apply_list);
static DEFINE_MUTEX(reg_regdb_apply_mutex);

static void reg_regdb_apply(struct work_struct *work)
{
	struct reg_regdb_apply_request *request;

	rtnl_lock();

	mutex_lock(&reg_regdb_apply_mutex);
	while (!list_empty(&reg_regdb_apply_list)) {
		request = list_first_entry(&reg_regdb_apply_list,
					   struct reg_regdb_apply_request,
					   list);
		list_del(&request->list);

		set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
		kfree(request);
	}
	mutex_unlock(&reg_regdb_apply_mutex);

	rtnl_unlock();
}

static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);

static int reg_schedule_apply(const struct ieee80211_regdomain *regdom)
{
	struct reg_regdb_apply_request *request;

	request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
	if (!request) {
		kfree(regdom);
		return -ENOMEM;
	}

	request->regdom = regdom;

	mutex_lock(&reg_regdb_apply_mutex);
	list_add_tail(&request->list, &reg_regdb_apply_list);
	mutex_unlock(&reg_regdb_apply_mutex);

	schedule_work(&reg_regdb_work);
	return 0;
}

#ifdef CONFIG_CFG80211_CRDA_SUPPORT
/* Max number of consecutive attempts to communicate with CRDA */
#define REG_MAX_CRDA_TIMEOUTS 10

static u32 reg_crda_timeouts;

static void crda_timeout_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);

static void crda_timeout_work(struct work_struct *work)
{
	pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
	rtnl_lock();
	reg_crda_timeouts++;
	restore_regulatory_settings(true, false);
	rtnl_unlock();
}

static void cancel_crda_timeout(void)
{
	cancel_delayed_work(&crda_timeout);
}

static void cancel_crda_timeout_sync(void)
{
	cancel_delayed_work_sync(&crda_timeout);
}

static void reset_crda_timeouts(void)
{
	reg_crda_timeouts = 0;
}

/*
 * This lets us keep regulatory code which is updated on a regulatory
 * basis in userspace.
 */
static int call_crda(const char *alpha2)
{
	char country[12];
	char *env[] = { country, NULL };
	int ret;

	snprintf(country, sizeof(country), "COUNTRY=%c%c",
		 alpha2[0], alpha2[1]);

	if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
		pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
		return -EINVAL;
	}

	if (!is_world_regdom((char *) alpha2))
		pr_debug("Calling CRDA for country: %c%c\n",
			 alpha2[0], alpha2[1]);
	else
		pr_debug("Calling CRDA to update world regulatory domain\n");

	ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
	if (ret)
		return ret;

	queue_delayed_work(system_power_efficient_wq,
			   &crda_timeout, msecs_to_jiffies(3142));
	return 0;
}
#else
static inline void cancel_crda_timeout(void) {}
static inline void cancel_crda_timeout_sync(void) {}
static inline void reset_crda_timeouts(void) {}
static inline int call_crda(const char *alpha2)
{
	return -ENODATA;
}
#endif /* CONFIG_CFG80211_CRDA_SUPPORT */

/* code to directly load a firmware database through request_firmware */
static const struct fwdb_header *regdb;

struct fwdb_country {
	u8 alpha2[2];
	__be16 coll_ptr;
	/* this struct cannot be extended */
} __packed __aligned(4);

struct fwdb_collection {
	u8 len;
	u8 n_rules;
	u8 dfs_region;
	/* no optional data yet */
	/* aligned to 2, then followed by __be16 array of rule pointers */
} __packed __aligned(4);

enum fwdb_flags {
	FWDB_FLAG_NO_OFDM	= BIT(0),
	FWDB_FLAG_NO_OUTDOOR	= BIT(1),
	FWDB_FLAG_DFS		= BIT(2),
	FWDB_FLAG_NO_IR		= BIT(3),
	FWDB_FLAG_AUTO_BW	= BIT(4),
};

struct fwdb_wmm_ac {
	u8 ecw;
	u8 aifsn;
	__be16 cot;
} __packed;

struct fwdb_wmm_rule {
	struct fwdb_wmm_ac client[IEEE80211_NUM_ACS];
	struct fwdb_wmm_ac ap[IEEE80211_NUM_ACS];
} __packed;

struct fwdb_rule {
	u8 len;
	u8 flags;
	__be16 max_eirp;
	__be32 start, end, max_bw;
	/* start of optional data */
	__be16 cac_timeout;
	__be16 wmm_ptr;
} __packed __aligned(4);

#define FWDB_MAGIC 0x52474442
#define FWDB_VERSION 20

struct fwdb_header {
	__be32 magic;
	__be32 version;
	struct fwdb_country country[];
} __packed __aligned(4);

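/* Convert an exponent contention window (ECW) value to a CW: 2^ecw - 1 */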
static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

static bool valid_wmm(struct fwdb_wmm_rule *rule)
{
	struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS * 2; i++) {
		u16 cw_min = ecw2cw((ac[i].ecw & 0xf0) >> 4);
		u16 cw_max = ecw2cw(ac[i].ecw & 0x0f);
		u8 aifsn = ac[i].aifsn;

		if (cw_min >= cw_max)
			return false;

		if (aifsn < 1)
			return false;
	}

	return true;
}

static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr)
{
	struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2));

	if ((u8 *)rule + sizeof(rule->len) > data + size)
		return false;

	/* mandatory fields */
	if (rule->len < offsetofend(struct fwdb_rule, max_bw))
		return false;
	if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
		u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
		struct fwdb_wmm_rule *wmm;

		if (wmm_ptr + sizeof(struct fwdb_wmm_rule) > size)
			return false;

		wmm = (void *)(data + wmm_ptr);

		if (!valid_wmm(wmm))
			return false;
	}
	return true;
}

static bool valid_country(const u8 *data, unsigned int size,
			  const struct fwdb_country *country)
{
	unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
	struct fwdb_collection *coll = (void *)(data + ptr);
	__be16 *rules_ptr;
	unsigned int i;

	/* make sure we can read len/n_rules */
	if ((u8 *)coll + offsetofend(typeof(*coll), n_rules) > data + size)
		return false;

	/* make sure base struct and all rules fit */
	if ((u8 *)coll + ALIGN(coll->len, 2) +
	    (coll->n_rules * 2) > data + size)
		return false;

	/* mandatory fields must exist */
	if (coll->len < offsetofend(struct fwdb_collection, dfs_region))
		return false;

	rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));

	for (i = 0; i < coll->n_rules; i++) {
		u16 rule_ptr = be16_to_cpu(rules_ptr[i]);

		if (!valid_rule(data, size, rule_ptr))
			return false;
	}

	return true;
}

#ifdef CONFIG_CFG80211_REQUIRE_SIGNED_REGDB
static struct key *builtin_regdb_keys;

static void __init load_keys_from_buffer(const u8 *p, unsigned int buflen)
{
	const u8 *end = p + buflen;
	size_t plen;
	key_ref_t key;

	while (p < end) {
		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
		 * than 256 bytes in size.
		 */
		if (end - p < 4)
			goto dodgy_cert;
		if (p[0] != 0x30 &&
		    p[1] != 0x82)
			goto dodgy_cert;
		plen = (p[2] << 8) | p[3];
		plen += 4;
		if (plen > end - p)
			goto dodgy_cert;

		key = key_create_or_update(make_key_ref(builtin_regdb_keys, 1),
					   "asymmetric", NULL, p, plen,
					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
					    KEY_USR_VIEW | KEY_USR_READ),
					   KEY_ALLOC_NOT_IN_QUOTA |
					   KEY_ALLOC_BUILT_IN |
					   KEY_ALLOC_BYPASS_RESTRICTION);
		if (IS_ERR(key)) {
			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
			       PTR_ERR(key));
		} else {
			pr_notice("Loaded X.509 cert '%s'\n",
				  key_ref_to_ptr(key)->description);
			key_ref_put(key);
		}
		p += plen;
	}

	return;

dodgy_cert:
	pr_err("Problem parsing in-kernel X.509 certificate list\n");
}

static int __init load_builtin_regdb_keys(void)
{
	builtin_regdb_keys =
		keyring_alloc(".builtin_regdb_keys",
			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
			       KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
			      KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
	if (IS_ERR(builtin_regdb_keys))
		return PTR_ERR(builtin_regdb_keys);

	pr_notice("Loading compiled-in X.509 certificates for regulatory database\n");

#ifdef CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS
	load_keys_from_buffer(shipped_regdb_certs, shipped_regdb_certs_len);
#endif
#ifdef CONFIG_CFG80211_EXTRA_REGDB_KEYDIR
	if (CONFIG_CFG80211_EXTRA_REGDB_KEYDIR[0] != '\0')
		load_keys_from_buffer(extra_regdb_certs, extra_regdb_certs_len);
#endif

	return 0;
}

MODULE_FIRMWARE("regulatory.db.p7s");

static bool regdb_has_valid_signature(const u8 *data, unsigned int size)
{
	const struct firmware *sig;
	bool result;

	if (request_firmware(&sig, "regulatory.db.p7s", &reg_pdev->dev))
		return false;

	result = verify_pkcs7_signature(data, size, sig->data, sig->size,
					builtin_regdb_keys,
					VERIFYING_UNSPECIFIED_SIGNATURE,
					NULL, NULL) == 0;

	release_firmware(sig);

	return result;
}

static void free_regdb_keyring(void)
{
	key_put(builtin_regdb_keys);
}
#else
static int load_builtin_regdb_keys(void)
{
	return 0;
}

static bool regdb_has_valid_signature(const u8 *data, unsigned int size)
{
	return true;
}

static void free_regdb_keyring(void)
{
}
#endif /* CONFIG_CFG80211_REQUIRE_SIGNED_REGDB */

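/*
 * Validate a regulatory.db blob: check the header magic and version,
 * verify the detached signature when required, and walk every country
 * collection to make sure all of its rules are well formed.
 */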
static bool valid_regdb(const u8 *data, unsigned int size)
{
	const struct fwdb_header *hdr = (void *)data;
	const struct fwdb_country *country;

	if (size < sizeof(*hdr))
		return false;

	if (hdr->magic != cpu_to_be32(FWDB_MAGIC))
		return false;

	if (hdr->version != cpu_to_be32(FWDB_VERSION))
		return false;

	if (!regdb_has_valid_signature(data, size))
		return false;

	country = &hdr->country[0];
	while ((u8 *)(country + 1) <= data + size) {
		if (!country->coll_ptr)
			break;
		if (!valid_country(data, size, country))
			return false;
		country++;
	}

	return true;
}

static void set_wmm_rule(const struct fwdb_header *db,
			 const struct fwdb_country *country,
			 const struct fwdb_rule *rule,
			 struct ieee80211_reg_rule *rrule)
{
	struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule;
	struct fwdb_wmm_rule *wmm;
	unsigned int i, wmm_ptr;

	wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
	wmm = (void *)((u8 *)db + wmm_ptr);

	if (!valid_wmm(wmm)) {
		pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n",
		       be32_to_cpu(rule->start), be32_to_cpu(rule->end),
		       country->alpha2[0], country->alpha2[1]);
		return;
	}

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		wmm_rule->client[i].cw_min =
			ecw2cw((wmm->client[i].ecw & 0xf0) >> 4);
		wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
		wmm_rule->client[i].aifsn = wmm->client[i].aifsn;
		wmm_rule->client[i].cot =
			1000 * be16_to_cpu(wmm->client[i].cot);
		wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
		wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
		wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn;
		wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
	}

	rrule->has_wmm = true;
}

static int __regdb_query_wmm(const struct fwdb_header *db,
			     const struct fwdb_country *country, int freq,
			     struct ieee80211_reg_rule *rrule)
{
	unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
	struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
	int i;

	for (i = 0; i < coll->n_rules; i++) {
		__be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
		unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
		struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);

		if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr))
			continue;

		if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) &&
		    freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) {
			set_wmm_rule(db, country, rule, rrule);
			return 0;
		}
	}

	return -ENODATA;
}

int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
{
	const struct fwdb_header *hdr = regdb;
	const struct fwdb_country *country;

	if (!regdb)
		return -ENODATA;

	if (IS_ERR(regdb))
		return PTR_ERR(regdb);

	country = &hdr->country[0];
	while (country->coll_ptr) {
		if (alpha2_equal(alpha2, country->alpha2))
			return __regdb_query_wmm(regdb, country, freq, rule);

		country++;
	}

	return -ENODATA;
}
EXPORT_SYMBOL(reg_query_regdb_wmm);

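/*
 * Build an ieee80211_regdomain from the firmware database entry for
 * @country and schedule it to be applied as the current regdomain.
 */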
static int regdb_query_country(const struct fwdb_header *db,
			       const struct fwdb_country *country)
{
	unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
	struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
	struct ieee80211_regdomain *regdom;
	unsigned int i;

	regdom = kzalloc(struct_size(regdom, reg_rules, coll->n_rules),
			 GFP_KERNEL);
	if (!regdom)
		return -ENOMEM;

	regdom->n_reg_rules = coll->n_rules;
	regdom->alpha2[0] = country->alpha2[0];
	regdom->alpha2[1] = country->alpha2[1];
	regdom->dfs_region = coll->dfs_region;

	for (i = 0; i < regdom->n_reg_rules; i++) {
		__be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
		unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
		struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);
		struct ieee80211_reg_rule *rrule = &regdom->reg_rules[i];

		rrule->freq_range.start_freq_khz = be32_to_cpu(rule->start);
		rrule->freq_range.end_freq_khz = be32_to_cpu(rule->end);
		rrule->freq_range.max_bandwidth_khz = be32_to_cpu(rule->max_bw);

		rrule->power_rule.max_antenna_gain = 0;
		rrule->power_rule.max_eirp = be16_to_cpu(rule->max_eirp);

		rrule->flags = 0;
		if (rule->flags & FWDB_FLAG_NO_OFDM)
			rrule->flags |= NL80211_RRF_NO_OFDM;
		if (rule->flags & FWDB_FLAG_NO_OUTDOOR)
			rrule->flags |= NL80211_RRF_NO_OUTDOOR;
		if (rule->flags & FWDB_FLAG_DFS)
			rrule->flags |= NL80211_RRF_DFS;
		if (rule->flags & FWDB_FLAG_NO_IR)
			rrule->flags |= NL80211_RRF_NO_IR;
		if (rule->flags & FWDB_FLAG_AUTO_BW)
			rrule->flags |= NL80211_RRF_AUTO_BW;

		rrule->dfs_cac_ms = 0;

		/* handle optional data */
		if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout))
			rrule->dfs_cac_ms =
				1000 * be16_to_cpu(rule->cac_timeout);
		if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr))
			set_wmm_rule(db, country, rule, rrule);
	}

	return reg_schedule_apply(regdom);
}

static int query_regdb(const char *alpha2)
{
	const struct fwdb_header *hdr = regdb;
	const struct fwdb_country *country;

	ASSERT_RTNL();

	if (IS_ERR(regdb))
		return PTR_ERR(regdb);

	country = &hdr->country[0];
	while (country->coll_ptr) {
		if (alpha2_equal(alpha2, country->alpha2))
			return regdb_query_country(regdb, country);
		country++;
	}

	return -ENODATA;
}

static void regdb_fw_cb(const struct firmware *fw, void *context)
{
	int set_error = 0;
	bool restore = true;
	void *db;

	if (!fw) {
		pr_info("failed to load regulatory.db\n");
		set_error = -ENODATA;
	} else if (!valid_regdb(fw->data, fw->size)) {
		pr_info("loaded regulatory.db is malformed or signature is missing/invalid\n");
		set_error = -EINVAL;
	}

	rtnl_lock();
	if (regdb && !IS_ERR(regdb)) {
		/* negative case - a bug
		 * positive case - can happen due to race in case of multiple cb's in
		 * queue, due to usage of asynchronous callback
		 *
		 * Either case, just restore and free new db.
		 */
	} else if (set_error) {
		regdb = ERR_PTR(set_error);
	} else if (fw) {
		db = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (db) {
			regdb = db;
			restore = context && query_regdb(context);
		} else {
			restore = true;
		}
	}

	if (restore)
		restore_regulatory_settings(true, false);

	rtnl_unlock();

	kfree(context);

	release_firmware(fw);
}

MODULE_FIRMWARE("regulatory.db");

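/*
 * Look up @alpha2 in the firmware regulatory database, requesting
 * regulatory.db asynchronously if it has not been loaded yet.
 */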
static int query_regdb_file(const char *alpha2)
{
	int err;

	ASSERT_RTNL();

	if (regdb)
		return query_regdb(alpha2);

	alpha2 = kmemdup(alpha2, 2, GFP_KERNEL);
	if (!alpha2)
		return -ENOMEM;

	err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db",
				      &reg_pdev->dev, GFP_KERNEL,
				      (void *)alpha2, regdb_fw_cb);
	if (err)
		kfree(alpha2);

	return err;
}

int reg_reload_regdb(void)
{
	const struct firmware *fw;
	void *db;
	int err;

	err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
	if (err)
		return err;

	if (!valid_regdb(fw->data, fw->size)) {
		err = -ENODATA;
		goto out;
	}

	db = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!db) {
		err = -ENOMEM;
		goto out;
	}

	rtnl_lock();
	if (!IS_ERR_OR_NULL(regdb))
		kfree(regdb);
	regdb = db;
	rtnl_unlock();

out:
	release_firmware(fw);
	return err;
}

static bool reg_query_database(struct regulatory_request *request)
{
	if (query_regdb_file(request->alpha2) == 0)
		return true;

	if (call_crda(request->alpha2) == 0)
		return true;

	return false;
}

bool reg_is_valid_request(const char *alpha2)
{
	struct regulatory_request *lr = get_last_request();

	if (!lr || lr->processed)
		return false;

	return alpha2_equal(lr->alpha2, alpha2);
}

static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
{
	struct regulatory_request *lr = get_last_request();

	/*
	 * Follow the driver's regulatory domain, if present, unless a country
	 * IE has been processed or a user wants to help compliance further
	 */
	if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    lr->initiator != NL80211_REGDOM_SET_BY_USER &&
	    wiphy->regd)
		return get_wiphy_regdom(wiphy);

	return get_cfg80211_regdom();
}

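/*
 * Compute the widest contiguous frequency span containing @rule by
 * merging it with adjacent rules in @rd whose ranges touch or overlap.
 */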
1156*4882a593Smuzhiyun static unsigned int
reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain * rd,const struct ieee80211_reg_rule * rule)1157*4882a593Smuzhiyun reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd,
1158*4882a593Smuzhiyun const struct ieee80211_reg_rule *rule)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range = &rule->freq_range;
1161*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range_tmp;
1162*4882a593Smuzhiyun const struct ieee80211_reg_rule *tmp;
1163*4882a593Smuzhiyun u32 start_freq, end_freq, idx, no;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun for (idx = 0; idx < rd->n_reg_rules; idx++)
1166*4882a593Smuzhiyun if (rule == &rd->reg_rules[idx])
1167*4882a593Smuzhiyun break;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (idx == rd->n_reg_rules)
1170*4882a593Smuzhiyun return 0;
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun /* get start_freq */
1173*4882a593Smuzhiyun no = idx;
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun while (no) {
1176*4882a593Smuzhiyun tmp = &rd->reg_rules[--no];
1177*4882a593Smuzhiyun freq_range_tmp = &tmp->freq_range;
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz)
1180*4882a593Smuzhiyun break;
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun freq_range = freq_range_tmp;
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun start_freq = freq_range->start_freq_khz;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun /* get end_freq */
1188*4882a593Smuzhiyun freq_range = &rule->freq_range;
1189*4882a593Smuzhiyun no = idx;
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun while (no < rd->n_reg_rules - 1) {
1192*4882a593Smuzhiyun tmp = &rd->reg_rules[++no];
1193*4882a593Smuzhiyun freq_range_tmp = &tmp->freq_range;
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz)
1196*4882a593Smuzhiyun break;
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun freq_range = freq_range_tmp;
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun end_freq = freq_range->end_freq_khz;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun return end_freq - start_freq;
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun
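/* Like reg_get_max_bandwidth_from_range(), but additionally clamp the
 * result by the rule's own bandwidth-limiting flags: NO_160MHZ caps it at
 * 80 MHz, NO_80MHZ at 40 MHz, and NO_HT40MINUS together with NO_HT40PLUS
 * at 20 MHz.
 */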
1206*4882a593Smuzhiyun unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
1207*4882a593Smuzhiyun const struct ieee80211_reg_rule *rule)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule);
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun if (rule->flags & NL80211_RRF_NO_160MHZ)
1212*4882a593Smuzhiyun bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80));
1213*4882a593Smuzhiyun if (rule->flags & NL80211_RRF_NO_80MHZ)
1214*4882a593Smuzhiyun bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40));
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun /*
1217*4882a593Smuzhiyun * HT40+/HT40- limits are handled per-channel. Only limit BW if both
1218*4882a593Smuzhiyun * are not allowed.
1219*4882a593Smuzhiyun */
1220*4882a593Smuzhiyun if (rule->flags & NL80211_RRF_NO_HT40MINUS &&
1221*4882a593Smuzhiyun rule->flags & NL80211_RRF_NO_HT40PLUS)
1222*4882a593Smuzhiyun bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20));
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun return bw;
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun /* Sanity check on a regulatory rule */
1228*4882a593Smuzhiyun static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range = &rule->freq_range;
1231*4882a593Smuzhiyun u32 freq_diff;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0)
1234*4882a593Smuzhiyun return false;
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun if (freq_range->start_freq_khz > freq_range->end_freq_khz)
1237*4882a593Smuzhiyun return false;
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
1242*4882a593Smuzhiyun freq_range->max_bandwidth_khz > freq_diff)
1243*4882a593Smuzhiyun return false;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun return true;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun
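/* Sanity check on a full regulatory domain: it must carry at least one
 * and at most NL80211_MAX_SUPP_REG_RULES rules, and every rule must pass
 * is_valid_reg_rule().
 */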
1248*4882a593Smuzhiyun static bool is_valid_rd(const struct ieee80211_regdomain *rd)
1249*4882a593Smuzhiyun {
1250*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule = NULL;
1251*4882a593Smuzhiyun unsigned int i;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun if (!rd->n_reg_rules)
1254*4882a593Smuzhiyun return false;
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
1257*4882a593Smuzhiyun return false;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun for (i = 0; i < rd->n_reg_rules; i++) {
1260*4882a593Smuzhiyun reg_rule = &rd->reg_rules[i];
1261*4882a593Smuzhiyun if (!is_valid_reg_rule(reg_rule))
1262*4882a593Smuzhiyun return false;
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun return true;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /**
1269*4882a593Smuzhiyun * freq_in_rule_band - tells us if a frequency is in a frequency band
1270*4882a593Smuzhiyun * @freq_range: frequency rule we want to query
1271*4882a593Smuzhiyun * @freq_khz: frequency we are inquiring about
1272*4882a593Smuzhiyun *
1273*4882a593Smuzhiyun * This lets us know if a specific frequency rule is or is not relevant to
1274*4882a593Smuzhiyun * a specific frequency's band. Bands are device specific and artificial
1275*4882a593Smuzhiyun  * definitions (the "2.4 GHz band", the "5 GHz band" and the "60 GHz band"),
1276*4882a593Smuzhiyun * however it is safe for now to assume that a frequency rule should not be
1277*4882a593Smuzhiyun * part of a frequency's band if the start freq or end freq are off by more
1278*4882a593Smuzhiyun * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
1279*4882a593Smuzhiyun * 60 GHz band.
1280*4882a593Smuzhiyun * This resolution can be lowered and should be considered as we add
1281*4882a593Smuzhiyun * regulatory rule support for other "bands".
1282*4882a593Smuzhiyun **/
1283*4882a593Smuzhiyun static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
1284*4882a593Smuzhiyun u32 freq_khz)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun #define ONE_GHZ_IN_KHZ 1000000
1287*4882a593Smuzhiyun /*
1288*4882a593Smuzhiyun * From 802.11ad: directional multi-gigabit (DMG):
1289*4882a593Smuzhiyun * Pertaining to operation in a frequency band containing a channel
1290*4882a593Smuzhiyun * with the Channel starting frequency above 45 GHz.
1291*4882a593Smuzhiyun */
1292*4882a593Smuzhiyun u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
1293*4882a593Smuzhiyun 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
1294*4882a593Smuzhiyun if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
1295*4882a593Smuzhiyun return true;
1296*4882a593Smuzhiyun if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
1297*4882a593Smuzhiyun return true;
1298*4882a593Smuzhiyun return false;
1299*4882a593Smuzhiyun #undef ONE_GHZ_IN_KHZ
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun
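/* Worked example for freq_in_rule_band() above (illustrative numbers):
 * for freq_khz = 2437000 (2.4 GHz channel 6) the limit is 2 GHz, so a
 * rule spanning 2400-2483.5 MHz counts as part of that band, while a
 * 5 GHz-only rule starting at 5170 MHz does not.
 */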
1302*4882a593Smuzhiyun /*
1303*4882a593Smuzhiyun * Later on we can perhaps use the more restrictive DFS
1304*4882a593Smuzhiyun * region but we don't have information for that yet so
1305*4882a593Smuzhiyun * for now simply disallow conflicts.
1306*4882a593Smuzhiyun */
1307*4882a593Smuzhiyun static enum nl80211_dfs_regions
1308*4882a593Smuzhiyun reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
1309*4882a593Smuzhiyun const enum nl80211_dfs_regions dfs_region2)
1310*4882a593Smuzhiyun {
1311*4882a593Smuzhiyun if (dfs_region1 != dfs_region2)
1312*4882a593Smuzhiyun return NL80211_DFS_UNSET;
1313*4882a593Smuzhiyun return dfs_region1;
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun
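/* Intersect two WMM AC parameter sets by keeping the more restrictive
 * value of each field: the larger cw_min/cw_max/aifsn and the smaller
 * channel occupancy time (cot).
 */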
1316*4882a593Smuzhiyun static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
1317*4882a593Smuzhiyun const struct ieee80211_wmm_ac *wmm_ac2,
1318*4882a593Smuzhiyun struct ieee80211_wmm_ac *intersect)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
1321*4882a593Smuzhiyun intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
1322*4882a593Smuzhiyun intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
1323*4882a593Smuzhiyun intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun /*
1327*4882a593Smuzhiyun * Helper for regdom_intersect(), this does the real
1328*4882a593Smuzhiyun * mathematical intersection fun
1329*4882a593Smuzhiyun */
1330*4882a593Smuzhiyun static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
1331*4882a593Smuzhiyun const struct ieee80211_regdomain *rd2,
1332*4882a593Smuzhiyun const struct ieee80211_reg_rule *rule1,
1333*4882a593Smuzhiyun const struct ieee80211_reg_rule *rule2,
1334*4882a593Smuzhiyun struct ieee80211_reg_rule *intersected_rule)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range1, *freq_range2;
1337*4882a593Smuzhiyun struct ieee80211_freq_range *freq_range;
1338*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule1, *power_rule2;
1339*4882a593Smuzhiyun struct ieee80211_power_rule *power_rule;
1340*4882a593Smuzhiyun const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
1341*4882a593Smuzhiyun struct ieee80211_wmm_rule *wmm_rule;
1342*4882a593Smuzhiyun u32 freq_diff, max_bandwidth1, max_bandwidth2;
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun freq_range1 = &rule1->freq_range;
1345*4882a593Smuzhiyun freq_range2 = &rule2->freq_range;
1346*4882a593Smuzhiyun freq_range = &intersected_rule->freq_range;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun power_rule1 = &rule1->power_rule;
1349*4882a593Smuzhiyun power_rule2 = &rule2->power_rule;
1350*4882a593Smuzhiyun power_rule = &intersected_rule->power_rule;
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun wmm_rule1 = &rule1->wmm_rule;
1353*4882a593Smuzhiyun wmm_rule2 = &rule2->wmm_rule;
1354*4882a593Smuzhiyun wmm_rule = &intersected_rule->wmm_rule;
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
1357*4882a593Smuzhiyun freq_range2->start_freq_khz);
1358*4882a593Smuzhiyun freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
1359*4882a593Smuzhiyun freq_range2->end_freq_khz);
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun max_bandwidth1 = freq_range1->max_bandwidth_khz;
1362*4882a593Smuzhiyun max_bandwidth2 = freq_range2->max_bandwidth_khz;
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun if (rule1->flags & NL80211_RRF_AUTO_BW)
1365*4882a593Smuzhiyun max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1);
1366*4882a593Smuzhiyun if (rule2->flags & NL80211_RRF_AUTO_BW)
1367*4882a593Smuzhiyun max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2);
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2);
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun intersected_rule->flags = rule1->flags | rule2->flags;
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun /*
1374*4882a593Smuzhiyun * In case NL80211_RRF_AUTO_BW requested for both rules
1375*4882a593Smuzhiyun * set AUTO_BW in intersected rule also. Next we will
1376*4882a593Smuzhiyun * calculate BW correctly in handle_channel function.
1377*4882a593Smuzhiyun * In other case remove AUTO_BW flag while we calculate
1378*4882a593Smuzhiyun * maximum bandwidth correctly and auto calculation is
1379*4882a593Smuzhiyun * not required.
1380*4882a593Smuzhiyun */
1381*4882a593Smuzhiyun if ((rule1->flags & NL80211_RRF_AUTO_BW) &&
1382*4882a593Smuzhiyun (rule2->flags & NL80211_RRF_AUTO_BW))
1383*4882a593Smuzhiyun intersected_rule->flags |= NL80211_RRF_AUTO_BW;
1384*4882a593Smuzhiyun else
1385*4882a593Smuzhiyun intersected_rule->flags &= ~NL80211_RRF_AUTO_BW;
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
1388*4882a593Smuzhiyun if (freq_range->max_bandwidth_khz > freq_diff)
1389*4882a593Smuzhiyun freq_range->max_bandwidth_khz = freq_diff;
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun power_rule->max_eirp = min(power_rule1->max_eirp,
1392*4882a593Smuzhiyun power_rule2->max_eirp);
1393*4882a593Smuzhiyun power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
1394*4882a593Smuzhiyun power_rule2->max_antenna_gain);
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
1397*4882a593Smuzhiyun rule2->dfs_cac_ms);
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun if (rule1->has_wmm && rule2->has_wmm) {
1400*4882a593Smuzhiyun u8 ac;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1403*4882a593Smuzhiyun reg_wmm_rules_intersect(&wmm_rule1->client[ac],
1404*4882a593Smuzhiyun &wmm_rule2->client[ac],
1405*4882a593Smuzhiyun &wmm_rule->client[ac]);
1406*4882a593Smuzhiyun reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
1407*4882a593Smuzhiyun &wmm_rule2->ap[ac],
1408*4882a593Smuzhiyun &wmm_rule->ap[ac]);
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun intersected_rule->has_wmm = true;
1412*4882a593Smuzhiyun } else if (rule1->has_wmm) {
1413*4882a593Smuzhiyun *wmm_rule = *wmm_rule1;
1414*4882a593Smuzhiyun intersected_rule->has_wmm = true;
1415*4882a593Smuzhiyun } else if (rule2->has_wmm) {
1416*4882a593Smuzhiyun *wmm_rule = *wmm_rule2;
1417*4882a593Smuzhiyun intersected_rule->has_wmm = true;
1418*4882a593Smuzhiyun } else {
1419*4882a593Smuzhiyun intersected_rule->has_wmm = false;
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun if (!is_valid_reg_rule(intersected_rule))
1423*4882a593Smuzhiyun return -EINVAL;
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun return 0;
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun /* check whether old rule contains new rule */
1429*4882a593Smuzhiyun static bool rule_contains(struct ieee80211_reg_rule *r1,
1430*4882a593Smuzhiyun struct ieee80211_reg_rule *r2)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun /* for simplicity, currently consider only same flags */
1433*4882a593Smuzhiyun if (r1->flags != r2->flags)
1434*4882a593Smuzhiyun return false;
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun /* verify r1 is more restrictive */
1437*4882a593Smuzhiyun if ((r1->power_rule.max_antenna_gain >
1438*4882a593Smuzhiyun r2->power_rule.max_antenna_gain) ||
1439*4882a593Smuzhiyun r1->power_rule.max_eirp > r2->power_rule.max_eirp)
1440*4882a593Smuzhiyun return false;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun /* make sure r2's range is contained within r1 */
1443*4882a593Smuzhiyun if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz ||
1444*4882a593Smuzhiyun r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz)
1445*4882a593Smuzhiyun return false;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun /* and finally verify that r1.max_bw >= r2.max_bw */
1448*4882a593Smuzhiyun if (r1->freq_range.max_bandwidth_khz <
1449*4882a593Smuzhiyun r2->freq_range.max_bandwidth_khz)
1450*4882a593Smuzhiyun return false;
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun return true;
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun /* add or extend current rules. do nothing if rule is already contained */
1456*4882a593Smuzhiyun static void add_rule(struct ieee80211_reg_rule *rule,
1457*4882a593Smuzhiyun struct ieee80211_reg_rule *reg_rules, u32 *n_rules)
1458*4882a593Smuzhiyun {
1459*4882a593Smuzhiyun struct ieee80211_reg_rule *tmp_rule;
1460*4882a593Smuzhiyun int i;
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun for (i = 0; i < *n_rules; i++) {
1463*4882a593Smuzhiyun 		tmp_rule = &reg_rules[i];
1464*4882a593Smuzhiyun /* rule is already contained - do nothing */
1465*4882a593Smuzhiyun if (rule_contains(tmp_rule, rule))
1466*4882a593Smuzhiyun return;
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun /* extend rule if possible */
1469*4882a593Smuzhiyun if (rule_contains(rule, tmp_rule)) {
1470*4882a593Smuzhiyun memcpy(tmp_rule, rule, sizeof(*rule));
1471*4882a593Smuzhiyun return;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun 	memcpy(&reg_rules[*n_rules], rule, sizeof(*rule));
1476*4882a593Smuzhiyun (*n_rules)++;
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun /**
1480*4882a593Smuzhiyun * regdom_intersect - do the intersection between two regulatory domains
1481*4882a593Smuzhiyun * @rd1: first regulatory domain
1482*4882a593Smuzhiyun * @rd2: second regulatory domain
1483*4882a593Smuzhiyun *
1484*4882a593Smuzhiyun * Use this function to get the intersection between two regulatory domains.
1485*4882a593Smuzhiyun * Once completed we will mark the alpha2 for the rd as intersected, "98",
1486*4882a593Smuzhiyun * as no one single alpha2 can represent this regulatory domain.
1487*4882a593Smuzhiyun *
1488*4882a593Smuzhiyun * Returns a pointer to the regulatory domain structure which will hold the
1489*4882a593Smuzhiyun * resulting intersection of rules between rd1 and rd2. We will
1490*4882a593Smuzhiyun * kzalloc() this structure for you.
1491*4882a593Smuzhiyun */
1492*4882a593Smuzhiyun static struct ieee80211_regdomain *
1493*4882a593Smuzhiyun regdom_intersect(const struct ieee80211_regdomain *rd1,
1494*4882a593Smuzhiyun const struct ieee80211_regdomain *rd2)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun int r;
1497*4882a593Smuzhiyun unsigned int x, y;
1498*4882a593Smuzhiyun unsigned int num_rules = 0;
1499*4882a593Smuzhiyun const struct ieee80211_reg_rule *rule1, *rule2;
1500*4882a593Smuzhiyun struct ieee80211_reg_rule intersected_rule;
1501*4882a593Smuzhiyun struct ieee80211_regdomain *rd;
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun if (!rd1 || !rd2)
1504*4882a593Smuzhiyun return NULL;
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun /*
1507*4882a593Smuzhiyun * First we get a count of the rules we'll need, then we actually
1508*4882a593Smuzhiyun 	 * build them. This is so we can malloc() and free() a
1509*4882a593Smuzhiyun * regdomain once. The reason we use reg_rules_intersect() here
1510*4882a593Smuzhiyun * is it will return -EINVAL if the rule computed makes no sense.
1511*4882a593Smuzhiyun * All rules that do check out OK are valid.
1512*4882a593Smuzhiyun */
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun for (x = 0; x < rd1->n_reg_rules; x++) {
1515*4882a593Smuzhiyun rule1 = &rd1->reg_rules[x];
1516*4882a593Smuzhiyun for (y = 0; y < rd2->n_reg_rules; y++) {
1517*4882a593Smuzhiyun rule2 = &rd2->reg_rules[y];
1518*4882a593Smuzhiyun if (!reg_rules_intersect(rd1, rd2, rule1, rule2,
1519*4882a593Smuzhiyun &intersected_rule))
1520*4882a593Smuzhiyun num_rules++;
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun if (!num_rules)
1525*4882a593Smuzhiyun return NULL;
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun rd = kzalloc(struct_size(rd, reg_rules, num_rules), GFP_KERNEL);
1528*4882a593Smuzhiyun if (!rd)
1529*4882a593Smuzhiyun return NULL;
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun for (x = 0; x < rd1->n_reg_rules; x++) {
1532*4882a593Smuzhiyun rule1 = &rd1->reg_rules[x];
1533*4882a593Smuzhiyun for (y = 0; y < rd2->n_reg_rules; y++) {
1534*4882a593Smuzhiyun rule2 = &rd2->reg_rules[y];
1535*4882a593Smuzhiyun r = reg_rules_intersect(rd1, rd2, rule1, rule2,
1536*4882a593Smuzhiyun &intersected_rule);
1537*4882a593Smuzhiyun /*
1538*4882a593Smuzhiyun 			 * No need to memset the intersected rule here as
1539*4882a593Smuzhiyun * we're not using the stack anymore
1540*4882a593Smuzhiyun */
1541*4882a593Smuzhiyun if (r)
1542*4882a593Smuzhiyun continue;
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun add_rule(&intersected_rule, rd->reg_rules,
1545*4882a593Smuzhiyun &rd->n_reg_rules);
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun rd->alpha2[0] = '9';
1550*4882a593Smuzhiyun rd->alpha2[1] = '8';
1551*4882a593Smuzhiyun rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region,
1552*4882a593Smuzhiyun rd2->dfs_region);
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun return rd;
1555*4882a593Smuzhiyun }
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun /*
1558*4882a593Smuzhiyun * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
1559*4882a593Smuzhiyun * want to just have the channel structure use these
1560*4882a593Smuzhiyun */
1561*4882a593Smuzhiyun static u32 map_regdom_flags(u32 rd_flags)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun u32 channel_flags = 0;
1564*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_IR_ALL)
1565*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_IR;
1566*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_DFS)
1567*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_RADAR;
1568*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_OFDM)
1569*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_OFDM;
1570*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_OUTDOOR)
1571*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
1572*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_IR_CONCURRENT)
1573*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_IR_CONCURRENT;
1574*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_HT40MINUS)
1575*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_HT40MINUS;
1576*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_HT40PLUS)
1577*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_HT40PLUS;
1578*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_80MHZ)
1579*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_80MHZ;
1580*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_160MHZ)
1581*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_160MHZ;
1582*4882a593Smuzhiyun if (rd_flags & NL80211_RRF_NO_HE)
1583*4882a593Smuzhiyun channel_flags |= IEEE80211_CHAN_NO_HE;
1584*4882a593Smuzhiyun return channel_flags;
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun
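/* Find the first rule in @regd whose frequency range fits @bw around
 * @center_freq (both in kHz). Returns -ERANGE if no rule lies in
 * center_freq's band at all, and -EINVAL if a band rule exists but the
 * requested bandwidth does not fit (or if @regd is NULL).
 */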
1587*4882a593Smuzhiyun static const struct ieee80211_reg_rule *
1588*4882a593Smuzhiyun freq_reg_info_regd(u32 center_freq,
1589*4882a593Smuzhiyun const struct ieee80211_regdomain *regd, u32 bw)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun int i;
1592*4882a593Smuzhiyun bool band_rule_found = false;
1593*4882a593Smuzhiyun bool bw_fits = false;
1594*4882a593Smuzhiyun
1595*4882a593Smuzhiyun if (!regd)
1596*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1597*4882a593Smuzhiyun
1598*4882a593Smuzhiyun for (i = 0; i < regd->n_reg_rules; i++) {
1599*4882a593Smuzhiyun const struct ieee80211_reg_rule *rr;
1600*4882a593Smuzhiyun const struct ieee80211_freq_range *fr = NULL;
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun 		rr = &regd->reg_rules[i];
1603*4882a593Smuzhiyun fr = &rr->freq_range;
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun /*
1606*4882a593Smuzhiyun * We only need to know if one frequency rule was
1607*4882a593Smuzhiyun 		 * in center_freq's band; that's enough, so let's
1608*4882a593Smuzhiyun * not overwrite it once found
1609*4882a593Smuzhiyun */
1610*4882a593Smuzhiyun if (!band_rule_found)
1611*4882a593Smuzhiyun band_rule_found = freq_in_rule_band(fr, center_freq);
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw);
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun if (band_rule_found && bw_fits)
1616*4882a593Smuzhiyun return rr;
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun
1619*4882a593Smuzhiyun if (!band_rule_found)
1620*4882a593Smuzhiyun return ERR_PTR(-ERANGE);
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun
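/* Look up the reg rule for @center_freq, retrying with progressively
 * smaller bandwidths (from 20 MHz down to 1 MHz) until one of at least
 * @min_bw fits; if none fits, the error of the last attempt is returned.
 */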
1625*4882a593Smuzhiyun static const struct ieee80211_reg_rule *
1626*4882a593Smuzhiyun __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
1629*4882a593Smuzhiyun const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20};
1630*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule;
1631*4882a593Smuzhiyun int i = ARRAY_SIZE(bws) - 1;
1632*4882a593Smuzhiyun u32 bw;
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun for (bw = MHZ_TO_KHZ(bws[i]); bw >= min_bw; bw = MHZ_TO_KHZ(bws[i--])) {
1635*4882a593Smuzhiyun reg_rule = freq_reg_info_regd(center_freq, regd, bw);
1636*4882a593Smuzhiyun if (!IS_ERR(reg_rule))
1637*4882a593Smuzhiyun return reg_rule;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun return reg_rule;
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun
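/* Illustrative usage sketch (not part of this file): a caller passes the
 * center frequency in kHz and must check for an ERR_PTR result, e.g.:
 *
 *	const struct ieee80211_reg_rule *rule;
 *
 *	rule = freq_reg_info(wiphy, MHZ_TO_KHZ(2412));
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	max_eirp_mbm = rule->power_rule.max_eirp;
 *
 * (max_eirp_mbm is a hypothetical caller variable.) The minimum assumed
 * bandwidth is 1 MHz below 1 GHz (S1G) and 20 MHz otherwise.
 */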
1643*4882a593Smuzhiyun const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
1644*4882a593Smuzhiyun u32 center_freq)
1645*4882a593Smuzhiyun {
1646*4882a593Smuzhiyun u32 min_bw = center_freq < MHZ_TO_KHZ(1000) ? 1 : 20;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(min_bw));
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun EXPORT_SYMBOL(freq_reg_info);
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun switch (initiator) {
1655*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_CORE:
1656*4882a593Smuzhiyun return "core";
1657*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_USER:
1658*4882a593Smuzhiyun return "user";
1659*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_DRIVER:
1660*4882a593Smuzhiyun return "driver";
1661*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1662*4882a593Smuzhiyun return "country element";
1663*4882a593Smuzhiyun default:
1664*4882a593Smuzhiyun WARN_ON(1);
1665*4882a593Smuzhiyun return "bug";
1666*4882a593Smuzhiyun }
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun EXPORT_SYMBOL(reg_initiator_name);
1669*4882a593Smuzhiyun
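/* Translate a regulatory rule's bandwidth constraints into per-channel
 * IEEE80211_CHAN_* flags for @chan. For S1G channels the allowed widths
 * (1/2/4/8/16 MHz) are derived from the largest width that cleanly
 * divides the rule's frequency range at this channel; for other bands,
 * NO_10MHZ/NO_20MHZ/NO_HT40/NO_80MHZ/NO_160MHZ flags are set whenever the
 * rule's maximum bandwidth is below the respective width.
 */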
1670*4882a593Smuzhiyun static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd,
1671*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule,
1672*4882a593Smuzhiyun const struct ieee80211_channel *chan)
1673*4882a593Smuzhiyun {
1674*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range = NULL;
1675*4882a593Smuzhiyun u32 max_bandwidth_khz, center_freq_khz, bw_flags = 0;
1676*4882a593Smuzhiyun bool is_s1g = chan->band == NL80211_BAND_S1GHZ;
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun 	freq_range = &reg_rule->freq_range;
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun max_bandwidth_khz = freq_range->max_bandwidth_khz;
1681*4882a593Smuzhiyun center_freq_khz = ieee80211_channel_to_khz(chan);
1682*4882a593Smuzhiyun /* Check if auto calculation requested */
1683*4882a593Smuzhiyun if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1684*4882a593Smuzhiyun max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun 	/* If we get a reg_rule we can assume that at least 5 MHz fits */
1687*4882a593Smuzhiyun if (!cfg80211_does_bw_fit_range(freq_range,
1688*4882a593Smuzhiyun center_freq_khz,
1689*4882a593Smuzhiyun MHZ_TO_KHZ(10)))
1690*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_10MHZ;
1691*4882a593Smuzhiyun if (!cfg80211_does_bw_fit_range(freq_range,
1692*4882a593Smuzhiyun center_freq_khz,
1693*4882a593Smuzhiyun MHZ_TO_KHZ(20)))
1694*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun if (is_s1g) {
1697*4882a593Smuzhiyun 		/* S1G is strict about non-overlapping channels. We can
1698*4882a593Smuzhiyun * calculate which bandwidth is allowed per channel by finding
1699*4882a593Smuzhiyun * the largest bandwidth which cleanly divides the freq_range.
1700*4882a593Smuzhiyun */
1701*4882a593Smuzhiyun int edge_offset;
1702*4882a593Smuzhiyun int ch_bw = max_bandwidth_khz;
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun while (ch_bw) {
1705*4882a593Smuzhiyun edge_offset = (center_freq_khz - ch_bw / 2) -
1706*4882a593Smuzhiyun freq_range->start_freq_khz;
1707*4882a593Smuzhiyun if (edge_offset % ch_bw == 0) {
1708*4882a593Smuzhiyun switch (KHZ_TO_MHZ(ch_bw)) {
1709*4882a593Smuzhiyun case 1:
1710*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_1MHZ;
1711*4882a593Smuzhiyun break;
1712*4882a593Smuzhiyun case 2:
1713*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_2MHZ;
1714*4882a593Smuzhiyun break;
1715*4882a593Smuzhiyun case 4:
1716*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_4MHZ;
1717*4882a593Smuzhiyun break;
1718*4882a593Smuzhiyun case 8:
1719*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_8MHZ;
1720*4882a593Smuzhiyun break;
1721*4882a593Smuzhiyun case 16:
1722*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_16MHZ;
1723*4882a593Smuzhiyun break;
1724*4882a593Smuzhiyun default:
1725*4882a593Smuzhiyun /* If we got here, no bandwidths fit on
1726*4882a593Smuzhiyun * this frequency, ie. band edge.
1727*4882a593Smuzhiyun */
1728*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_DISABLED;
1729*4882a593Smuzhiyun break;
1730*4882a593Smuzhiyun }
1731*4882a593Smuzhiyun break;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun ch_bw /= 2;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun } else {
1736*4882a593Smuzhiyun if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1737*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_10MHZ;
1738*4882a593Smuzhiyun if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1739*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1740*4882a593Smuzhiyun if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1741*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_HT40;
1742*4882a593Smuzhiyun if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1743*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1744*4882a593Smuzhiyun if (max_bandwidth_khz < MHZ_TO_KHZ(160))
1745*4882a593Smuzhiyun bw_flags |= IEEE80211_CHAN_NO_160MHZ;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun return bw_flags;
1748*4882a593Smuzhiyun }
1749*4882a593Smuzhiyun
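/* Apply a single matching regulatory rule to @chan. For a strictly
 * regulated driver processing its own hint this (re)writes the channel's
 * original flags and power limits; otherwise the rule is combined with
 * the channel's original capabilities, taking the minimum of the
 * permitted power values.
 */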
1750*4882a593Smuzhiyun static void handle_channel_single_rule(struct wiphy *wiphy,
1751*4882a593Smuzhiyun enum nl80211_reg_initiator initiator,
1752*4882a593Smuzhiyun struct ieee80211_channel *chan,
1753*4882a593Smuzhiyun u32 flags,
1754*4882a593Smuzhiyun struct regulatory_request *lr,
1755*4882a593Smuzhiyun struct wiphy *request_wiphy,
1756*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun u32 bw_flags = 0;
1759*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule = NULL;
1760*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun regd = reg_get_regdomain(wiphy);
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun 	power_rule = &reg_rule->power_rule;
1765*4882a593Smuzhiyun bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
1768*4882a593Smuzhiyun request_wiphy && request_wiphy == wiphy &&
1769*4882a593Smuzhiyun request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
1770*4882a593Smuzhiyun /*
1771*4882a593Smuzhiyun * This guarantees the driver's requested regulatory domain
1772*4882a593Smuzhiyun * will always be used as a base for further regulatory
1773*4882a593Smuzhiyun * settings
1774*4882a593Smuzhiyun */
1775*4882a593Smuzhiyun chan->flags = chan->orig_flags =
1776*4882a593Smuzhiyun map_regdom_flags(reg_rule->flags) | bw_flags;
1777*4882a593Smuzhiyun chan->max_antenna_gain = chan->orig_mag =
1778*4882a593Smuzhiyun (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1779*4882a593Smuzhiyun chan->max_reg_power = chan->max_power = chan->orig_mpwr =
1780*4882a593Smuzhiyun (int) MBM_TO_DBM(power_rule->max_eirp);
1781*4882a593Smuzhiyun
1782*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_RADAR) {
1783*4882a593Smuzhiyun chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1784*4882a593Smuzhiyun if (reg_rule->dfs_cac_ms)
1785*4882a593Smuzhiyun chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
1786*4882a593Smuzhiyun }
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun return;
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun chan->dfs_state = NL80211_DFS_USABLE;
1792*4882a593Smuzhiyun chan->dfs_state_entered = jiffies;
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun chan->beacon_found = false;
1795*4882a593Smuzhiyun chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
1796*4882a593Smuzhiyun chan->max_antenna_gain =
1797*4882a593Smuzhiyun min_t(int, chan->orig_mag,
1798*4882a593Smuzhiyun MBI_TO_DBI(power_rule->max_antenna_gain));
1799*4882a593Smuzhiyun chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_RADAR) {
1802*4882a593Smuzhiyun if (reg_rule->dfs_cac_ms)
1803*4882a593Smuzhiyun chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
1804*4882a593Smuzhiyun else
1805*4882a593Smuzhiyun chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun if (chan->orig_mpwr) {
1809*4882a593Smuzhiyun /*
1810*4882a593Smuzhiyun * Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER
1811*4882a593Smuzhiyun * will always follow the passed country IE power settings.
1812*4882a593Smuzhiyun */
1813*4882a593Smuzhiyun if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1814*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER)
1815*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
1816*4882a593Smuzhiyun else
1817*4882a593Smuzhiyun chan->max_power = min(chan->orig_mpwr,
1818*4882a593Smuzhiyun chan->max_reg_power);
1819*4882a593Smuzhiyun } else
1820*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
1821*4882a593Smuzhiyun }
1822*4882a593Smuzhiyun
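/* Apply two adjacent regulatory rules to a channel that straddles their
 * common edge: flags from both rules are OR'ed, power limits take the
 * minimum of the two, the DFS CAC time takes the maximum, and the 10/20
 * MHz restrictions are re-evaluated against the combined frequency range.
 */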
1823*4882a593Smuzhiyun static void handle_channel_adjacent_rules(struct wiphy *wiphy,
1824*4882a593Smuzhiyun enum nl80211_reg_initiator initiator,
1825*4882a593Smuzhiyun struct ieee80211_channel *chan,
1826*4882a593Smuzhiyun u32 flags,
1827*4882a593Smuzhiyun struct regulatory_request *lr,
1828*4882a593Smuzhiyun struct wiphy *request_wiphy,
1829*4882a593Smuzhiyun const struct ieee80211_reg_rule *rrule1,
1830*4882a593Smuzhiyun const struct ieee80211_reg_rule *rrule2,
1831*4882a593Smuzhiyun struct ieee80211_freq_range *comb_range)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun u32 bw_flags1 = 0;
1834*4882a593Smuzhiyun u32 bw_flags2 = 0;
1835*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule1 = NULL;
1836*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule2 = NULL;
1837*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
1838*4882a593Smuzhiyun
1839*4882a593Smuzhiyun regd = reg_get_regdomain(wiphy);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun power_rule1 = &rrule1->power_rule;
1842*4882a593Smuzhiyun power_rule2 = &rrule2->power_rule;
1843*4882a593Smuzhiyun bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan);
1844*4882a593Smuzhiyun bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan);
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
1847*4882a593Smuzhiyun request_wiphy && request_wiphy == wiphy &&
1848*4882a593Smuzhiyun request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
1849*4882a593Smuzhiyun /* This guarantees the driver's requested regulatory domain
1850*4882a593Smuzhiyun * will always be used as a base for further regulatory
1851*4882a593Smuzhiyun * settings
1852*4882a593Smuzhiyun */
1853*4882a593Smuzhiyun chan->flags =
1854*4882a593Smuzhiyun map_regdom_flags(rrule1->flags) |
1855*4882a593Smuzhiyun map_regdom_flags(rrule2->flags) |
1856*4882a593Smuzhiyun bw_flags1 |
1857*4882a593Smuzhiyun bw_flags2;
1858*4882a593Smuzhiyun chan->orig_flags = chan->flags;
1859*4882a593Smuzhiyun chan->max_antenna_gain =
1860*4882a593Smuzhiyun min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain),
1861*4882a593Smuzhiyun MBI_TO_DBI(power_rule2->max_antenna_gain));
1862*4882a593Smuzhiyun chan->orig_mag = chan->max_antenna_gain;
1863*4882a593Smuzhiyun chan->max_reg_power =
1864*4882a593Smuzhiyun min_t(int, MBM_TO_DBM(power_rule1->max_eirp),
1865*4882a593Smuzhiyun MBM_TO_DBM(power_rule2->max_eirp));
1866*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
1867*4882a593Smuzhiyun chan->orig_mpwr = chan->max_reg_power;
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_RADAR) {
1870*4882a593Smuzhiyun chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1871*4882a593Smuzhiyun if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
1872*4882a593Smuzhiyun chan->dfs_cac_ms = max_t(unsigned int,
1873*4882a593Smuzhiyun rrule1->dfs_cac_ms,
1874*4882a593Smuzhiyun rrule2->dfs_cac_ms);
1875*4882a593Smuzhiyun }
1876*4882a593Smuzhiyun
1877*4882a593Smuzhiyun return;
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun chan->dfs_state = NL80211_DFS_USABLE;
1881*4882a593Smuzhiyun chan->dfs_state_entered = jiffies;
1882*4882a593Smuzhiyun
1883*4882a593Smuzhiyun chan->beacon_found = false;
1884*4882a593Smuzhiyun chan->flags = flags | bw_flags1 | bw_flags2 |
1885*4882a593Smuzhiyun map_regdom_flags(rrule1->flags) |
1886*4882a593Smuzhiyun map_regdom_flags(rrule2->flags);
1887*4882a593Smuzhiyun
1888*4882a593Smuzhiyun 	/* reg_rule_to_chan_bw_flags() may forbid both 10 and 20 MHz here
1889*4882a593Smuzhiyun 	 * (otherwise this would not be an adjacent-rule case), so recheck here
1890*4882a593Smuzhiyun */
1891*4882a593Smuzhiyun if (cfg80211_does_bw_fit_range(comb_range,
1892*4882a593Smuzhiyun ieee80211_channel_to_khz(chan),
1893*4882a593Smuzhiyun MHZ_TO_KHZ(10)))
1894*4882a593Smuzhiyun chan->flags &= ~IEEE80211_CHAN_NO_10MHZ;
1895*4882a593Smuzhiyun if (cfg80211_does_bw_fit_range(comb_range,
1896*4882a593Smuzhiyun ieee80211_channel_to_khz(chan),
1897*4882a593Smuzhiyun MHZ_TO_KHZ(20)))
1898*4882a593Smuzhiyun chan->flags &= ~IEEE80211_CHAN_NO_20MHZ;
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun chan->max_antenna_gain =
1901*4882a593Smuzhiyun min_t(int, chan->orig_mag,
1902*4882a593Smuzhiyun min_t(int,
1903*4882a593Smuzhiyun MBI_TO_DBI(power_rule1->max_antenna_gain),
1904*4882a593Smuzhiyun MBI_TO_DBI(power_rule2->max_antenna_gain)));
1905*4882a593Smuzhiyun chan->max_reg_power = min_t(int,
1906*4882a593Smuzhiyun MBM_TO_DBM(power_rule1->max_eirp),
1907*4882a593Smuzhiyun MBM_TO_DBM(power_rule2->max_eirp));
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_RADAR) {
1910*4882a593Smuzhiyun if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
1911*4882a593Smuzhiyun chan->dfs_cac_ms = max_t(unsigned int,
1912*4882a593Smuzhiyun rrule1->dfs_cac_ms,
1913*4882a593Smuzhiyun rrule2->dfs_cac_ms);
1914*4882a593Smuzhiyun else
1915*4882a593Smuzhiyun chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1916*4882a593Smuzhiyun }
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun if (chan->orig_mpwr) {
1919*4882a593Smuzhiyun /* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER
1920*4882a593Smuzhiyun * will always follow the passed country IE power settings.
1921*4882a593Smuzhiyun */
1922*4882a593Smuzhiyun if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1923*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER)
1924*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
1925*4882a593Smuzhiyun else
1926*4882a593Smuzhiyun chan->max_power = min(chan->orig_mpwr,
1927*4882a593Smuzhiyun chan->max_reg_power);
1928*4882a593Smuzhiyun } else {
1929*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
1930*4882a593Smuzhiyun }
1931*4882a593Smuzhiyun }
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun /* Note that right now we assume the desired channel bandwidth
1934*4882a593Smuzhiyun * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1935*4882a593Smuzhiyun * per channel, the primary and the extension channel).
1936*4882a593Smuzhiyun */
1937*4882a593Smuzhiyun static void handle_channel(struct wiphy *wiphy,
1938*4882a593Smuzhiyun enum nl80211_reg_initiator initiator,
1939*4882a593Smuzhiyun struct ieee80211_channel *chan)
1940*4882a593Smuzhiyun {
1941*4882a593Smuzhiyun const u32 orig_chan_freq = ieee80211_channel_to_khz(chan);
1942*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
1943*4882a593Smuzhiyun struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
1944*4882a593Smuzhiyun const struct ieee80211_reg_rule *rrule = NULL;
1945*4882a593Smuzhiyun const struct ieee80211_reg_rule *rrule1 = NULL;
1946*4882a593Smuzhiyun const struct ieee80211_reg_rule *rrule2 = NULL;
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun u32 flags = chan->orig_flags;
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun rrule = freq_reg_info(wiphy, orig_chan_freq);
1951*4882a593Smuzhiyun if (IS_ERR(rrule)) {
1952*4882a593Smuzhiyun 		/* No exact rule matched: check for an adjacent match by
1953*4882a593Smuzhiyun 		 * getting the rules for chan - 20 MHz and chan + 20 MHz
1954*4882a593Smuzhiyun 		 * and testing whether those reg rules are adjacent
1955*4882a593Smuzhiyun */
1956*4882a593Smuzhiyun rrule1 = freq_reg_info(wiphy,
1957*4882a593Smuzhiyun orig_chan_freq - MHZ_TO_KHZ(20));
1958*4882a593Smuzhiyun rrule2 = freq_reg_info(wiphy,
1959*4882a593Smuzhiyun orig_chan_freq + MHZ_TO_KHZ(20));
1960*4882a593Smuzhiyun if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) {
1961*4882a593Smuzhiyun struct ieee80211_freq_range comb_range;
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun if (rrule1->freq_range.end_freq_khz !=
1964*4882a593Smuzhiyun rrule2->freq_range.start_freq_khz)
1965*4882a593Smuzhiyun goto disable_chan;
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun comb_range.start_freq_khz =
1968*4882a593Smuzhiyun rrule1->freq_range.start_freq_khz;
1969*4882a593Smuzhiyun comb_range.end_freq_khz =
1970*4882a593Smuzhiyun rrule2->freq_range.end_freq_khz;
1971*4882a593Smuzhiyun comb_range.max_bandwidth_khz =
1972*4882a593Smuzhiyun min_t(u32,
1973*4882a593Smuzhiyun rrule1->freq_range.max_bandwidth_khz,
1974*4882a593Smuzhiyun rrule2->freq_range.max_bandwidth_khz);
1975*4882a593Smuzhiyun
1976*4882a593Smuzhiyun if (!cfg80211_does_bw_fit_range(&comb_range,
1977*4882a593Smuzhiyun orig_chan_freq,
1978*4882a593Smuzhiyun MHZ_TO_KHZ(20)))
1979*4882a593Smuzhiyun goto disable_chan;
1980*4882a593Smuzhiyun
1981*4882a593Smuzhiyun handle_channel_adjacent_rules(wiphy, initiator, chan,
1982*4882a593Smuzhiyun flags, lr, request_wiphy,
1983*4882a593Smuzhiyun rrule1, rrule2,
1984*4882a593Smuzhiyun &comb_range);
1985*4882a593Smuzhiyun return;
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun disable_chan:
1989*4882a593Smuzhiyun /* We will disable all channels that do not match our
1990*4882a593Smuzhiyun * received regulatory rule unless the hint is coming
1991*4882a593Smuzhiyun * from a Country IE and the Country IE had no information
1992*4882a593Smuzhiyun * about a band. The IEEE 802.11 spec allows for an AP
1993*4882a593Smuzhiyun * to send only a subset of the regulatory rules allowed,
1994*4882a593Smuzhiyun * so an AP in the US that only supports 2.4 GHz may only send
1995*4882a593Smuzhiyun * a country IE with information for the 2.4 GHz band
1996*4882a593Smuzhiyun * while 5 GHz is still supported.
1997*4882a593Smuzhiyun */
1998*4882a593Smuzhiyun if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1999*4882a593Smuzhiyun PTR_ERR(rrule) == -ERANGE)
2000*4882a593Smuzhiyun return;
2001*4882a593Smuzhiyun
2002*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
2003*4882a593Smuzhiyun request_wiphy && request_wiphy == wiphy &&
2004*4882a593Smuzhiyun request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
2005*4882a593Smuzhiyun pr_debug("Disabling freq %d.%03d MHz for good\n",
2006*4882a593Smuzhiyun chan->center_freq, chan->freq_offset);
2007*4882a593Smuzhiyun chan->orig_flags |= IEEE80211_CHAN_DISABLED;
2008*4882a593Smuzhiyun chan->flags = chan->orig_flags;
2009*4882a593Smuzhiyun } else {
2010*4882a593Smuzhiyun pr_debug("Disabling freq %d.%03d MHz\n",
2011*4882a593Smuzhiyun chan->center_freq, chan->freq_offset);
2012*4882a593Smuzhiyun chan->flags |= IEEE80211_CHAN_DISABLED;
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun return;
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun handle_channel_single_rule(wiphy, initiator, chan, flags, lr,
2018*4882a593Smuzhiyun request_wiphy, rrule);
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun
2021*4882a593Smuzhiyun static void handle_band(struct wiphy *wiphy,
2022*4882a593Smuzhiyun enum nl80211_reg_initiator initiator,
2023*4882a593Smuzhiyun struct ieee80211_supported_band *sband)
2024*4882a593Smuzhiyun {
2025*4882a593Smuzhiyun unsigned int i;
2026*4882a593Smuzhiyun
2027*4882a593Smuzhiyun if (!sband)
2028*4882a593Smuzhiyun return;
2029*4882a593Smuzhiyun
2030*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++)
2031*4882a593Smuzhiyun handle_channel(wiphy, initiator, &sband->channels[i]);
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun static bool reg_request_cell_base(struct regulatory_request *request)
2035*4882a593Smuzhiyun {
2036*4882a593Smuzhiyun if (request->initiator != NL80211_REGDOM_SET_BY_USER)
2037*4882a593Smuzhiyun return false;
2038*4882a593Smuzhiyun return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
2039*4882a593Smuzhiyun }
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun bool reg_last_request_cell_base(void)
2042*4882a593Smuzhiyun {
2043*4882a593Smuzhiyun return reg_request_cell_base(get_last_request());
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyun #ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS
2047*4882a593Smuzhiyun /* Core specific check */
2048*4882a593Smuzhiyun static enum reg_request_treatment
2049*4882a593Smuzhiyun reg_ignore_cell_hint(struct regulatory_request *pending_request)
2050*4882a593Smuzhiyun {
2051*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun if (!reg_num_devs_support_basehint)
2054*4882a593Smuzhiyun return REG_REQ_IGNORE;
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyun if (reg_request_cell_base(lr) &&
2057*4882a593Smuzhiyun !regdom_changes(pending_request->alpha2))
2058*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun return REG_REQ_OK;
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun /* Device specific check */
2064*4882a593Smuzhiyun static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
2065*4882a593Smuzhiyun {
2066*4882a593Smuzhiyun return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
2067*4882a593Smuzhiyun }
2068*4882a593Smuzhiyun #else
2069*4882a593Smuzhiyun static enum reg_request_treatment
2070*4882a593Smuzhiyun reg_ignore_cell_hint(struct regulatory_request *pending_request)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun return REG_REQ_IGNORE;
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
2076*4882a593Smuzhiyun {
2077*4882a593Smuzhiyun return true;
2078*4882a593Smuzhiyun }
2079*4882a593Smuzhiyun #endif
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun static bool wiphy_strict_alpha2_regd(struct wiphy *wiphy)
2082*4882a593Smuzhiyun {
2083*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_STRICT_REG &&
2084*4882a593Smuzhiyun !(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG))
2085*4882a593Smuzhiyun return true;
2086*4882a593Smuzhiyun return false;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun static bool ignore_reg_update(struct wiphy *wiphy,
2090*4882a593Smuzhiyun enum nl80211_reg_initiator initiator)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
2095*4882a593Smuzhiyun return true;
2096*4882a593Smuzhiyun
2097*4882a593Smuzhiyun if (!lr) {
2098*4882a593Smuzhiyun pr_debug("Ignoring regulatory request set by %s since last_request is not set\n",
2099*4882a593Smuzhiyun reg_initiator_name(initiator));
2100*4882a593Smuzhiyun return true;
2101*4882a593Smuzhiyun }
2102*4882a593Smuzhiyun
2103*4882a593Smuzhiyun if (initiator == NL80211_REGDOM_SET_BY_CORE &&
2104*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
2105*4882a593Smuzhiyun pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n",
2106*4882a593Smuzhiyun reg_initiator_name(initiator));
2107*4882a593Smuzhiyun return true;
2108*4882a593Smuzhiyun }
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun /*
2111*4882a593Smuzhiyun * wiphy->regd will be set once the device has its own
2112*4882a593Smuzhiyun * desired regulatory domain set
2113*4882a593Smuzhiyun */
2114*4882a593Smuzhiyun if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd &&
2115*4882a593Smuzhiyun initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
2116*4882a593Smuzhiyun !is_world_regdom(lr->alpha2)) {
2117*4882a593Smuzhiyun pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n",
2118*4882a593Smuzhiyun reg_initiator_name(initiator));
2119*4882a593Smuzhiyun return true;
2120*4882a593Smuzhiyun }
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun if (reg_request_cell_base(lr))
2123*4882a593Smuzhiyun return reg_dev_ignore_cell_hint(wiphy);
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun return false;
2126*4882a593Smuzhiyun }
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun static bool reg_is_world_roaming(struct wiphy *wiphy)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun const struct ieee80211_regdomain *cr = get_cfg80211_regdom();
2131*4882a593Smuzhiyun const struct ieee80211_regdomain *wr = get_wiphy_regdom(wiphy);
2132*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun if (is_world_regdom(cr->alpha2) || (wr && is_world_regdom(wr->alpha2)))
2135*4882a593Smuzhiyun return true;
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun if (lr && lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
2138*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
2139*4882a593Smuzhiyun return true;
2140*4882a593Smuzhiyun
2141*4882a593Smuzhiyun return false;
2142*4882a593Smuzhiyun }
2143*4882a593Smuzhiyun
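/* Process a beacon hint for one channel: if a beacon was found on this
 * channel and the device is world roaming (and beacon hints are not
 * disabled for this wiphy), lift the IEEE80211_CHAN_NO_IR restriction
 * and notify userspace about the relaxation.
 */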
2144*4882a593Smuzhiyun static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
2145*4882a593Smuzhiyun struct reg_beacon *reg_beacon)
2146*4882a593Smuzhiyun {
2147*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
2148*4882a593Smuzhiyun struct ieee80211_channel *chan;
2149*4882a593Smuzhiyun bool channel_changed = false;
2150*4882a593Smuzhiyun struct ieee80211_channel chan_before;
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun sband = wiphy->bands[reg_beacon->chan.band];
2153*4882a593Smuzhiyun chan = &sband->channels[chan_idx];
2154*4882a593Smuzhiyun
2155*4882a593Smuzhiyun 	if (likely(!ieee80211_channel_equal(chan, &reg_beacon->chan)))
2156*4882a593Smuzhiyun return;
2157*4882a593Smuzhiyun
2158*4882a593Smuzhiyun if (chan->beacon_found)
2159*4882a593Smuzhiyun return;
2160*4882a593Smuzhiyun
2161*4882a593Smuzhiyun chan->beacon_found = true;
2162*4882a593Smuzhiyun
2163*4882a593Smuzhiyun if (!reg_is_world_roaming(wiphy))
2164*4882a593Smuzhiyun return;
2165*4882a593Smuzhiyun
2166*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
2167*4882a593Smuzhiyun return;
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun chan_before = *chan;
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_NO_IR) {
2172*4882a593Smuzhiyun chan->flags &= ~IEEE80211_CHAN_NO_IR;
2173*4882a593Smuzhiyun channel_changed = true;
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun if (channel_changed)
2177*4882a593Smuzhiyun nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun /*
2181*4882a593Smuzhiyun * Called when a scan on a wiphy finds a beacon on
2182*4882a593Smuzhiyun * new channel
2183*4882a593Smuzhiyun */
2184*4882a593Smuzhiyun static void wiphy_update_new_beacon(struct wiphy *wiphy,
2185*4882a593Smuzhiyun struct reg_beacon *reg_beacon)
2186*4882a593Smuzhiyun {
2187*4882a593Smuzhiyun unsigned int i;
2188*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun if (!wiphy->bands[reg_beacon->chan.band])
2191*4882a593Smuzhiyun return;
2192*4882a593Smuzhiyun
2193*4882a593Smuzhiyun sband = wiphy->bands[reg_beacon->chan.band];
2194*4882a593Smuzhiyun
2195*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++)
2196*4882a593Smuzhiyun handle_reg_beacon(wiphy, i, reg_beacon);
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun
2199*4882a593Smuzhiyun /*
2200*4882a593Smuzhiyun * Called upon reg changes or a new wiphy is added
2201*4882a593Smuzhiyun */
2202*4882a593Smuzhiyun static void wiphy_update_beacon_reg(struct wiphy *wiphy)
2203*4882a593Smuzhiyun {
2204*4882a593Smuzhiyun unsigned int i;
2205*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
2206*4882a593Smuzhiyun struct reg_beacon *reg_beacon;
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun 	list_for_each_entry(reg_beacon, &reg_beacon_list, list) {
2209*4882a593Smuzhiyun if (!wiphy->bands[reg_beacon->chan.band])
2210*4882a593Smuzhiyun continue;
2211*4882a593Smuzhiyun sband = wiphy->bands[reg_beacon->chan.band];
2212*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++)
2213*4882a593Smuzhiyun handle_reg_beacon(wiphy, i, reg_beacon);
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun }
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun /* Reap the advantages of previously found beacons */
2218*4882a593Smuzhiyun static void reg_process_beacons(struct wiphy *wiphy)
2219*4882a593Smuzhiyun {
2220*4882a593Smuzhiyun /*
2221*4882a593Smuzhiyun * Means we are just firing up cfg80211, so no beacons would
2222*4882a593Smuzhiyun * have been processed yet.
2223*4882a593Smuzhiyun */
2224*4882a593Smuzhiyun if (!last_request)
2225*4882a593Smuzhiyun return;
2226*4882a593Smuzhiyun wiphy_update_beacon_reg(wiphy);
2227*4882a593Smuzhiyun }
2228*4882a593Smuzhiyun
2229*4882a593Smuzhiyun static bool is_ht40_allowed(struct ieee80211_channel *chan)
2230*4882a593Smuzhiyun {
2231*4882a593Smuzhiyun if (!chan)
2232*4882a593Smuzhiyun return false;
2233*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_DISABLED)
2234*4882a593Smuzhiyun return false;
2235*4882a593Smuzhiyun /* This would happen when regulatory rules disallow HT40 completely */
2236*4882a593Smuzhiyun if ((chan->flags & IEEE80211_CHAN_NO_HT40) == IEEE80211_CHAN_NO_HT40)
2237*4882a593Smuzhiyun return false;
2238*4882a593Smuzhiyun return true;
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun static void reg_process_ht_flags_channel(struct wiphy *wiphy,
2242*4882a593Smuzhiyun struct ieee80211_channel *channel)
2243*4882a593Smuzhiyun {
2244*4882a593Smuzhiyun struct ieee80211_supported_band *sband = wiphy->bands[channel->band];
2245*4882a593Smuzhiyun struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
2246*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
2247*4882a593Smuzhiyun unsigned int i;
2248*4882a593Smuzhiyun u32 flags;
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun if (!is_ht40_allowed(channel)) {
2251*4882a593Smuzhiyun channel->flags |= IEEE80211_CHAN_NO_HT40;
2252*4882a593Smuzhiyun return;
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun /*
2256*4882a593Smuzhiyun * We need to ensure the extension channels exist to
2257*4882a593Smuzhiyun * be able to use HT40- or HT40+; this finds them (or not).
2258*4882a593Smuzhiyun */
2259*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++) {
2260*4882a593Smuzhiyun struct ieee80211_channel *c = &sband->channels[i];
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun if (c->center_freq == (channel->center_freq - 20))
2263*4882a593Smuzhiyun channel_before = c;
2264*4882a593Smuzhiyun if (c->center_freq == (channel->center_freq + 20))
2265*4882a593Smuzhiyun channel_after = c;
2266*4882a593Smuzhiyun }
2267*4882a593Smuzhiyun
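/*
 * Pick up any explicit HT40 restrictions (NL80211_RRF_NO_HT40MINUS /
 * NL80211_RRF_NO_HT40PLUS) from the wiphy's own regdomain rule for
 * this frequency, if such a rule exists.
 */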
2268*4882a593Smuzhiyun flags = 0;
2269*4882a593Smuzhiyun regd = get_wiphy_regdom(wiphy);
2270*4882a593Smuzhiyun if (regd) {
2271*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule =
2272*4882a593Smuzhiyun freq_reg_info_regd(MHZ_TO_KHZ(channel->center_freq),
2273*4882a593Smuzhiyun regd, MHZ_TO_KHZ(20));
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun if (!IS_ERR(reg_rule))
2276*4882a593Smuzhiyun flags = reg_rule->flags;
2277*4882a593Smuzhiyun }
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun /*
2280*4882a593Smuzhiyun * Please note that this assumes the target bandwidth is 20 MHz;
2281*4882a593Smuzhiyun * if that ever changes we also need to change the logic below
2282*4882a593Smuzhiyun * to include that as well.
2283*4882a593Smuzhiyun */
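/*
 * Worked example (illustrative): for 5 GHz channel 36 (5180 MHz), HT40+
 * needs the extension channel 40 (5200 MHz) to exist and to allow HT40;
 * if it does not, or the rule carries NL80211_RRF_NO_HT40PLUS, the
 * channel gets IEEE80211_CHAN_NO_HT40PLUS. HT40- is handled the same
 * way using the channel 20 MHz below.
 */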
2284*4882a593Smuzhiyun if (!is_ht40_allowed(channel_before) ||
2285*4882a593Smuzhiyun flags & NL80211_RRF_NO_HT40MINUS)
2286*4882a593Smuzhiyun channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
2287*4882a593Smuzhiyun else
2288*4882a593Smuzhiyun channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
2289*4882a593Smuzhiyun
2290*4882a593Smuzhiyun if (!is_ht40_allowed(channel_after) ||
2291*4882a593Smuzhiyun flags & NL80211_RRF_NO_HT40PLUS)
2292*4882a593Smuzhiyun channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
2293*4882a593Smuzhiyun else
2294*4882a593Smuzhiyun channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun static void reg_process_ht_flags_band(struct wiphy *wiphy,
2298*4882a593Smuzhiyun struct ieee80211_supported_band *sband)
2299*4882a593Smuzhiyun {
2300*4882a593Smuzhiyun unsigned int i;
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun if (!sband)
2303*4882a593Smuzhiyun return;
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++)
2306*4882a593Smuzhiyun reg_process_ht_flags_channel(wiphy, &sband->channels[i]);
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun
2309*4882a593Smuzhiyun static void reg_process_ht_flags(struct wiphy *wiphy)
2310*4882a593Smuzhiyun {
2311*4882a593Smuzhiyun enum nl80211_band band;
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun if (!wiphy)
2314*4882a593Smuzhiyun return;
2315*4882a593Smuzhiyun
2316*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++)
2317*4882a593Smuzhiyun reg_process_ht_flags_band(wiphy, wiphy->bands[band]);
2318*4882a593Smuzhiyun }
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun static void reg_call_notifier(struct wiphy *wiphy,
2321*4882a593Smuzhiyun struct regulatory_request *request)
2322*4882a593Smuzhiyun {
2323*4882a593Smuzhiyun if (wiphy->reg_notifier)
2324*4882a593Smuzhiyun wiphy->reg_notifier(wiphy, request);
2325*4882a593Smuzhiyun }
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
2328*4882a593Smuzhiyun {
2329*4882a593Smuzhiyun struct cfg80211_chan_def chandef = {};
2330*4882a593Smuzhiyun struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
2331*4882a593Smuzhiyun enum nl80211_iftype iftype;
2332*4882a593Smuzhiyun
2333*4882a593Smuzhiyun wdev_lock(wdev);
2334*4882a593Smuzhiyun iftype = wdev->iftype;
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun /* make sure the interface is active */
2337*4882a593Smuzhiyun if (!wdev->netdev || !netif_running(wdev->netdev))
2338*4882a593Smuzhiyun goto wdev_inactive_unlock;
2339*4882a593Smuzhiyun
2340*4882a593Smuzhiyun switch (iftype) {
2341*4882a593Smuzhiyun case NL80211_IFTYPE_AP:
2342*4882a593Smuzhiyun case NL80211_IFTYPE_P2P_GO:
2343*4882a593Smuzhiyun if (!wdev->beacon_interval)
2344*4882a593Smuzhiyun goto wdev_inactive_unlock;
2345*4882a593Smuzhiyun chandef = wdev->chandef;
2346*4882a593Smuzhiyun break;
2347*4882a593Smuzhiyun case NL80211_IFTYPE_ADHOC:
2348*4882a593Smuzhiyun if (!wdev->ssid_len)
2349*4882a593Smuzhiyun goto wdev_inactive_unlock;
2350*4882a593Smuzhiyun chandef = wdev->chandef;
2351*4882a593Smuzhiyun break;
2352*4882a593Smuzhiyun case NL80211_IFTYPE_STATION:
2353*4882a593Smuzhiyun case NL80211_IFTYPE_P2P_CLIENT:
2354*4882a593Smuzhiyun if (!wdev->current_bss ||
2355*4882a593Smuzhiyun !wdev->current_bss->pub.channel)
2356*4882a593Smuzhiyun goto wdev_inactive_unlock;
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun if (!rdev->ops->get_channel ||
2359*4882a593Smuzhiyun rdev_get_channel(rdev, wdev, &chandef))
2360*4882a593Smuzhiyun cfg80211_chandef_create(&chandef,
2361*4882a593Smuzhiyun wdev->current_bss->pub.channel,
2362*4882a593Smuzhiyun NL80211_CHAN_NO_HT);
2363*4882a593Smuzhiyun break;
2364*4882a593Smuzhiyun case NL80211_IFTYPE_MONITOR:
2365*4882a593Smuzhiyun case NL80211_IFTYPE_AP_VLAN:
2366*4882a593Smuzhiyun case NL80211_IFTYPE_P2P_DEVICE:
2367*4882a593Smuzhiyun /* no enforcement required */
2368*4882a593Smuzhiyun break;
2369*4882a593Smuzhiyun default:
2370*4882a593Smuzhiyun /* others not implemented for now */
2371*4882a593Smuzhiyun WARN_ON(1);
2372*4882a593Smuzhiyun break;
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun wdev_unlock(wdev);
2376*4882a593Smuzhiyun
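/*
 * Validate the collected chandef outside the wdev lock: beaconing
 * interfaces may use the IR-relaxation rules, while client interfaces
 * only need a chandef that is not disabled.
 */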
2377*4882a593Smuzhiyun switch (iftype) {
2378*4882a593Smuzhiyun case NL80211_IFTYPE_AP:
2379*4882a593Smuzhiyun case NL80211_IFTYPE_P2P_GO:
2380*4882a593Smuzhiyun case NL80211_IFTYPE_ADHOC:
2381*4882a593Smuzhiyun return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
2382*4882a593Smuzhiyun case NL80211_IFTYPE_STATION:
2383*4882a593Smuzhiyun case NL80211_IFTYPE_P2P_CLIENT:
2384*4882a593Smuzhiyun return cfg80211_chandef_usable(wiphy, &chandef,
2385*4882a593Smuzhiyun IEEE80211_CHAN_DISABLED);
2386*4882a593Smuzhiyun default:
2387*4882a593Smuzhiyun break;
2388*4882a593Smuzhiyun }
2389*4882a593Smuzhiyun
2390*4882a593Smuzhiyun return true;
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun wdev_inactive_unlock:
2393*4882a593Smuzhiyun wdev_unlock(wdev);
2394*4882a593Smuzhiyun return true;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun static void reg_leave_invalid_chans(struct wiphy *wiphy)
2398*4882a593Smuzhiyun {
2399*4882a593Smuzhiyun struct wireless_dev *wdev;
2400*4882a593Smuzhiyun struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
2401*4882a593Smuzhiyun
2402*4882a593Smuzhiyun ASSERT_RTNL();
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
2405*4882a593Smuzhiyun if (!reg_wdev_chan_valid(wiphy, wdev))
2406*4882a593Smuzhiyun cfg80211_leave(rdev, wdev);
2407*4882a593Smuzhiyun }
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun static void reg_check_chans_work(struct work_struct *work)
2410*4882a593Smuzhiyun {
2411*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun pr_debug("Verifying active interfaces after reg change\n");
2414*4882a593Smuzhiyun rtnl_lock();
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list)
2417*4882a593Smuzhiyun if (!(rdev->wiphy.regulatory_flags &
2418*4882a593Smuzhiyun REGULATORY_IGNORE_STALE_KICKOFF))
2419*4882a593Smuzhiyun reg_leave_invalid_chans(&rdev->wiphy);
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun rtnl_unlock();
2422*4882a593Smuzhiyun }
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun static void reg_check_channels(void)
2425*4882a593Smuzhiyun {
2426*4882a593Smuzhiyun /*
2427*4882a593Smuzhiyun * Give usermode a chance to do something nicer (move to another
2428*4882a593Smuzhiyun * channel, orderly disconnection), before forcing a disconnection.
2429*4882a593Smuzhiyun */
2430*4882a593Smuzhiyun mod_delayed_work(system_power_efficient_wq,
2431*4882a593Smuzhiyun &reg_check_chans,
2432*4882a593Smuzhiyun msecs_to_jiffies(REG_ENFORCE_GRACE_MS));
2433*4882a593Smuzhiyun }
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun static void wiphy_update_regulatory(struct wiphy *wiphy,
2436*4882a593Smuzhiyun enum nl80211_reg_initiator initiator)
2437*4882a593Smuzhiyun {
2438*4882a593Smuzhiyun enum nl80211_band band;
2439*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2440*4882a593Smuzhiyun
2441*4882a593Smuzhiyun if (ignore_reg_update(wiphy, initiator)) {
2442*4882a593Smuzhiyun /*
2443*4882a593Smuzhiyun * Regulatory updates set by CORE are ignored for custom
2444*4882a593Smuzhiyun * regulatory cards. Let us notify the changes to the driver,
2445*4882a593Smuzhiyun * as some drivers use this to restore their orig_* reg domain.
2446*4882a593Smuzhiyun */
2447*4882a593Smuzhiyun if (initiator == NL80211_REGDOM_SET_BY_CORE &&
2448*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
2449*4882a593Smuzhiyun !(wiphy->regulatory_flags &
2450*4882a593Smuzhiyun REGULATORY_WIPHY_SELF_MANAGED))
2451*4882a593Smuzhiyun reg_call_notifier(wiphy, lr);
2452*4882a593Smuzhiyun return;
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun lr->dfs_region = get_cfg80211_regdom()->dfs_region;
2456*4882a593Smuzhiyun
2457*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++)
2458*4882a593Smuzhiyun handle_band(wiphy, initiator, wiphy->bands[band]);
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun reg_process_beacons(wiphy);
2461*4882a593Smuzhiyun reg_process_ht_flags(wiphy);
2462*4882a593Smuzhiyun reg_call_notifier(wiphy, lr);
2463*4882a593Smuzhiyun }
2464*4882a593Smuzhiyun
2465*4882a593Smuzhiyun static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
2466*4882a593Smuzhiyun {
2467*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
2468*4882a593Smuzhiyun struct wiphy *wiphy;
2469*4882a593Smuzhiyun
2470*4882a593Smuzhiyun ASSERT_RTNL();
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
2473*4882a593Smuzhiyun wiphy = &rdev->wiphy;
2474*4882a593Smuzhiyun wiphy_update_regulatory(wiphy, initiator);
2475*4882a593Smuzhiyun }
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun reg_check_channels();
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun
2480*4882a593Smuzhiyun static void handle_channel_custom(struct wiphy *wiphy,
2481*4882a593Smuzhiyun struct ieee80211_channel *chan,
2482*4882a593Smuzhiyun const struct ieee80211_regdomain *regd,
2483*4882a593Smuzhiyun u32 min_bw)
2484*4882a593Smuzhiyun {
2485*4882a593Smuzhiyun u32 bw_flags = 0;
2486*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule = NULL;
2487*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule = NULL;
2488*4882a593Smuzhiyun u32 bw, center_freq_khz;
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun center_freq_khz = ieee80211_channel_to_khz(chan);
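/*
 * Try a 20 MHz rule first, then progressively narrower bandwidths,
 * stopping at the first rule that matches or once we drop below min_bw.
 */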
2491*4882a593Smuzhiyun for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
2492*4882a593Smuzhiyun reg_rule = freq_reg_info_regd(center_freq_khz, regd, bw);
2493*4882a593Smuzhiyun if (!IS_ERR(reg_rule))
2494*4882a593Smuzhiyun break;
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun
2497*4882a593Smuzhiyun if (IS_ERR_OR_NULL(reg_rule)) {
2498*4882a593Smuzhiyun pr_debug("Disabling freq %d.%03d MHz as custom regd has no rule that fits it\n",
2499*4882a593Smuzhiyun chan->center_freq, chan->freq_offset);
2500*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
2501*4882a593Smuzhiyun chan->flags |= IEEE80211_CHAN_DISABLED;
2502*4882a593Smuzhiyun } else {
2503*4882a593Smuzhiyun chan->orig_flags |= IEEE80211_CHAN_DISABLED;
2504*4882a593Smuzhiyun chan->flags = chan->orig_flags;
2505*4882a593Smuzhiyun }
2506*4882a593Smuzhiyun return;
2507*4882a593Smuzhiyun }
2508*4882a593Smuzhiyun
2509*4882a593Smuzhiyun power_rule = &reg_rule->power_rule;
2510*4882a593Smuzhiyun bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
2511*4882a593Smuzhiyun
2512*4882a593Smuzhiyun chan->dfs_state_entered = jiffies;
2513*4882a593Smuzhiyun chan->dfs_state = NL80211_DFS_USABLE;
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun chan->beacon_found = false;
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
2518*4882a593Smuzhiyun chan->flags = chan->orig_flags | bw_flags |
2519*4882a593Smuzhiyun map_regdom_flags(reg_rule->flags);
2520*4882a593Smuzhiyun else
2521*4882a593Smuzhiyun chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
2524*4882a593Smuzhiyun chan->max_reg_power = chan->max_power =
2525*4882a593Smuzhiyun (int) MBM_TO_DBM(power_rule->max_eirp);
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun if (chan->flags & IEEE80211_CHAN_RADAR) {
2528*4882a593Smuzhiyun if (reg_rule->dfs_cac_ms)
2529*4882a593Smuzhiyun chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
2530*4882a593Smuzhiyun else
2531*4882a593Smuzhiyun chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
2532*4882a593Smuzhiyun }
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun chan->max_power = chan->max_reg_power;
2535*4882a593Smuzhiyun }
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun static void handle_band_custom(struct wiphy *wiphy,
2538*4882a593Smuzhiyun struct ieee80211_supported_band *sband,
2539*4882a593Smuzhiyun const struct ieee80211_regdomain *regd)
2540*4882a593Smuzhiyun {
2541*4882a593Smuzhiyun unsigned int i;
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun if (!sband)
2544*4882a593Smuzhiyun return;
2545*4882a593Smuzhiyun
2546*4882a593Smuzhiyun /*
2547*4882a593Smuzhiyun * We currently assume that you always want at least 20 MHz,
2548*4882a593Smuzhiyun * otherwise channel 12 might get enabled if this rule is
2549*4882a593Smuzhiyun * compatible with the US, which permits 2402 - 2472 MHz.
2550*4882a593Smuzhiyun */
2551*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++)
2552*4882a593Smuzhiyun handle_channel_custom(wiphy, &sband->channels[i], regd,
2553*4882a593Smuzhiyun MHZ_TO_KHZ(20));
2554*4882a593Smuzhiyun }
2555*4882a593Smuzhiyun
2556*4882a593Smuzhiyun /* Used by drivers prior to wiphy registration */
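/*
 * Illustrative usage sketch (not taken from a real driver; the regdomain
 * below is made up for the example): a driver with its own fixed
 * regulatory table would typically do something like this before
 * wiphy_register():
 *
 *	static const struct ieee80211_regdomain my_regdom = {
 *		.n_reg_rules = 1,
 *		.alpha2 = "99",
 *		.reg_rules = {
 *			REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
 *		},
 *	};
 *
 *	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
 *	wiphy_apply_custom_regulatory(wiphy, &my_regdom);
 */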
2557*4882a593Smuzhiyun void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
2558*4882a593Smuzhiyun const struct ieee80211_regdomain *regd)
2559*4882a593Smuzhiyun {
2560*4882a593Smuzhiyun enum nl80211_band band;
2561*4882a593Smuzhiyun unsigned int bands_set = 0;
2562*4882a593Smuzhiyun
2563*4882a593Smuzhiyun WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG),
2564*4882a593Smuzhiyun "wiphy should have REGULATORY_CUSTOM_REG\n");
2565*4882a593Smuzhiyun wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
2566*4882a593Smuzhiyun
2567*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++) {
2568*4882a593Smuzhiyun if (!wiphy->bands[band])
2569*4882a593Smuzhiyun continue;
2570*4882a593Smuzhiyun handle_band_custom(wiphy, wiphy->bands[band], regd);
2571*4882a593Smuzhiyun bands_set++;
2572*4882a593Smuzhiyun }
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun /*
2575*4882a593Smuzhiyun * no point in calling this if it won't have any effect
2576*4882a593Smuzhiyun * on your device's supported bands.
2577*4882a593Smuzhiyun */
2578*4882a593Smuzhiyun WARN_ON(!bands_set);
2579*4882a593Smuzhiyun }
2580*4882a593Smuzhiyun EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
2581*4882a593Smuzhiyun
2582*4882a593Smuzhiyun static void reg_set_request_processed(void)
2583*4882a593Smuzhiyun {
2584*4882a593Smuzhiyun bool need_more_processing = false;
2585*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2586*4882a593Smuzhiyun
2587*4882a593Smuzhiyun lr->processed = true;
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
2590*4882a593Smuzhiyun if (!list_empty(&reg_requests_list))
2591*4882a593Smuzhiyun need_more_processing = true;
2592*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
2593*4882a593Smuzhiyun
2594*4882a593Smuzhiyun cancel_crda_timeout();
2595*4882a593Smuzhiyun
2596*4882a593Smuzhiyun if (need_more_processing)
2597*4882a593Smuzhiyun schedule_work(&reg_work);
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun /**
2601*4882a593Smuzhiyun * reg_process_hint_core - process core regulatory requests
2602*4882a593Smuzhiyun * @core_request: a pending core regulatory request
2603*4882a593Smuzhiyun *
2604*4882a593Smuzhiyun * The wireless subsystem can use this function to process
2605*4882a593Smuzhiyun * a regulatory request issued by the regulatory core.
2606*4882a593Smuzhiyun */
2607*4882a593Smuzhiyun static enum reg_request_treatment
2608*4882a593Smuzhiyun reg_process_hint_core(struct regulatory_request *core_request)
2609*4882a593Smuzhiyun {
2610*4882a593Smuzhiyun if (reg_query_database(core_request)) {
2611*4882a593Smuzhiyun core_request->intersect = false;
2612*4882a593Smuzhiyun core_request->processed = false;
2613*4882a593Smuzhiyun reg_update_last_request(core_request);
2614*4882a593Smuzhiyun return REG_REQ_OK;
2615*4882a593Smuzhiyun }
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun return REG_REQ_IGNORE;
2618*4882a593Smuzhiyun }
2619*4882a593Smuzhiyun
2620*4882a593Smuzhiyun static enum reg_request_treatment
2621*4882a593Smuzhiyun __reg_process_hint_user(struct regulatory_request *user_request)
2622*4882a593Smuzhiyun {
2623*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2624*4882a593Smuzhiyun
2625*4882a593Smuzhiyun if (reg_request_cell_base(user_request))
2626*4882a593Smuzhiyun return reg_ignore_cell_hint(user_request);
2627*4882a593Smuzhiyun
2628*4882a593Smuzhiyun if (reg_request_cell_base(lr))
2629*4882a593Smuzhiyun return REG_REQ_IGNORE;
2630*4882a593Smuzhiyun
2631*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
2632*4882a593Smuzhiyun return REG_REQ_INTERSECT;
2633*4882a593Smuzhiyun /*
2634*4882a593Smuzhiyun * If the user knows better, the user should set the regdom
2635*4882a593Smuzhiyun * to their country before the IE is picked up
2636*4882a593Smuzhiyun */
2637*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
2638*4882a593Smuzhiyun lr->intersect)
2639*4882a593Smuzhiyun return REG_REQ_IGNORE;
2640*4882a593Smuzhiyun /*
2641*4882a593Smuzhiyun * Process user requests only after previous user/driver/core
2642*4882a593Smuzhiyun * requests have been processed
2643*4882a593Smuzhiyun */
2644*4882a593Smuzhiyun if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE ||
2645*4882a593Smuzhiyun lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
2646*4882a593Smuzhiyun lr->initiator == NL80211_REGDOM_SET_BY_USER) &&
2647*4882a593Smuzhiyun regdom_changes(lr->alpha2))
2648*4882a593Smuzhiyun return REG_REQ_IGNORE;
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun if (!regdom_changes(user_request->alpha2))
2651*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun return REG_REQ_OK;
2654*4882a593Smuzhiyun }
2655*4882a593Smuzhiyun
2656*4882a593Smuzhiyun /**
2657*4882a593Smuzhiyun * reg_process_hint_user - process user regulatory requests
2658*4882a593Smuzhiyun * @user_request: a pending user regulatory request
2659*4882a593Smuzhiyun *
2660*4882a593Smuzhiyun * The wireless subsystem can use this function to process
2661*4882a593Smuzhiyun * a regulatory request initiated by userspace.
2662*4882a593Smuzhiyun */
2663*4882a593Smuzhiyun static enum reg_request_treatment
2664*4882a593Smuzhiyun reg_process_hint_user(struct regulatory_request *user_request)
2665*4882a593Smuzhiyun {
2666*4882a593Smuzhiyun enum reg_request_treatment treatment;
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun treatment = __reg_process_hint_user(user_request);
2669*4882a593Smuzhiyun if (treatment == REG_REQ_IGNORE ||
2670*4882a593Smuzhiyun treatment == REG_REQ_ALREADY_SET)
2671*4882a593Smuzhiyun return REG_REQ_IGNORE;
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun user_request->intersect = treatment == REG_REQ_INTERSECT;
2674*4882a593Smuzhiyun user_request->processed = false;
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun if (reg_query_database(user_request)) {
2677*4882a593Smuzhiyun reg_update_last_request(user_request);
2678*4882a593Smuzhiyun user_alpha2[0] = user_request->alpha2[0];
2679*4882a593Smuzhiyun user_alpha2[1] = user_request->alpha2[1];
2680*4882a593Smuzhiyun return REG_REQ_OK;
2681*4882a593Smuzhiyun }
2682*4882a593Smuzhiyun
2683*4882a593Smuzhiyun return REG_REQ_IGNORE;
2684*4882a593Smuzhiyun }
2685*4882a593Smuzhiyun
2686*4882a593Smuzhiyun static enum reg_request_treatment
2687*4882a593Smuzhiyun __reg_process_hint_driver(struct regulatory_request *driver_request)
2688*4882a593Smuzhiyun {
2689*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) {
2692*4882a593Smuzhiyun if (regdom_changes(driver_request->alpha2))
2693*4882a593Smuzhiyun return REG_REQ_OK;
2694*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun /*
2698*4882a593Smuzhiyun * This would happen if you unplug and plug your card
2699*4882a593Smuzhiyun * back in or if you add a new device for which the previously
2700*4882a593Smuzhiyun * loaded card also agrees on the regulatory domain.
2701*4882a593Smuzhiyun */
2702*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
2703*4882a593Smuzhiyun !regdom_changes(driver_request->alpha2))
2704*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun return REG_REQ_INTERSECT;
2707*4882a593Smuzhiyun }
2708*4882a593Smuzhiyun
2709*4882a593Smuzhiyun /**
2710*4882a593Smuzhiyun * reg_process_hint_driver - process driver regulatory requests
2711*4882a593Smuzhiyun * @wiphy: the wireless device for the regulatory request
2712*4882a593Smuzhiyun * @driver_request: a pending driver regulatory request
2713*4882a593Smuzhiyun *
2714*4882a593Smuzhiyun * The wireless subsystem can use this function to process
2715*4882a593Smuzhiyun * a regulatory request issued by an 802.11 driver.
2716*4882a593Smuzhiyun *
2717*4882a593Smuzhiyun * Returns one of the different reg request treatment values.
2718*4882a593Smuzhiyun */
2719*4882a593Smuzhiyun static enum reg_request_treatment
2720*4882a593Smuzhiyun reg_process_hint_driver(struct wiphy *wiphy,
2721*4882a593Smuzhiyun struct regulatory_request *driver_request)
2722*4882a593Smuzhiyun {
2723*4882a593Smuzhiyun const struct ieee80211_regdomain *regd, *tmp;
2724*4882a593Smuzhiyun enum reg_request_treatment treatment;
2725*4882a593Smuzhiyun
2726*4882a593Smuzhiyun treatment = __reg_process_hint_driver(driver_request);
2727*4882a593Smuzhiyun
2728*4882a593Smuzhiyun switch (treatment) {
2729*4882a593Smuzhiyun case REG_REQ_OK:
2730*4882a593Smuzhiyun break;
2731*4882a593Smuzhiyun case REG_REQ_IGNORE:
2732*4882a593Smuzhiyun return REG_REQ_IGNORE;
2733*4882a593Smuzhiyun case REG_REQ_INTERSECT:
2734*4882a593Smuzhiyun case REG_REQ_ALREADY_SET:
2735*4882a593Smuzhiyun regd = reg_copy_regd(get_cfg80211_regdom());
2736*4882a593Smuzhiyun if (IS_ERR(regd))
2737*4882a593Smuzhiyun return REG_REQ_IGNORE;
2738*4882a593Smuzhiyun
2739*4882a593Smuzhiyun tmp = get_wiphy_regdom(wiphy);
2740*4882a593Smuzhiyun rcu_assign_pointer(wiphy->regd, regd);
2741*4882a593Smuzhiyun rcu_free_regdom(tmp);
2742*4882a593Smuzhiyun }
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun
2745*4882a593Smuzhiyun driver_request->intersect = treatment == REG_REQ_INTERSECT;
2746*4882a593Smuzhiyun driver_request->processed = false;
2747*4882a593Smuzhiyun
2748*4882a593Smuzhiyun /*
2749*4882a593Smuzhiyun * Since CRDA will not be called in this case, as we have already
2750*4882a593Smuzhiyun * applied the requested regulatory domain, we just inform
2751*4882a593Smuzhiyun * userspace that we have processed the request.
2752*4882a593Smuzhiyun */
2753*4882a593Smuzhiyun if (treatment == REG_REQ_ALREADY_SET) {
2754*4882a593Smuzhiyun nl80211_send_reg_change_event(driver_request);
2755*4882a593Smuzhiyun reg_update_last_request(driver_request);
2756*4882a593Smuzhiyun reg_set_request_processed();
2757*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2758*4882a593Smuzhiyun }
2759*4882a593Smuzhiyun
2760*4882a593Smuzhiyun if (reg_query_database(driver_request)) {
2761*4882a593Smuzhiyun reg_update_last_request(driver_request);
2762*4882a593Smuzhiyun return REG_REQ_OK;
2763*4882a593Smuzhiyun }
2764*4882a593Smuzhiyun
2765*4882a593Smuzhiyun return REG_REQ_IGNORE;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun
2768*4882a593Smuzhiyun static enum reg_request_treatment
2769*4882a593Smuzhiyun __reg_process_hint_country_ie(struct wiphy *wiphy,
2770*4882a593Smuzhiyun struct regulatory_request *country_ie_request)
2771*4882a593Smuzhiyun {
2772*4882a593Smuzhiyun struct wiphy *last_wiphy = NULL;
2773*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun if (reg_request_cell_base(lr)) {
2776*4882a593Smuzhiyun /* Trust a Cell base station over the AP's country IE */
2777*4882a593Smuzhiyun if (regdom_changes(country_ie_request->alpha2))
2778*4882a593Smuzhiyun return REG_REQ_IGNORE;
2779*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2780*4882a593Smuzhiyun } else {
2781*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_IGNORE)
2782*4882a593Smuzhiyun return REG_REQ_IGNORE;
2783*4882a593Smuzhiyun }
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun if (unlikely(!is_an_alpha2(country_ie_request->alpha2)))
2786*4882a593Smuzhiyun return -EINVAL;
2787*4882a593Smuzhiyun
2788*4882a593Smuzhiyun if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)
2789*4882a593Smuzhiyun return REG_REQ_OK;
2790*4882a593Smuzhiyun
2791*4882a593Smuzhiyun last_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
2792*4882a593Smuzhiyun
2793*4882a593Smuzhiyun if (last_wiphy != wiphy) {
2794*4882a593Smuzhiyun /*
2795*4882a593Smuzhiyun * Two cards with two APs claiming different
2796*4882a593Smuzhiyun * Country IE alpha2s. We could
2797*4882a593Smuzhiyun * intersect them, but that seems unlikely
2798*4882a593Smuzhiyun * to be correct. Reject the second one for now.
2799*4882a593Smuzhiyun */
2800*4882a593Smuzhiyun if (regdom_changes(country_ie_request->alpha2))
2801*4882a593Smuzhiyun return REG_REQ_IGNORE;
2802*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2803*4882a593Smuzhiyun }
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun if (regdom_changes(country_ie_request->alpha2))
2806*4882a593Smuzhiyun return REG_REQ_OK;
2807*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun /**
2811*4882a593Smuzhiyun * reg_process_hint_country_ie - process regulatory requests from country IEs
2812*4882a593Smuzhiyun * @wiphy: the wireless device for the regulatory request
2813*4882a593Smuzhiyun * @country_ie_request: a regulatory request from a country IE
2814*4882a593Smuzhiyun *
2815*4882a593Smuzhiyun * The wireless subsystem can use this function to process
2816*4882a593Smuzhiyun * a regulatory request issued by a country Information Element.
2817*4882a593Smuzhiyun *
2818*4882a593Smuzhiyun * Returns one of the different reg request treatment values.
2819*4882a593Smuzhiyun */
2820*4882a593Smuzhiyun static enum reg_request_treatment
2821*4882a593Smuzhiyun reg_process_hint_country_ie(struct wiphy *wiphy,
2822*4882a593Smuzhiyun struct regulatory_request *country_ie_request)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun enum reg_request_treatment treatment;
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun treatment = __reg_process_hint_country_ie(wiphy, country_ie_request);
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun switch (treatment) {
2829*4882a593Smuzhiyun case REG_REQ_OK:
2830*4882a593Smuzhiyun break;
2831*4882a593Smuzhiyun case REG_REQ_IGNORE:
2832*4882a593Smuzhiyun return REG_REQ_IGNORE;
2833*4882a593Smuzhiyun case REG_REQ_ALREADY_SET:
2834*4882a593Smuzhiyun reg_free_request(country_ie_request);
2835*4882a593Smuzhiyun return REG_REQ_ALREADY_SET;
2836*4882a593Smuzhiyun case REG_REQ_INTERSECT:
2837*4882a593Smuzhiyun /*
2838*4882a593Smuzhiyun * This doesn't happen yet, not sure we
2839*4882a593Smuzhiyun * ever want to support it for this case.
2840*4882a593Smuzhiyun */
2841*4882a593Smuzhiyun WARN_ONCE(1, "Unexpected intersection for country elements");
2842*4882a593Smuzhiyun return REG_REQ_IGNORE;
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun
2845*4882a593Smuzhiyun country_ie_request->intersect = false;
2846*4882a593Smuzhiyun country_ie_request->processed = false;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun if (reg_query_database(country_ie_request)) {
2849*4882a593Smuzhiyun reg_update_last_request(country_ie_request);
2850*4882a593Smuzhiyun return REG_REQ_OK;
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun return REG_REQ_IGNORE;
2854*4882a593Smuzhiyun }
2855*4882a593Smuzhiyun
2856*4882a593Smuzhiyun bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2)
2857*4882a593Smuzhiyun {
2858*4882a593Smuzhiyun const struct ieee80211_regdomain *wiphy1_regd = NULL;
2859*4882a593Smuzhiyun const struct ieee80211_regdomain *wiphy2_regd = NULL;
2860*4882a593Smuzhiyun const struct ieee80211_regdomain *cfg80211_regd = NULL;
2861*4882a593Smuzhiyun bool dfs_domain_same;
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun rcu_read_lock();
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun cfg80211_regd = rcu_dereference(cfg80211_regdomain);
2866*4882a593Smuzhiyun wiphy1_regd = rcu_dereference(wiphy1->regd);
2867*4882a593Smuzhiyun if (!wiphy1_regd)
2868*4882a593Smuzhiyun wiphy1_regd = cfg80211_regd;
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun wiphy2_regd = rcu_dereference(wiphy2->regd);
2871*4882a593Smuzhiyun if (!wiphy2_regd)
2872*4882a593Smuzhiyun wiphy2_regd = cfg80211_regd;
2873*4882a593Smuzhiyun
2874*4882a593Smuzhiyun dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region;
2875*4882a593Smuzhiyun
2876*4882a593Smuzhiyun rcu_read_unlock();
2877*4882a593Smuzhiyun
2878*4882a593Smuzhiyun return dfs_domain_same;
2879*4882a593Smuzhiyun }
2880*4882a593Smuzhiyun
2881*4882a593Smuzhiyun static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan,
2882*4882a593Smuzhiyun struct ieee80211_channel *src_chan)
2883*4882a593Smuzhiyun {
2884*4882a593Smuzhiyun if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) ||
2885*4882a593Smuzhiyun !(src_chan->flags & IEEE80211_CHAN_RADAR))
2886*4882a593Smuzhiyun return;
2887*4882a593Smuzhiyun
2888*4882a593Smuzhiyun if (dst_chan->flags & IEEE80211_CHAN_DISABLED ||
2889*4882a593Smuzhiyun src_chan->flags & IEEE80211_CHAN_DISABLED)
2890*4882a593Smuzhiyun return;
2891*4882a593Smuzhiyun
2892*4882a593Smuzhiyun if (src_chan->center_freq == dst_chan->center_freq &&
2893*4882a593Smuzhiyun dst_chan->dfs_state == NL80211_DFS_USABLE) {
2894*4882a593Smuzhiyun dst_chan->dfs_state = src_chan->dfs_state;
2895*4882a593Smuzhiyun dst_chan->dfs_state_entered = src_chan->dfs_state_entered;
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun }
2898*4882a593Smuzhiyun
2899*4882a593Smuzhiyun static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy,
2900*4882a593Smuzhiyun struct wiphy *src_wiphy)
2901*4882a593Smuzhiyun {
2902*4882a593Smuzhiyun struct ieee80211_supported_band *src_sband, *dst_sband;
2903*4882a593Smuzhiyun struct ieee80211_channel *src_chan, *dst_chan;
2904*4882a593Smuzhiyun int i, j, band;
2905*4882a593Smuzhiyun
2906*4882a593Smuzhiyun if (!reg_dfs_domain_same(dst_wiphy, src_wiphy))
2907*4882a593Smuzhiyun return;
2908*4882a593Smuzhiyun
2909*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++) {
2910*4882a593Smuzhiyun dst_sband = dst_wiphy->bands[band];
2911*4882a593Smuzhiyun src_sband = src_wiphy->bands[band];
2912*4882a593Smuzhiyun if (!dst_sband || !src_sband)
2913*4882a593Smuzhiyun continue;
2914*4882a593Smuzhiyun
2915*4882a593Smuzhiyun for (i = 0; i < dst_sband->n_channels; i++) {
2916*4882a593Smuzhiyun dst_chan = &dst_sband->channels[i];
2917*4882a593Smuzhiyun for (j = 0; j < src_sband->n_channels; j++) {
2918*4882a593Smuzhiyun src_chan = &src_sband->channels[j];
2919*4882a593Smuzhiyun reg_copy_dfs_chan_state(dst_chan, src_chan);
2920*4882a593Smuzhiyun }
2921*4882a593Smuzhiyun }
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun }
2924*4882a593Smuzhiyun
2925*4882a593Smuzhiyun static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy)
2926*4882a593Smuzhiyun {
2927*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
2928*4882a593Smuzhiyun
2929*4882a593Smuzhiyun ASSERT_RTNL();
2930*4882a593Smuzhiyun
2931*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
2932*4882a593Smuzhiyun if (wiphy == &rdev->wiphy)
2933*4882a593Smuzhiyun continue;
2934*4882a593Smuzhiyun wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy);
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun }
2937*4882a593Smuzhiyun
2938*4882a593Smuzhiyun /* This processes *all* regulatory hints */
2939*4882a593Smuzhiyun static void reg_process_hint(struct regulatory_request *reg_request)
2940*4882a593Smuzhiyun {
2941*4882a593Smuzhiyun struct wiphy *wiphy = NULL;
2942*4882a593Smuzhiyun enum reg_request_treatment treatment;
2943*4882a593Smuzhiyun enum nl80211_reg_initiator initiator = reg_request->initiator;
2944*4882a593Smuzhiyun
2945*4882a593Smuzhiyun if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
2946*4882a593Smuzhiyun wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
2947*4882a593Smuzhiyun
2948*4882a593Smuzhiyun switch (initiator) {
2949*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_CORE:
2950*4882a593Smuzhiyun treatment = reg_process_hint_core(reg_request);
2951*4882a593Smuzhiyun break;
2952*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_USER:
2953*4882a593Smuzhiyun treatment = reg_process_hint_user(reg_request);
2954*4882a593Smuzhiyun break;
2955*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_DRIVER:
2956*4882a593Smuzhiyun if (!wiphy)
2957*4882a593Smuzhiyun goto out_free;
2958*4882a593Smuzhiyun treatment = reg_process_hint_driver(wiphy, reg_request);
2959*4882a593Smuzhiyun break;
2960*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_COUNTRY_IE:
2961*4882a593Smuzhiyun if (!wiphy)
2962*4882a593Smuzhiyun goto out_free;
2963*4882a593Smuzhiyun treatment = reg_process_hint_country_ie(wiphy, reg_request);
2964*4882a593Smuzhiyun break;
2965*4882a593Smuzhiyun default:
2966*4882a593Smuzhiyun WARN(1, "invalid initiator %d\n", initiator);
2967*4882a593Smuzhiyun goto out_free;
2968*4882a593Smuzhiyun }
2969*4882a593Smuzhiyun
2970*4882a593Smuzhiyun if (treatment == REG_REQ_IGNORE)
2971*4882a593Smuzhiyun goto out_free;
2972*4882a593Smuzhiyun
2973*4882a593Smuzhiyun WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET,
2974*4882a593Smuzhiyun "unexpected treatment value %d\n", treatment);
2975*4882a593Smuzhiyun
2976*4882a593Smuzhiyun /* This is required so that the orig_* parameters are saved.
2977*4882a593Smuzhiyun * NOTE: treatment must be set for any case that reaches here!
2978*4882a593Smuzhiyun */
2979*4882a593Smuzhiyun if (treatment == REG_REQ_ALREADY_SET && wiphy &&
2980*4882a593Smuzhiyun wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
2981*4882a593Smuzhiyun wiphy_update_regulatory(wiphy, initiator);
2982*4882a593Smuzhiyun wiphy_all_share_dfs_chan_state(wiphy);
2983*4882a593Smuzhiyun reg_check_channels();
2984*4882a593Smuzhiyun }
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun return;
2987*4882a593Smuzhiyun
2988*4882a593Smuzhiyun out_free:
2989*4882a593Smuzhiyun reg_free_request(reg_request);
2990*4882a593Smuzhiyun }
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun static void notify_self_managed_wiphys(struct regulatory_request *request)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
2995*4882a593Smuzhiyun struct wiphy *wiphy;
2996*4882a593Smuzhiyun
2997*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
2998*4882a593Smuzhiyun wiphy = &rdev->wiphy;
2999*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
3000*4882a593Smuzhiyun request->initiator == NL80211_REGDOM_SET_BY_USER)
3001*4882a593Smuzhiyun reg_call_notifier(wiphy, request);
3002*4882a593Smuzhiyun }
3003*4882a593Smuzhiyun }
3004*4882a593Smuzhiyun
3005*4882a593Smuzhiyun /*
3006*4882a593Smuzhiyun * Processes regulatory hints, i.e. all the NL80211_REGDOM_SET_BY_*
3007*4882a593Smuzhiyun * requests. Regulatory hints come on a first come, first served basis
3008*4882a593Smuzhiyun * and we must process each one atomically.
3009*4882a593Smuzhiyun */
3010*4882a593Smuzhiyun static void reg_process_pending_hints(void)
3011*4882a593Smuzhiyun {
3012*4882a593Smuzhiyun struct regulatory_request *reg_request, *lr;
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun lr = get_last_request();
3015*4882a593Smuzhiyun
3016*4882a593Smuzhiyun /* When last_request->processed becomes true this will be rescheduled */
3017*4882a593Smuzhiyun if (lr && !lr->processed) {
3018*4882a593Smuzhiyun pr_debug("Pending regulatory request, waiting for it to be processed...\n");
3019*4882a593Smuzhiyun return;
3020*4882a593Smuzhiyun }
3021*4882a593Smuzhiyun
3022*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3023*4882a593Smuzhiyun
3024*4882a593Smuzhiyun if (list_empty(&reg_requests_list)) {
3025*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3026*4882a593Smuzhiyun return;
3027*4882a593Smuzhiyun }
3028*4882a593Smuzhiyun
3029*4882a593Smuzhiyun reg_request = list_first_entry(&reg_requests_list,
3030*4882a593Smuzhiyun struct regulatory_request,
3031*4882a593Smuzhiyun list);
3032*4882a593Smuzhiyun list_del_init(&reg_request->list);
3033*4882a593Smuzhiyun
3034*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun notify_self_managed_wiphys(reg_request);
3037*4882a593Smuzhiyun
3038*4882a593Smuzhiyun reg_process_hint(reg_request);
3039*4882a593Smuzhiyun
3040*4882a593Smuzhiyun lr = get_last_request();
3041*4882a593Smuzhiyun
3042*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3043*4882a593Smuzhiyun if (!list_empty(&reg_requests_list) && lr && lr->processed)
3044*4882a593Smuzhiyun schedule_work(&reg_work);
3045*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3046*4882a593Smuzhiyun }
3047*4882a593Smuzhiyun
3048*4882a593Smuzhiyun /* Processes beacon hints -- this has nothing to do with country IEs */
3049*4882a593Smuzhiyun static void reg_process_pending_beacon_hints(void)
3050*4882a593Smuzhiyun {
3051*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3052*4882a593Smuzhiyun struct reg_beacon *pending_beacon, *tmp;
3053*4882a593Smuzhiyun
3054*4882a593Smuzhiyun /* This goes through the _pending_ beacon list */
3055*4882a593Smuzhiyun spin_lock_bh(&reg_pending_beacons_lock);
3056*4882a593Smuzhiyun
3057*4882a593Smuzhiyun list_for_each_entry_safe(pending_beacon, tmp,
3058*4882a593Smuzhiyun &reg_pending_beacons, list) {
3059*4882a593Smuzhiyun list_del_init(&pending_beacon->list);
3060*4882a593Smuzhiyun
3061*4882a593Smuzhiyun /* Applies the beacon hint to current wiphys */
3062*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list)
3063*4882a593Smuzhiyun wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);
3064*4882a593Smuzhiyun
3065*4882a593Smuzhiyun /* Remembers the beacon hint for new wiphys or reg changes */
3066*4882a593Smuzhiyun list_add_tail(&pending_beacon->list, &reg_beacon_list);
3067*4882a593Smuzhiyun }
3068*4882a593Smuzhiyun
3069*4882a593Smuzhiyun spin_unlock_bh(&reg_pending_beacons_lock);
3070*4882a593Smuzhiyun }
3071*4882a593Smuzhiyun
3072*4882a593Smuzhiyun static void reg_process_self_managed_hints(void)
3073*4882a593Smuzhiyun {
3074*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3075*4882a593Smuzhiyun struct wiphy *wiphy;
3076*4882a593Smuzhiyun const struct ieee80211_regdomain *tmp;
3077*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
3078*4882a593Smuzhiyun enum nl80211_band band;
3079*4882a593Smuzhiyun struct regulatory_request request = {};
3080*4882a593Smuzhiyun
3081*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
3082*4882a593Smuzhiyun wiphy = &rdev->wiphy;
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3085*4882a593Smuzhiyun regd = rdev->requested_regd;
3086*4882a593Smuzhiyun rdev->requested_regd = NULL;
3087*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3088*4882a593Smuzhiyun
3089*4882a593Smuzhiyun if (regd == NULL)
3090*4882a593Smuzhiyun continue;
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun tmp = get_wiphy_regdom(wiphy);
3093*4882a593Smuzhiyun rcu_assign_pointer(wiphy->regd, regd);
3094*4882a593Smuzhiyun rcu_free_regdom(tmp);
3095*4882a593Smuzhiyun
3096*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++)
3097*4882a593Smuzhiyun handle_band_custom(wiphy, wiphy->bands[band], regd);
3098*4882a593Smuzhiyun
3099*4882a593Smuzhiyun reg_process_ht_flags(wiphy);
3100*4882a593Smuzhiyun
3101*4882a593Smuzhiyun request.wiphy_idx = get_wiphy_idx(wiphy);
3102*4882a593Smuzhiyun request.alpha2[0] = regd->alpha2[0];
3103*4882a593Smuzhiyun request.alpha2[1] = regd->alpha2[1];
3104*4882a593Smuzhiyun request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
3105*4882a593Smuzhiyun
3106*4882a593Smuzhiyun nl80211_send_wiphy_reg_change_event(&request);
3107*4882a593Smuzhiyun }
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun reg_check_channels();
3110*4882a593Smuzhiyun }
3111*4882a593Smuzhiyun
3112*4882a593Smuzhiyun static void reg_todo(struct work_struct *work)
3113*4882a593Smuzhiyun {
3114*4882a593Smuzhiyun rtnl_lock();
3115*4882a593Smuzhiyun reg_process_pending_hints();
3116*4882a593Smuzhiyun reg_process_pending_beacon_hints();
3117*4882a593Smuzhiyun reg_process_self_managed_hints();
3118*4882a593Smuzhiyun rtnl_unlock();
3119*4882a593Smuzhiyun }
3120*4882a593Smuzhiyun
3121*4882a593Smuzhiyun static void queue_regulatory_request(struct regulatory_request *request)
3122*4882a593Smuzhiyun {
3123*4882a593Smuzhiyun request->alpha2[0] = toupper(request->alpha2[0]);
3124*4882a593Smuzhiyun request->alpha2[1] = toupper(request->alpha2[1]);
3125*4882a593Smuzhiyun
3126*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3127*4882a593Smuzhiyun list_add_tail(&request->list, &reg_requests_list);
3128*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3129*4882a593Smuzhiyun
3130*4882a593Smuzhiyun schedule_work(&reg_work);
3131*4882a593Smuzhiyun }
3132*4882a593Smuzhiyun
3133*4882a593Smuzhiyun /*
3134*4882a593Smuzhiyun * Core regulatory hint -- happens during cfg80211_init()
3135*4882a593Smuzhiyun * and when we restore regulatory settings.
3136*4882a593Smuzhiyun */
3137*4882a593Smuzhiyun static int regulatory_hint_core(const char *alpha2)
3138*4882a593Smuzhiyun {
3139*4882a593Smuzhiyun struct regulatory_request *request;
3140*4882a593Smuzhiyun
3141*4882a593Smuzhiyun request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
3142*4882a593Smuzhiyun if (!request)
3143*4882a593Smuzhiyun return -ENOMEM;
3144*4882a593Smuzhiyun
3145*4882a593Smuzhiyun request->alpha2[0] = alpha2[0];
3146*4882a593Smuzhiyun request->alpha2[1] = alpha2[1];
3147*4882a593Smuzhiyun request->initiator = NL80211_REGDOM_SET_BY_CORE;
3148*4882a593Smuzhiyun request->wiphy_idx = WIPHY_IDX_INVALID;
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun queue_regulatory_request(request);
3151*4882a593Smuzhiyun
3152*4882a593Smuzhiyun return 0;
3153*4882a593Smuzhiyun }
3154*4882a593Smuzhiyun
3155*4882a593Smuzhiyun /* User hints */
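/*
 * This is the path taken for userspace hints such as "iw reg set <alpha2>",
 * which reach the kernel via nl80211 (NL80211_CMD_REQ_SET_REG).
 */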
3156*4882a593Smuzhiyun int regulatory_hint_user(const char *alpha2,
3157*4882a593Smuzhiyun enum nl80211_user_reg_hint_type user_reg_hint_type)
3158*4882a593Smuzhiyun {
3159*4882a593Smuzhiyun struct regulatory_request *request;
3160*4882a593Smuzhiyun
3161*4882a593Smuzhiyun if (WARN_ON(!alpha2))
3162*4882a593Smuzhiyun return -EINVAL;
3163*4882a593Smuzhiyun
3164*4882a593Smuzhiyun if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
3165*4882a593Smuzhiyun return -EINVAL;
3166*4882a593Smuzhiyun
3167*4882a593Smuzhiyun request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
3168*4882a593Smuzhiyun if (!request)
3169*4882a593Smuzhiyun return -ENOMEM;
3170*4882a593Smuzhiyun
3171*4882a593Smuzhiyun request->wiphy_idx = WIPHY_IDX_INVALID;
3172*4882a593Smuzhiyun request->alpha2[0] = alpha2[0];
3173*4882a593Smuzhiyun request->alpha2[1] = alpha2[1];
3174*4882a593Smuzhiyun request->initiator = NL80211_REGDOM_SET_BY_USER;
3175*4882a593Smuzhiyun request->user_reg_hint_type = user_reg_hint_type;
3176*4882a593Smuzhiyun
3177*4882a593Smuzhiyun /* Allow calling CRDA again */
3178*4882a593Smuzhiyun reset_crda_timeouts();
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun queue_regulatory_request(request);
3181*4882a593Smuzhiyun
3182*4882a593Smuzhiyun return 0;
3183*4882a593Smuzhiyun }
3184*4882a593Smuzhiyun
3185*4882a593Smuzhiyun int regulatory_hint_indoor(bool is_indoor, u32 portid)
3186*4882a593Smuzhiyun {
3187*4882a593Smuzhiyun spin_lock(&reg_indoor_lock);
3188*4882a593Smuzhiyun
3189*4882a593Smuzhiyun /* It is possible that more than one user space process is trying to
3190*4882a593Smuzhiyun * configure the indoor setting. To handle such cases, clear the indoor
3191*4882a593Smuzhiyun * setting in case some process does not think that the device
3192*4882a593Smuzhiyun * is operating in an indoor environment. In addition, if a user space
3193*4882a593Smuzhiyun * process indicates that it is controlling the indoor setting, save its
3194*4882a593Smuzhiyun * portid, i.e., make it the owner.
3195*4882a593Smuzhiyun */
3196*4882a593Smuzhiyun reg_is_indoor = is_indoor;
3197*4882a593Smuzhiyun if (reg_is_indoor) {
3198*4882a593Smuzhiyun if (!reg_is_indoor_portid)
3199*4882a593Smuzhiyun reg_is_indoor_portid = portid;
3200*4882a593Smuzhiyun } else {
3201*4882a593Smuzhiyun reg_is_indoor_portid = 0;
3202*4882a593Smuzhiyun }
3203*4882a593Smuzhiyun
3204*4882a593Smuzhiyun spin_unlock(&reg_indoor_lock);
3205*4882a593Smuzhiyun
3206*4882a593Smuzhiyun if (!is_indoor)
3207*4882a593Smuzhiyun reg_check_channels();
3208*4882a593Smuzhiyun
3209*4882a593Smuzhiyun return 0;
3210*4882a593Smuzhiyun }
3211*4882a593Smuzhiyun
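/*
 * Called when the netlink socket that owns the indoor setting goes away
 * (e.g. the userspace process exits), so that a stale indoor hint is not
 * left behind.
 */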
3212*4882a593Smuzhiyun void regulatory_netlink_notify(u32 portid)
3213*4882a593Smuzhiyun {
3214*4882a593Smuzhiyun spin_lock(&reg_indoor_lock);
3215*4882a593Smuzhiyun
3216*4882a593Smuzhiyun if (reg_is_indoor_portid != portid) {
3217*4882a593Smuzhiyun spin_unlock(&reg_indoor_lock);
3218*4882a593Smuzhiyun return;
3219*4882a593Smuzhiyun }
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun reg_is_indoor = false;
3222*4882a593Smuzhiyun reg_is_indoor_portid = 0;
3223*4882a593Smuzhiyun
3224*4882a593Smuzhiyun spin_unlock(&reg_indoor_lock);
3225*4882a593Smuzhiyun
3226*4882a593Smuzhiyun reg_check_channels();
3227*4882a593Smuzhiyun }
3228*4882a593Smuzhiyun
3229*4882a593Smuzhiyun /* Driver hints */
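/*
 * Illustrative usage sketch (hypothetical driver code): a driver that
 * read, say, "US" from its EEPROM would typically call
 *
 *	regulatory_hint(wiphy, "US");
 *
 * after wiphy_register(), which queues an NL80211_REGDOM_SET_BY_DRIVER
 * request for the regulatory workqueue.
 */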
3230*4882a593Smuzhiyun int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
3231*4882a593Smuzhiyun {
3232*4882a593Smuzhiyun struct regulatory_request *request;
3233*4882a593Smuzhiyun
3234*4882a593Smuzhiyun if (WARN_ON(!alpha2 || !wiphy))
3235*4882a593Smuzhiyun return -EINVAL;
3236*4882a593Smuzhiyun
3237*4882a593Smuzhiyun wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
3238*4882a593Smuzhiyun
3239*4882a593Smuzhiyun request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
3240*4882a593Smuzhiyun if (!request)
3241*4882a593Smuzhiyun return -ENOMEM;
3242*4882a593Smuzhiyun
3243*4882a593Smuzhiyun request->wiphy_idx = get_wiphy_idx(wiphy);
3244*4882a593Smuzhiyun
3245*4882a593Smuzhiyun request->alpha2[0] = alpha2[0];
3246*4882a593Smuzhiyun request->alpha2[1] = alpha2[1];
3247*4882a593Smuzhiyun request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun /* Allow calling CRDA again */
3250*4882a593Smuzhiyun reset_crda_timeouts();
3251*4882a593Smuzhiyun
3252*4882a593Smuzhiyun queue_regulatory_request(request);
3253*4882a593Smuzhiyun
3254*4882a593Smuzhiyun return 0;
3255*4882a593Smuzhiyun }
3256*4882a593Smuzhiyun EXPORT_SYMBOL(regulatory_hint);
3257*4882a593Smuzhiyun
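/*
 * Called by the MLME (e.g. mac80211) once a connection has been
 * established and a country information element was received from
 * the AP.
 */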
3258*4882a593Smuzhiyun void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band,
3259*4882a593Smuzhiyun const u8 *country_ie, u8 country_ie_len)
3260*4882a593Smuzhiyun {
3261*4882a593Smuzhiyun char alpha2[2];
3262*4882a593Smuzhiyun enum environment_cap env = ENVIRON_ANY;
3263*4882a593Smuzhiyun struct regulatory_request *request = NULL, *lr;
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun /* IE len must be evenly divisible by 2 */
3266*4882a593Smuzhiyun if (country_ie_len & 0x01)
3267*4882a593Smuzhiyun return;
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
3270*4882a593Smuzhiyun return;
3271*4882a593Smuzhiyun
3272*4882a593Smuzhiyun request = kzalloc(sizeof(*request), GFP_KERNEL);
3273*4882a593Smuzhiyun if (!request)
3274*4882a593Smuzhiyun return;
3275*4882a593Smuzhiyun
3276*4882a593Smuzhiyun alpha2[0] = country_ie[0];
3277*4882a593Smuzhiyun alpha2[1] = country_ie[1];
3278*4882a593Smuzhiyun
3279*4882a593Smuzhiyun if (country_ie[2] == 'I')
3280*4882a593Smuzhiyun env = ENVIRON_INDOOR;
3281*4882a593Smuzhiyun else if (country_ie[2] == 'O')
3282*4882a593Smuzhiyun env = ENVIRON_OUTDOOR;
3283*4882a593Smuzhiyun
3284*4882a593Smuzhiyun rcu_read_lock();
3285*4882a593Smuzhiyun lr = get_last_request();
3286*4882a593Smuzhiyun
3287*4882a593Smuzhiyun if (unlikely(!lr))
3288*4882a593Smuzhiyun goto out;
3289*4882a593Smuzhiyun
3290*4882a593Smuzhiyun /*
3291*4882a593Smuzhiyun * We will run this only upon a successful connection on cfg80211.
3292*4882a593Smuzhiyun * We leave conflict resolution to the workqueue, where we can
3293*4882a593Smuzhiyun * hold the RTNL.
3294*4882a593Smuzhiyun */
3295*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
3296*4882a593Smuzhiyun lr->wiphy_idx != WIPHY_IDX_INVALID)
3297*4882a593Smuzhiyun goto out;
3298*4882a593Smuzhiyun
3299*4882a593Smuzhiyun request->wiphy_idx = get_wiphy_idx(wiphy);
3300*4882a593Smuzhiyun request->alpha2[0] = alpha2[0];
3301*4882a593Smuzhiyun request->alpha2[1] = alpha2[1];
3302*4882a593Smuzhiyun request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
3303*4882a593Smuzhiyun request->country_ie_env = env;
3304*4882a593Smuzhiyun
3305*4882a593Smuzhiyun /* Allow calling CRDA again */
3306*4882a593Smuzhiyun reset_crda_timeouts();
3307*4882a593Smuzhiyun
3308*4882a593Smuzhiyun queue_regulatory_request(request);
3309*4882a593Smuzhiyun request = NULL;
3310*4882a593Smuzhiyun out:
3311*4882a593Smuzhiyun kfree(request);
3312*4882a593Smuzhiyun rcu_read_unlock();
3313*4882a593Smuzhiyun }
3314*4882a593Smuzhiyun
3315*4882a593Smuzhiyun static void restore_alpha2(char *alpha2, bool reset_user)
3316*4882a593Smuzhiyun {
3317*4882a593Smuzhiyun /* indicates there is no alpha2 to consider for restoration */
3318*4882a593Smuzhiyun alpha2[0] = '9';
3319*4882a593Smuzhiyun alpha2[1] = '7';
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun /* The user setting has precedence over the module parameter */
3322*4882a593Smuzhiyun if (is_user_regdom_saved()) {
3323*4882a593Smuzhiyun /* Unless we're asked to ignore it and reset it */
3324*4882a593Smuzhiyun if (reset_user) {
3325*4882a593Smuzhiyun pr_debug("Restoring regulatory settings including user preference\n");
3326*4882a593Smuzhiyun user_alpha2[0] = '9';
3327*4882a593Smuzhiyun user_alpha2[1] = '7';
3328*4882a593Smuzhiyun
3329*4882a593Smuzhiyun /*
3330*4882a593Smuzhiyun * If we're ignoring user settings, we still need to
3331*4882a593Smuzhiyun * check the module parameter to ensure we put things
3332*4882a593Smuzhiyun * back as they were for a full restore.
3333*4882a593Smuzhiyun */
3334*4882a593Smuzhiyun if (!is_world_regdom(ieee80211_regdom)) {
3335*4882a593Smuzhiyun pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
3336*4882a593Smuzhiyun ieee80211_regdom[0], ieee80211_regdom[1]);
3337*4882a593Smuzhiyun alpha2[0] = ieee80211_regdom[0];
3338*4882a593Smuzhiyun alpha2[1] = ieee80211_regdom[1];
3339*4882a593Smuzhiyun }
3340*4882a593Smuzhiyun } else {
3341*4882a593Smuzhiyun pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n",
3342*4882a593Smuzhiyun user_alpha2[0], user_alpha2[1]);
3343*4882a593Smuzhiyun alpha2[0] = user_alpha2[0];
3344*4882a593Smuzhiyun alpha2[1] = user_alpha2[1];
3345*4882a593Smuzhiyun }
3346*4882a593Smuzhiyun } else if (!is_world_regdom(ieee80211_regdom)) {
3347*4882a593Smuzhiyun pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
3348*4882a593Smuzhiyun ieee80211_regdom[0], ieee80211_regdom[1]);
3349*4882a593Smuzhiyun alpha2[0] = ieee80211_regdom[0];
3350*4882a593Smuzhiyun alpha2[1] = ieee80211_regdom[1];
3351*4882a593Smuzhiyun } else
3352*4882a593Smuzhiyun pr_debug("Restoring regulatory settings\n");
3353*4882a593Smuzhiyun }
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun static void restore_custom_reg_settings(struct wiphy *wiphy)
3356*4882a593Smuzhiyun {
3357*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
3358*4882a593Smuzhiyun enum nl80211_band band;
3359*4882a593Smuzhiyun struct ieee80211_channel *chan;
3360*4882a593Smuzhiyun int i;
3361*4882a593Smuzhiyun
3362*4882a593Smuzhiyun for (band = 0; band < NUM_NL80211_BANDS; band++) {
3363*4882a593Smuzhiyun sband = wiphy->bands[band];
3364*4882a593Smuzhiyun if (!sband)
3365*4882a593Smuzhiyun continue;
3366*4882a593Smuzhiyun for (i = 0; i < sband->n_channels; i++) {
3367*4882a593Smuzhiyun chan = &sband->channels[i];
3368*4882a593Smuzhiyun chan->flags = chan->orig_flags;
3369*4882a593Smuzhiyun chan->max_antenna_gain = chan->orig_mag;
3370*4882a593Smuzhiyun chan->max_power = chan->orig_mpwr;
3371*4882a593Smuzhiyun chan->beacon_found = false;
3372*4882a593Smuzhiyun }
3373*4882a593Smuzhiyun }
3374*4882a593Smuzhiyun }
3375*4882a593Smuzhiyun
3376*4882a593Smuzhiyun /*
3377*4882a593Smuzhiyun * Restoring regulatory settings involves ignoring any
3378*4882a593Smuzhiyun * possibly stale country IE information and, if so desired, user
3379*4882a593Smuzhiyun * regulatory settings; this includes any beacon hints learned,
3380*4882a593Smuzhiyun * as we could have traveled to another country after
3381*4882a593Smuzhiyun * disconnection. To restore regulatory settings we do
3382*4882a593Smuzhiyun * exactly what we did at bootup:
3383*4882a593Smuzhiyun *
3384*4882a593Smuzhiyun * - send a core regulatory hint
3385*4882a593Smuzhiyun * - send a user regulatory hint if applicable
3386*4882a593Smuzhiyun *
3387*4882a593Smuzhiyun * Device drivers that send a regulatory hint for a specific country
3388*4882a593Smuzhiyun * keep their own regulatory domain on wiphy->regd so that does
3389*4882a593Smuzhiyun * not need to be remembered.
3390*4882a593Smuzhiyun */
3391*4882a593Smuzhiyun static void restore_regulatory_settings(bool reset_user, bool cached)
3392*4882a593Smuzhiyun {
3393*4882a593Smuzhiyun char alpha2[2];
3394*4882a593Smuzhiyun char world_alpha2[2];
3395*4882a593Smuzhiyun struct reg_beacon *reg_beacon, *btmp;
3396*4882a593Smuzhiyun LIST_HEAD(tmp_reg_req_list);
3397*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3398*4882a593Smuzhiyun
3399*4882a593Smuzhiyun ASSERT_RTNL();
3400*4882a593Smuzhiyun
3401*4882a593Smuzhiyun /*
3402*4882a593Smuzhiyun * Clear the indoor setting in case it is not controlled by user
3403*4882a593Smuzhiyun * space, as otherwise there is no guarantee that the device is still
3404*4882a593Smuzhiyun * operating in an indoor environment.
3405*4882a593Smuzhiyun */
3406*4882a593Smuzhiyun spin_lock(&reg_indoor_lock);
3407*4882a593Smuzhiyun if (reg_is_indoor && !reg_is_indoor_portid) {
3408*4882a593Smuzhiyun reg_is_indoor = false;
3409*4882a593Smuzhiyun reg_check_channels();
3410*4882a593Smuzhiyun }
3411*4882a593Smuzhiyun spin_unlock(&reg_indoor_lock);
3412*4882a593Smuzhiyun
3413*4882a593Smuzhiyun reset_regdomains(true, &world_regdom);
3414*4882a593Smuzhiyun restore_alpha2(alpha2, reset_user);
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun /*
3417*4882a593Smuzhiyun * If there are any pending requests we simply
3418*4882a593Smuzhiyun * stash them on a temporary pending queue and
3419*4882a593Smuzhiyun * add them back after we've restored regulatory
3420*4882a593Smuzhiyun * settings.
3421*4882a593Smuzhiyun */
3422*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3423*4882a593Smuzhiyun list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list);
3424*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3425*4882a593Smuzhiyun
3426*4882a593Smuzhiyun /* Clear beacon hints */
3427*4882a593Smuzhiyun spin_lock_bh(&reg_pending_beacons_lock);
3428*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
3429*4882a593Smuzhiyun list_del(&reg_beacon->list);
3430*4882a593Smuzhiyun kfree(reg_beacon);
3431*4882a593Smuzhiyun }
3432*4882a593Smuzhiyun spin_unlock_bh(&reg_pending_beacons_lock);
3433*4882a593Smuzhiyun
3434*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) {
3435*4882a593Smuzhiyun list_del(&reg_beacon->list);
3436*4882a593Smuzhiyun kfree(reg_beacon);
3437*4882a593Smuzhiyun }
3438*4882a593Smuzhiyun
3439*4882a593Smuzhiyun /* First restore to the basic regulatory settings */
3440*4882a593Smuzhiyun world_alpha2[0] = cfg80211_world_regdom->alpha2[0];
3441*4882a593Smuzhiyun world_alpha2[1] = cfg80211_world_regdom->alpha2[1];
3442*4882a593Smuzhiyun
3443*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
3444*4882a593Smuzhiyun if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
3445*4882a593Smuzhiyun continue;
3446*4882a593Smuzhiyun if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG)
3447*4882a593Smuzhiyun restore_custom_reg_settings(&rdev->wiphy);
3448*4882a593Smuzhiyun }
3449*4882a593Smuzhiyun
3450*4882a593Smuzhiyun if (cached && (!is_an_alpha2(alpha2) ||
3451*4882a593Smuzhiyun !IS_ERR_OR_NULL(cfg80211_user_regdom))) {
3452*4882a593Smuzhiyun reset_regdomains(false, cfg80211_world_regdom);
3453*4882a593Smuzhiyun update_all_wiphy_regulatory(NL80211_REGDOM_SET_BY_CORE);
3454*4882a593Smuzhiyun print_regdomain(get_cfg80211_regdom());
3455*4882a593Smuzhiyun nl80211_send_reg_change_event(&core_request_world);
3456*4882a593Smuzhiyun reg_set_request_processed();
3457*4882a593Smuzhiyun
3458*4882a593Smuzhiyun if (is_an_alpha2(alpha2) &&
3459*4882a593Smuzhiyun !regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER)) {
3460*4882a593Smuzhiyun struct regulatory_request *ureq;
3461*4882a593Smuzhiyun
3462*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3463*4882a593Smuzhiyun ureq = list_last_entry(&reg_requests_list,
3464*4882a593Smuzhiyun struct regulatory_request,
3465*4882a593Smuzhiyun list);
3466*4882a593Smuzhiyun list_del(&ureq->list);
3467*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3468*4882a593Smuzhiyun
3469*4882a593Smuzhiyun notify_self_managed_wiphys(ureq);
3470*4882a593Smuzhiyun reg_update_last_request(ureq);
3471*4882a593Smuzhiyun set_regdom(reg_copy_regd(cfg80211_user_regdom),
3472*4882a593Smuzhiyun REGD_SOURCE_CACHED);
3473*4882a593Smuzhiyun }
3474*4882a593Smuzhiyun } else {
3475*4882a593Smuzhiyun regulatory_hint_core(world_alpha2);
3476*4882a593Smuzhiyun
3477*4882a593Smuzhiyun /*
3478*4882a593Smuzhiyun * This restores the ieee80211_regdom module parameter
3479*4882a593Smuzhiyun * preference or the last user-requested regulatory
3480*4882a593Smuzhiyun * settings; user regulatory settings take precedence.
3481*4882a593Smuzhiyun */
3482*4882a593Smuzhiyun if (is_an_alpha2(alpha2))
3483*4882a593Smuzhiyun regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
3484*4882a593Smuzhiyun }
3485*4882a593Smuzhiyun
3486*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3487*4882a593Smuzhiyun list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
3488*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3489*4882a593Smuzhiyun
3490*4882a593Smuzhiyun pr_debug("Kicking the queue\n");
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun schedule_work(&reg_work);
3493*4882a593Smuzhiyun }
3494*4882a593Smuzhiyun
3495*4882a593Smuzhiyun static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
3496*4882a593Smuzhiyun {
3497*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3498*4882a593Smuzhiyun struct wireless_dev *wdev;
3499*4882a593Smuzhiyun
3500*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
3501*4882a593Smuzhiyun list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
3502*4882a593Smuzhiyun wdev_lock(wdev);
3503*4882a593Smuzhiyun if (!(wdev->wiphy->regulatory_flags & flag)) {
3504*4882a593Smuzhiyun wdev_unlock(wdev);
3505*4882a593Smuzhiyun return false;
3506*4882a593Smuzhiyun }
3507*4882a593Smuzhiyun wdev_unlock(wdev);
3508*4882a593Smuzhiyun }
3509*4882a593Smuzhiyun }
3510*4882a593Smuzhiyun
3511*4882a593Smuzhiyun return true;
3512*4882a593Smuzhiyun }
3513*4882a593Smuzhiyun
3514*4882a593Smuzhiyun void regulatory_hint_disconnect(void)
3515*4882a593Smuzhiyun {
3516*4882a593Smuzhiyun /* Restoring regulatory settings is not required when all wiphys
3517*4882a593Smuzhiyun * ignore the country IE from the connected access point, but beacon
3518*4882a593Smuzhiyun * hints still need to be cleared when the wiphys support them.
3519*4882a593Smuzhiyun */
3520*4882a593Smuzhiyun if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) {
3521*4882a593Smuzhiyun struct reg_beacon *reg_beacon, *btmp;
3522*4882a593Smuzhiyun
3523*4882a593Smuzhiyun if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS))
3524*4882a593Smuzhiyun return;
3525*4882a593Smuzhiyun
3526*4882a593Smuzhiyun spin_lock_bh(&reg_pending_beacons_lock);
3527*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp,
3528*4882a593Smuzhiyun &reg_pending_beacons, list) {
3529*4882a593Smuzhiyun list_del(&reg_beacon->list);
3530*4882a593Smuzhiyun kfree(reg_beacon);
3531*4882a593Smuzhiyun }
3532*4882a593Smuzhiyun spin_unlock_bh(&reg_pending_beacons_lock);
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp,
3535*4882a593Smuzhiyun &reg_beacon_list, list) {
3536*4882a593Smuzhiyun list_del(&reg_beacon->list);
3537*4882a593Smuzhiyun kfree(reg_beacon);
3538*4882a593Smuzhiyun }
3539*4882a593Smuzhiyun
3540*4882a593Smuzhiyun return;
3541*4882a593Smuzhiyun }
3542*4882a593Smuzhiyun
3543*4882a593Smuzhiyun pr_debug("All devices are disconnected, going to restore regulatory settings\n");
3544*4882a593Smuzhiyun restore_regulatory_settings(false, true);
3545*4882a593Smuzhiyun }
3546*4882a593Smuzhiyun
3547*4882a593Smuzhiyun static bool freq_is_chan_12_13_14(u32 freq)
3548*4882a593Smuzhiyun {
3549*4882a593Smuzhiyun if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
3550*4882a593Smuzhiyun freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
3551*4882a593Smuzhiyun freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ))
3552*4882a593Smuzhiyun return true;
3553*4882a593Smuzhiyun return false;
3554*4882a593Smuzhiyun }
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan)
3557*4882a593Smuzhiyun {
3558*4882a593Smuzhiyun struct reg_beacon *pending_beacon;
3559*4882a593Smuzhiyun
3560*4882a593Smuzhiyun list_for_each_entry(pending_beacon, &reg_pending_beacons, list)
3561*4882a593Smuzhiyun if (ieee80211_channel_equal(beacon_chan,
3562*4882a593Smuzhiyun &pending_beacon->chan))
3563*4882a593Smuzhiyun return true;
3564*4882a593Smuzhiyun return false;
3565*4882a593Smuzhiyun }
3566*4882a593Smuzhiyun
3567*4882a593Smuzhiyun int regulatory_hint_found_beacon(struct wiphy *wiphy,
3568*4882a593Smuzhiyun struct ieee80211_channel *beacon_chan,
3569*4882a593Smuzhiyun gfp_t gfp)
3570*4882a593Smuzhiyun {
3571*4882a593Smuzhiyun struct reg_beacon *reg_beacon;
3572*4882a593Smuzhiyun bool processing;
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun if (beacon_chan->beacon_found ||
3575*4882a593Smuzhiyun beacon_chan->flags & IEEE80211_CHAN_RADAR ||
3576*4882a593Smuzhiyun (beacon_chan->band == NL80211_BAND_2GHZ &&
3577*4882a593Smuzhiyun !freq_is_chan_12_13_14(beacon_chan->center_freq)))
3578*4882a593Smuzhiyun return 0;
3579*4882a593Smuzhiyun
3580*4882a593Smuzhiyun spin_lock_bh(&reg_pending_beacons_lock);
3581*4882a593Smuzhiyun processing = pending_reg_beacon(beacon_chan);
3582*4882a593Smuzhiyun spin_unlock_bh(&reg_pending_beacons_lock);
3583*4882a593Smuzhiyun
3584*4882a593Smuzhiyun if (processing)
3585*4882a593Smuzhiyun return 0;
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
3588*4882a593Smuzhiyun if (!reg_beacon)
3589*4882a593Smuzhiyun return -ENOMEM;
3590*4882a593Smuzhiyun
3591*4882a593Smuzhiyun pr_debug("Found new beacon on frequency: %d.%03d MHz (Ch %d) on %s\n",
3592*4882a593Smuzhiyun beacon_chan->center_freq, beacon_chan->freq_offset,
3593*4882a593Smuzhiyun ieee80211_freq_khz_to_channel(
3594*4882a593Smuzhiyun ieee80211_channel_to_khz(beacon_chan)),
3595*4882a593Smuzhiyun wiphy_name(wiphy));
3596*4882a593Smuzhiyun
3597*4882a593Smuzhiyun memcpy(&reg_beacon->chan, beacon_chan,
3598*4882a593Smuzhiyun sizeof(struct ieee80211_channel));
3599*4882a593Smuzhiyun
3600*4882a593Smuzhiyun /*
3601*4882a593Smuzhiyun * Since we can be called from BH or non-BH context
3602*4882a593Smuzhiyun * we must use spin_lock_bh()
3603*4882a593Smuzhiyun */
3604*4882a593Smuzhiyun spin_lock_bh(&reg_pending_beacons_lock);
3605*4882a593Smuzhiyun list_add_tail(&reg_beacon->list, &reg_pending_beacons);
3606*4882a593Smuzhiyun spin_unlock_bh(&reg_pending_beacons_lock);
3607*4882a593Smuzhiyun
3608*4882a593Smuzhiyun schedule_work(&reg_work);
3609*4882a593Smuzhiyun
3610*4882a593Smuzhiyun return 0;
3611*4882a593Smuzhiyun }
3612*4882a593Smuzhiyun
3613*4882a593Smuzhiyun static void print_rd_rules(const struct ieee80211_regdomain *rd)
3614*4882a593Smuzhiyun {
3615*4882a593Smuzhiyun unsigned int i;
3616*4882a593Smuzhiyun const struct ieee80211_reg_rule *reg_rule = NULL;
3617*4882a593Smuzhiyun const struct ieee80211_freq_range *freq_range = NULL;
3618*4882a593Smuzhiyun const struct ieee80211_power_rule *power_rule = NULL;
3619*4882a593Smuzhiyun char bw[32], cac_time[32];
3620*4882a593Smuzhiyun
3621*4882a593Smuzhiyun pr_debug(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
3622*4882a593Smuzhiyun
3623*4882a593Smuzhiyun for (i = 0; i < rd->n_reg_rules; i++) {
3624*4882a593Smuzhiyun reg_rule = &rd->reg_rules[i];
3625*4882a593Smuzhiyun freq_range = &reg_rule->freq_range;
3626*4882a593Smuzhiyun power_rule = &reg_rule->power_rule;
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun if (reg_rule->flags & NL80211_RRF_AUTO_BW)
3629*4882a593Smuzhiyun snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
3630*4882a593Smuzhiyun freq_range->max_bandwidth_khz,
3631*4882a593Smuzhiyun reg_get_max_bandwidth(rd, reg_rule));
3632*4882a593Smuzhiyun else
3633*4882a593Smuzhiyun snprintf(bw, sizeof(bw), "%d KHz",
3634*4882a593Smuzhiyun freq_range->max_bandwidth_khz);
3635*4882a593Smuzhiyun
3636*4882a593Smuzhiyun if (reg_rule->flags & NL80211_RRF_DFS)
3637*4882a593Smuzhiyun scnprintf(cac_time, sizeof(cac_time), "%u s",
3638*4882a593Smuzhiyun reg_rule->dfs_cac_ms/1000);
3639*4882a593Smuzhiyun else
3640*4882a593Smuzhiyun scnprintf(cac_time, sizeof(cac_time), "N/A");
3641*4882a593Smuzhiyun
3642*4882a593Smuzhiyun
3643*4882a593Smuzhiyun /*
3644*4882a593Smuzhiyun * There may not be documentation for max antenna gain
3645*4882a593Smuzhiyun * in certain regions
3646*4882a593Smuzhiyun */
3647*4882a593Smuzhiyun if (power_rule->max_antenna_gain)
3648*4882a593Smuzhiyun pr_debug(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
3649*4882a593Smuzhiyun freq_range->start_freq_khz,
3650*4882a593Smuzhiyun freq_range->end_freq_khz,
3651*4882a593Smuzhiyun bw,
3652*4882a593Smuzhiyun power_rule->max_antenna_gain,
3653*4882a593Smuzhiyun power_rule->max_eirp,
3654*4882a593Smuzhiyun cac_time);
3655*4882a593Smuzhiyun else
3656*4882a593Smuzhiyun pr_debug(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
3657*4882a593Smuzhiyun freq_range->start_freq_khz,
3658*4882a593Smuzhiyun freq_range->end_freq_khz,
3659*4882a593Smuzhiyun bw,
3660*4882a593Smuzhiyun power_rule->max_eirp,
3661*4882a593Smuzhiyun cac_time);
3662*4882a593Smuzhiyun }
3663*4882a593Smuzhiyun }
3664*4882a593Smuzhiyun
3665*4882a593Smuzhiyun bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region)
3666*4882a593Smuzhiyun {
3667*4882a593Smuzhiyun switch (dfs_region) {
3668*4882a593Smuzhiyun case NL80211_DFS_UNSET:
3669*4882a593Smuzhiyun case NL80211_DFS_FCC:
3670*4882a593Smuzhiyun case NL80211_DFS_ETSI:
3671*4882a593Smuzhiyun case NL80211_DFS_JP:
3672*4882a593Smuzhiyun return true;
3673*4882a593Smuzhiyun default:
3674*4882a593Smuzhiyun pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region);
3675*4882a593Smuzhiyun return false;
3676*4882a593Smuzhiyun }
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun static void print_regdomain(const struct ieee80211_regdomain *rd)
3680*4882a593Smuzhiyun {
3681*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
3682*4882a593Smuzhiyun
3683*4882a593Smuzhiyun if (is_intersected_alpha2(rd->alpha2)) {
3684*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
3685*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3686*4882a593Smuzhiyun rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx);
3687*4882a593Smuzhiyun if (rdev) {
3688*4882a593Smuzhiyun pr_debug("Current regulatory domain updated by AP to: %c%c\n",
3689*4882a593Smuzhiyun rdev->country_ie_alpha2[0],
3690*4882a593Smuzhiyun rdev->country_ie_alpha2[1]);
3691*4882a593Smuzhiyun } else
3692*4882a593Smuzhiyun pr_debug("Current regulatory domain intersected:\n");
3693*4882a593Smuzhiyun } else
3694*4882a593Smuzhiyun pr_debug("Current regulatory domain intersected:\n");
3695*4882a593Smuzhiyun } else if (is_world_regdom(rd->alpha2)) {
3696*4882a593Smuzhiyun pr_debug("World regulatory domain updated:\n");
3697*4882a593Smuzhiyun } else {
3698*4882a593Smuzhiyun if (is_unknown_alpha2(rd->alpha2))
3699*4882a593Smuzhiyun pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n");
3700*4882a593Smuzhiyun else {
3701*4882a593Smuzhiyun if (reg_request_cell_base(lr))
3702*4882a593Smuzhiyun pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n",
3703*4882a593Smuzhiyun rd->alpha2[0], rd->alpha2[1]);
3704*4882a593Smuzhiyun else
3705*4882a593Smuzhiyun pr_debug("Regulatory domain changed to country: %c%c\n",
3706*4882a593Smuzhiyun rd->alpha2[0], rd->alpha2[1]);
3707*4882a593Smuzhiyun }
3708*4882a593Smuzhiyun }
3709*4882a593Smuzhiyun
3710*4882a593Smuzhiyun pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region));
3711*4882a593Smuzhiyun print_rd_rules(rd);
3712*4882a593Smuzhiyun }
3713*4882a593Smuzhiyun
3714*4882a593Smuzhiyun static void print_regdomain_info(const struct ieee80211_regdomain *rd)
3715*4882a593Smuzhiyun {
3716*4882a593Smuzhiyun pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
3717*4882a593Smuzhiyun print_rd_rules(rd);
3718*4882a593Smuzhiyun }
3719*4882a593Smuzhiyun
3720*4882a593Smuzhiyun static int reg_set_rd_core(const struct ieee80211_regdomain *rd)
3721*4882a593Smuzhiyun {
3722*4882a593Smuzhiyun if (!is_world_regdom(rd->alpha2))
3723*4882a593Smuzhiyun return -EINVAL;
3724*4882a593Smuzhiyun update_world_regdomain(rd);
3725*4882a593Smuzhiyun return 0;
3726*4882a593Smuzhiyun }
3727*4882a593Smuzhiyun
3728*4882a593Smuzhiyun static int reg_set_rd_user(const struct ieee80211_regdomain *rd,
3729*4882a593Smuzhiyun struct regulatory_request *user_request)
3730*4882a593Smuzhiyun {
3731*4882a593Smuzhiyun const struct ieee80211_regdomain *intersected_rd = NULL;
3732*4882a593Smuzhiyun
3733*4882a593Smuzhiyun if (!regdom_changes(rd->alpha2))
3734*4882a593Smuzhiyun return -EALREADY;
3735*4882a593Smuzhiyun
3736*4882a593Smuzhiyun if (!is_valid_rd(rd)) {
3737*4882a593Smuzhiyun pr_err("Invalid regulatory domain detected: %c%c\n",
3738*4882a593Smuzhiyun rd->alpha2[0], rd->alpha2[1]);
3739*4882a593Smuzhiyun print_regdomain_info(rd);
3740*4882a593Smuzhiyun return -EINVAL;
3741*4882a593Smuzhiyun }
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun if (!user_request->intersect) {
3744*4882a593Smuzhiyun reset_regdomains(false, rd);
3745*4882a593Smuzhiyun return 0;
3746*4882a593Smuzhiyun }
3747*4882a593Smuzhiyun
3748*4882a593Smuzhiyun intersected_rd = regdom_intersect(rd, get_cfg80211_regdom());
3749*4882a593Smuzhiyun if (!intersected_rd)
3750*4882a593Smuzhiyun return -EINVAL;
3751*4882a593Smuzhiyun
3752*4882a593Smuzhiyun kfree(rd);
3753*4882a593Smuzhiyun rd = NULL;
3754*4882a593Smuzhiyun reset_regdomains(false, intersected_rd);
3755*4882a593Smuzhiyun
3756*4882a593Smuzhiyun return 0;
3757*4882a593Smuzhiyun }
3758*4882a593Smuzhiyun
3759*4882a593Smuzhiyun static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
3760*4882a593Smuzhiyun struct regulatory_request *driver_request)
3761*4882a593Smuzhiyun {
3762*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
3763*4882a593Smuzhiyun const struct ieee80211_regdomain *intersected_rd = NULL;
3764*4882a593Smuzhiyun const struct ieee80211_regdomain *tmp;
3765*4882a593Smuzhiyun struct wiphy *request_wiphy;
3766*4882a593Smuzhiyun
3767*4882a593Smuzhiyun if (is_world_regdom(rd->alpha2))
3768*4882a593Smuzhiyun return -EINVAL;
3769*4882a593Smuzhiyun
3770*4882a593Smuzhiyun if (!regdom_changes(rd->alpha2))
3771*4882a593Smuzhiyun return -EALREADY;
3772*4882a593Smuzhiyun
3773*4882a593Smuzhiyun if (!is_valid_rd(rd)) {
3774*4882a593Smuzhiyun pr_err("Invalid regulatory domain detected: %c%c\n",
3775*4882a593Smuzhiyun rd->alpha2[0], rd->alpha2[1]);
3776*4882a593Smuzhiyun print_regdomain_info(rd);
3777*4882a593Smuzhiyun return -EINVAL;
3778*4882a593Smuzhiyun }
3779*4882a593Smuzhiyun
3780*4882a593Smuzhiyun request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
3781*4882a593Smuzhiyun if (!request_wiphy)
3782*4882a593Smuzhiyun return -ENODEV;
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun if (!driver_request->intersect) {
3785*4882a593Smuzhiyun if (request_wiphy->regd)
3786*4882a593Smuzhiyun return -EALREADY;
3787*4882a593Smuzhiyun
3788*4882a593Smuzhiyun regd = reg_copy_regd(rd);
3789*4882a593Smuzhiyun if (IS_ERR(regd))
3790*4882a593Smuzhiyun return PTR_ERR(regd);
3791*4882a593Smuzhiyun
3792*4882a593Smuzhiyun rcu_assign_pointer(request_wiphy->regd, regd);
3793*4882a593Smuzhiyun reset_regdomains(false, rd);
3794*4882a593Smuzhiyun return 0;
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun
3797*4882a593Smuzhiyun intersected_rd = regdom_intersect(rd, get_cfg80211_regdom());
3798*4882a593Smuzhiyun if (!intersected_rd)
3799*4882a593Smuzhiyun return -EINVAL;
3800*4882a593Smuzhiyun
3801*4882a593Smuzhiyun /*
3802*4882a593Smuzhiyun * We can trash what CRDA provided now.
3803*4882a593Smuzhiyun * However if a driver requested this specific regulatory
3804*4882a593Smuzhiyun * domain we keep it for its private use
3805*4882a593Smuzhiyun */
3806*4882a593Smuzhiyun tmp = get_wiphy_regdom(request_wiphy);
3807*4882a593Smuzhiyun rcu_assign_pointer(request_wiphy->regd, rd);
3808*4882a593Smuzhiyun rcu_free_regdom(tmp);
3809*4882a593Smuzhiyun
3810*4882a593Smuzhiyun rd = NULL;
3811*4882a593Smuzhiyun
3812*4882a593Smuzhiyun reset_regdomains(false, intersected_rd);
3813*4882a593Smuzhiyun
3814*4882a593Smuzhiyun return 0;
3815*4882a593Smuzhiyun }
3816*4882a593Smuzhiyun
3817*4882a593Smuzhiyun static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
3818*4882a593Smuzhiyun struct regulatory_request *country_ie_request)
3819*4882a593Smuzhiyun {
3820*4882a593Smuzhiyun struct wiphy *request_wiphy;
3821*4882a593Smuzhiyun
3822*4882a593Smuzhiyun if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
3823*4882a593Smuzhiyun !is_unknown_alpha2(rd->alpha2))
3824*4882a593Smuzhiyun return -EINVAL;
3825*4882a593Smuzhiyun
3826*4882a593Smuzhiyun /*
3827*4882a593Smuzhiyun * Let's only bother proceeding on the same alpha2 if the current
3828*4882a593Smuzhiyun * rd is non-static (meaning CRDA was present and was used last)
3829*4882a593Smuzhiyun * and the pending request came in from a country IE
3830*4882a593Smuzhiyun */
3831*4882a593Smuzhiyun
3832*4882a593Smuzhiyun if (!is_valid_rd(rd)) {
3833*4882a593Smuzhiyun pr_err("Invalid regulatory domain detected: %c%c\n",
3834*4882a593Smuzhiyun rd->alpha2[0], rd->alpha2[1]);
3835*4882a593Smuzhiyun print_regdomain_info(rd);
3836*4882a593Smuzhiyun return -EINVAL;
3837*4882a593Smuzhiyun }
3838*4882a593Smuzhiyun
3839*4882a593Smuzhiyun request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
3840*4882a593Smuzhiyun if (!request_wiphy)
3841*4882a593Smuzhiyun return -ENODEV;
3842*4882a593Smuzhiyun
3843*4882a593Smuzhiyun if (country_ie_request->intersect)
3844*4882a593Smuzhiyun return -EINVAL;
3845*4882a593Smuzhiyun
3846*4882a593Smuzhiyun reset_regdomains(false, rd);
3847*4882a593Smuzhiyun return 0;
3848*4882a593Smuzhiyun }
3849*4882a593Smuzhiyun
3850*4882a593Smuzhiyun /*
3851*4882a593Smuzhiyun * Use this call to set the current regulatory domain. Conflicts with
3852*4882a593Smuzhiyun * multiple drivers can be ironed out later. Caller must've already
3853*4882a593Smuzhiyun * kmalloc'd the rd structure.
3854*4882a593Smuzhiyun */
3855*4882a593Smuzhiyun int set_regdom(const struct ieee80211_regdomain *rd,
3856*4882a593Smuzhiyun enum ieee80211_regd_source regd_src)
3857*4882a593Smuzhiyun {
3858*4882a593Smuzhiyun struct regulatory_request *lr;
3859*4882a593Smuzhiyun bool user_reset = false;
3860*4882a593Smuzhiyun int r;
3861*4882a593Smuzhiyun
3862*4882a593Smuzhiyun if (IS_ERR_OR_NULL(rd))
3863*4882a593Smuzhiyun return -ENODATA;
3864*4882a593Smuzhiyun
3865*4882a593Smuzhiyun if (!reg_is_valid_request(rd->alpha2)) {
3866*4882a593Smuzhiyun kfree(rd);
3867*4882a593Smuzhiyun return -EINVAL;
3868*4882a593Smuzhiyun }
3869*4882a593Smuzhiyun
3870*4882a593Smuzhiyun if (regd_src == REGD_SOURCE_CRDA)
3871*4882a593Smuzhiyun reset_crda_timeouts();
3872*4882a593Smuzhiyun
3873*4882a593Smuzhiyun lr = get_last_request();
3874*4882a593Smuzhiyun
3875*4882a593Smuzhiyun /* Note that this doesn't update the wiphys; that is done below */
3876*4882a593Smuzhiyun switch (lr->initiator) {
3877*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_CORE:
3878*4882a593Smuzhiyun r = reg_set_rd_core(rd);
3879*4882a593Smuzhiyun break;
3880*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_USER:
3881*4882a593Smuzhiyun cfg80211_save_user_regdom(rd);
3882*4882a593Smuzhiyun r = reg_set_rd_user(rd, lr);
3883*4882a593Smuzhiyun user_reset = true;
3884*4882a593Smuzhiyun break;
3885*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_DRIVER:
3886*4882a593Smuzhiyun r = reg_set_rd_driver(rd, lr);
3887*4882a593Smuzhiyun break;
3888*4882a593Smuzhiyun case NL80211_REGDOM_SET_BY_COUNTRY_IE:
3889*4882a593Smuzhiyun r = reg_set_rd_country_ie(rd, lr);
3890*4882a593Smuzhiyun break;
3891*4882a593Smuzhiyun default:
3892*4882a593Smuzhiyun WARN(1, "invalid initiator %d\n", lr->initiator);
3893*4882a593Smuzhiyun kfree(rd);
3894*4882a593Smuzhiyun return -EINVAL;
3895*4882a593Smuzhiyun }
3896*4882a593Smuzhiyun
3897*4882a593Smuzhiyun if (r) {
3898*4882a593Smuzhiyun switch (r) {
3899*4882a593Smuzhiyun case -EALREADY:
3900*4882a593Smuzhiyun reg_set_request_processed();
3901*4882a593Smuzhiyun break;
3902*4882a593Smuzhiyun default:
3903*4882a593Smuzhiyun /* Back to world regulatory in case of errors */
3904*4882a593Smuzhiyun restore_regulatory_settings(user_reset, false);
3905*4882a593Smuzhiyun }
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun kfree(rd);
3908*4882a593Smuzhiyun return r;
3909*4882a593Smuzhiyun }
3910*4882a593Smuzhiyun
3911*4882a593Smuzhiyun /* This would make this whole thing pointless */
3912*4882a593Smuzhiyun if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom()))
3913*4882a593Smuzhiyun return -EINVAL;
3914*4882a593Smuzhiyun
3915*4882a593Smuzhiyun /* update all wiphys now with the new established regulatory domain */
3916*4882a593Smuzhiyun update_all_wiphy_regulatory(lr->initiator);
3917*4882a593Smuzhiyun
3918*4882a593Smuzhiyun print_regdomain(get_cfg80211_regdom());
3919*4882a593Smuzhiyun
3920*4882a593Smuzhiyun nl80211_send_reg_change_event(lr);
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun reg_set_request_processed();
3923*4882a593Smuzhiyun
3924*4882a593Smuzhiyun return 0;
3925*4882a593Smuzhiyun }
3926*4882a593Smuzhiyun
3927*4882a593Smuzhiyun static int __regulatory_set_wiphy_regd(struct wiphy *wiphy,
3928*4882a593Smuzhiyun struct ieee80211_regdomain *rd)
3929*4882a593Smuzhiyun {
3930*4882a593Smuzhiyun const struct ieee80211_regdomain *regd;
3931*4882a593Smuzhiyun const struct ieee80211_regdomain *prev_regd;
3932*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
3933*4882a593Smuzhiyun
3934*4882a593Smuzhiyun if (WARN_ON(!wiphy || !rd))
3935*4882a593Smuzhiyun return -EINVAL;
3936*4882a593Smuzhiyun
3937*4882a593Smuzhiyun if (WARN(!(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED),
3938*4882a593Smuzhiyun "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n"))
3939*4882a593Smuzhiyun return -EPERM;
3940*4882a593Smuzhiyun
3941*4882a593Smuzhiyun if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected\n")) {
3942*4882a593Smuzhiyun print_regdomain_info(rd);
3943*4882a593Smuzhiyun return -EINVAL;
3944*4882a593Smuzhiyun }
3945*4882a593Smuzhiyun
3946*4882a593Smuzhiyun regd = reg_copy_regd(rd);
3947*4882a593Smuzhiyun if (IS_ERR(regd))
3948*4882a593Smuzhiyun return PTR_ERR(regd);
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun rdev = wiphy_to_rdev(wiphy);
3951*4882a593Smuzhiyun
3952*4882a593Smuzhiyun spin_lock(&reg_requests_lock);
3953*4882a593Smuzhiyun prev_regd = rdev->requested_regd;
3954*4882a593Smuzhiyun rdev->requested_regd = regd;
3955*4882a593Smuzhiyun spin_unlock(&reg_requests_lock);
3956*4882a593Smuzhiyun
3957*4882a593Smuzhiyun kfree(prev_regd);
3958*4882a593Smuzhiyun return 0;
3959*4882a593Smuzhiyun }
3960*4882a593Smuzhiyun
3961*4882a593Smuzhiyun int regulatory_set_wiphy_regd(struct wiphy *wiphy,
3962*4882a593Smuzhiyun struct ieee80211_regdomain *rd)
3963*4882a593Smuzhiyun {
3964*4882a593Smuzhiyun int ret = __regulatory_set_wiphy_regd(wiphy, rd);
3965*4882a593Smuzhiyun
3966*4882a593Smuzhiyun if (ret)
3967*4882a593Smuzhiyun return ret;
3968*4882a593Smuzhiyun
3969*4882a593Smuzhiyun schedule_work(&reg_work);
3970*4882a593Smuzhiyun return 0;
3971*4882a593Smuzhiyun }
3972*4882a593Smuzhiyun EXPORT_SYMBOL(regulatory_set_wiphy_regd);
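
/*
 * Minimal usage sketch for a self-managed driver (all names below are
 * hypothetical, not taken from a real driver): build an
 * ieee80211_regdomain from firmware or EEPROM data and hand it to
 * cfg80211. The wiphy must carry REGULATORY_WIPHY_SELF_MANAGED, and
 * since the regdomain is copied internally the caller keeps ownership
 * of rd and may free it afterwards.
 *
 *	struct ieee80211_regdomain *rd;
 *	int ret;
 *
 *	rd = kzalloc(struct_size(rd, reg_rules, n_rules), GFP_KERNEL);
 *	if (!rd)
 *		return -ENOMEM;
 *	rd->n_reg_rules = n_rules;
 *	rd->alpha2[0] = fw_alpha2[0];
 *	rd->alpha2[1] = fw_alpha2[1];
 *	... fill rd->reg_rules[] from the firmware tables ...
 *	ret = regulatory_set_wiphy_regd(wiphy, rd);
 *	kfree(rd);
 */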
3973*4882a593Smuzhiyun
3974*4882a593Smuzhiyun int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
3975*4882a593Smuzhiyun struct ieee80211_regdomain *rd)
3976*4882a593Smuzhiyun {
3977*4882a593Smuzhiyun int ret;
3978*4882a593Smuzhiyun
3979*4882a593Smuzhiyun ASSERT_RTNL();
3980*4882a593Smuzhiyun
3981*4882a593Smuzhiyun ret = __regulatory_set_wiphy_regd(wiphy, rd);
3982*4882a593Smuzhiyun if (ret)
3983*4882a593Smuzhiyun return ret;
3984*4882a593Smuzhiyun
3985*4882a593Smuzhiyun /* process the request immediately */
3986*4882a593Smuzhiyun reg_process_self_managed_hints();
3987*4882a593Smuzhiyun return 0;
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync_rtnl);
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun void wiphy_regulatory_register(struct wiphy *wiphy)
3992*4882a593Smuzhiyun {
3993*4882a593Smuzhiyun struct regulatory_request *lr = get_last_request();
3994*4882a593Smuzhiyun
3995*4882a593Smuzhiyun /* self-managed devices ignore beacon hints and country IE */
3996*4882a593Smuzhiyun if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
3997*4882a593Smuzhiyun wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
3998*4882a593Smuzhiyun REGULATORY_COUNTRY_IE_IGNORE;
3999*4882a593Smuzhiyun
4000*4882a593Smuzhiyun /*
4001*4882a593Smuzhiyun * The last request may have been received before this
4002*4882a593Smuzhiyun * registration call. Call the driver notifier if
4003*4882a593Smuzhiyun * initiator is USER.
4004*4882a593Smuzhiyun */
4005*4882a593Smuzhiyun if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
4006*4882a593Smuzhiyun reg_call_notifier(wiphy, lr);
4007*4882a593Smuzhiyun }
4008*4882a593Smuzhiyun
4009*4882a593Smuzhiyun if (!reg_dev_ignore_cell_hint(wiphy))
4010*4882a593Smuzhiyun reg_num_devs_support_basehint++;
4011*4882a593Smuzhiyun
4012*4882a593Smuzhiyun wiphy_update_regulatory(wiphy, lr->initiator);
4013*4882a593Smuzhiyun wiphy_all_share_dfs_chan_state(wiphy);
4014*4882a593Smuzhiyun reg_process_self_managed_hints();
4015*4882a593Smuzhiyun }
4016*4882a593Smuzhiyun
4017*4882a593Smuzhiyun void wiphy_regulatory_deregister(struct wiphy *wiphy)
4018*4882a593Smuzhiyun {
4019*4882a593Smuzhiyun struct wiphy *request_wiphy = NULL;
4020*4882a593Smuzhiyun struct regulatory_request *lr;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun lr = get_last_request();
4023*4882a593Smuzhiyun
4024*4882a593Smuzhiyun if (!reg_dev_ignore_cell_hint(wiphy))
4025*4882a593Smuzhiyun reg_num_devs_support_basehint--;
4026*4882a593Smuzhiyun
4027*4882a593Smuzhiyun rcu_free_regdom(get_wiphy_regdom(wiphy));
4028*4882a593Smuzhiyun RCU_INIT_POINTER(wiphy->regd, NULL);
4029*4882a593Smuzhiyun
4030*4882a593Smuzhiyun if (lr)
4031*4882a593Smuzhiyun request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
4032*4882a593Smuzhiyun
4033*4882a593Smuzhiyun if (!request_wiphy || request_wiphy != wiphy)
4034*4882a593Smuzhiyun return;
4035*4882a593Smuzhiyun
4036*4882a593Smuzhiyun lr->wiphy_idx = WIPHY_IDX_INVALID;
4037*4882a593Smuzhiyun lr->country_ie_env = ENVIRON_ANY;
4038*4882a593Smuzhiyun }
4039*4882a593Smuzhiyun
4040*4882a593Smuzhiyun /*
4041*4882a593Smuzhiyun * See FCC notices for UNII band definitions
4042*4882a593Smuzhiyun * 5GHz: https://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii
4043*4882a593Smuzhiyun * 6GHz: https://www.fcc.gov/document/fcc-proposes-more-spectrum-unlicensed-use-0
4044*4882a593Smuzhiyun */
4045*4882a593Smuzhiyun int cfg80211_get_unii(int freq)
4046*4882a593Smuzhiyun {
4047*4882a593Smuzhiyun /* UNII-1 */
4048*4882a593Smuzhiyun if (freq >= 5150 && freq <= 5250)
4049*4882a593Smuzhiyun return 0;
4050*4882a593Smuzhiyun
4051*4882a593Smuzhiyun /* UNII-2A */
4052*4882a593Smuzhiyun if (freq > 5250 && freq <= 5350)
4053*4882a593Smuzhiyun return 1;
4054*4882a593Smuzhiyun
4055*4882a593Smuzhiyun /* UNII-2B */
4056*4882a593Smuzhiyun if (freq > 5350 && freq <= 5470)
4057*4882a593Smuzhiyun return 2;
4058*4882a593Smuzhiyun
4059*4882a593Smuzhiyun /* UNII-2C */
4060*4882a593Smuzhiyun if (freq > 5470 && freq <= 5725)
4061*4882a593Smuzhiyun return 3;
4062*4882a593Smuzhiyun
4063*4882a593Smuzhiyun /* UNII-3 */
4064*4882a593Smuzhiyun if (freq > 5725 && freq <= 5825)
4065*4882a593Smuzhiyun return 4;
4066*4882a593Smuzhiyun
4067*4882a593Smuzhiyun /* UNII-5 */
4068*4882a593Smuzhiyun if (freq > 5925 && freq <= 6425)
4069*4882a593Smuzhiyun return 5;
4070*4882a593Smuzhiyun
4071*4882a593Smuzhiyun /* UNII-6 */
4072*4882a593Smuzhiyun if (freq > 6425 && freq <= 6525)
4073*4882a593Smuzhiyun return 6;
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun /* UNII-7 */
4076*4882a593Smuzhiyun if (freq > 6525 && freq <= 6875)
4077*4882a593Smuzhiyun return 7;
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun /* UNII-8 */
4080*4882a593Smuzhiyun if (freq > 6875 && freq <= 7125)
4081*4882a593Smuzhiyun return 8;
4082*4882a593Smuzhiyun
4083*4882a593Smuzhiyun return -EINVAL;
4084*4882a593Smuzhiyun }
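
/*
 * Illustrative mapping based on the ranges above: cfg80211_get_unii(5180)
 * returns 0 (UNII-1, channel 36), cfg80211_get_unii(5500) returns 3
 * (UNII-2C), cfg80211_get_unii(5955) returns 5 (UNII-5 in the 6 GHz
 * band), and a frequency outside every range, e.g. 2437, returns -EINVAL.
 */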
4085*4882a593Smuzhiyun
4086*4882a593Smuzhiyun bool regulatory_indoor_allowed(void)
4087*4882a593Smuzhiyun {
4088*4882a593Smuzhiyun return reg_is_indoor;
4089*4882a593Smuzhiyun }
4090*4882a593Smuzhiyun
4091*4882a593Smuzhiyun bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
4092*4882a593Smuzhiyun {
4093*4882a593Smuzhiyun const struct ieee80211_regdomain *regd = NULL;
4094*4882a593Smuzhiyun const struct ieee80211_regdomain *wiphy_regd = NULL;
4095*4882a593Smuzhiyun bool pre_cac_allowed = false;
4096*4882a593Smuzhiyun
4097*4882a593Smuzhiyun rcu_read_lock();
4098*4882a593Smuzhiyun
4099*4882a593Smuzhiyun regd = rcu_dereference(cfg80211_regdomain);
4100*4882a593Smuzhiyun wiphy_regd = rcu_dereference(wiphy->regd);
4101*4882a593Smuzhiyun if (!wiphy_regd) {
4102*4882a593Smuzhiyun if (regd->dfs_region == NL80211_DFS_ETSI)
4103*4882a593Smuzhiyun pre_cac_allowed = true;
4104*4882a593Smuzhiyun
4105*4882a593Smuzhiyun rcu_read_unlock();
4106*4882a593Smuzhiyun
4107*4882a593Smuzhiyun return pre_cac_allowed;
4108*4882a593Smuzhiyun }
4109*4882a593Smuzhiyun
4110*4882a593Smuzhiyun if (regd->dfs_region == wiphy_regd->dfs_region &&
4111*4882a593Smuzhiyun wiphy_regd->dfs_region == NL80211_DFS_ETSI)
4112*4882a593Smuzhiyun pre_cac_allowed = true;
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun rcu_read_unlock();
4115*4882a593Smuzhiyun
4116*4882a593Smuzhiyun return pre_cac_allowed;
4117*4882a593Smuzhiyun }
4118*4882a593Smuzhiyun EXPORT_SYMBOL(regulatory_pre_cac_allowed);
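
/*
 * Usage sketch (hypothetical caller): code deciding whether an earlier
 * CAC result may be reused without redoing the channel availability
 * check can gate that decision on the current DFS region:
 *
 *	if (regulatory_pre_cac_allowed(wiphy))
 *		reuse_previous_cac_result(wdev);
 *	else
 *		start_fresh_cac(wdev);
 *
 * reuse_previous_cac_result() and start_fresh_cac() are placeholders
 * for driver or core logic, not real cfg80211 helpers.
 */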
4119*4882a593Smuzhiyun
4120*4882a593Smuzhiyun static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
4121*4882a593Smuzhiyun {
4122*4882a593Smuzhiyun struct wireless_dev *wdev;
4123*4882a593Smuzhiyun /* If we finished CAC or received radar, we should end any
4124*4882a593Smuzhiyun * CAC running on the same channels.
4125*4882a593Smuzhiyun * The check !cfg80211_chandef_dfs_usable covers two cases:
4126*4882a593Smuzhiyun * either all channels are available, meaning the CAC_FINISHED
4127*4882a593Smuzhiyun * event has affected another wdev's state, or there is a channel
4128*4882a593Smuzhiyun * in an unavailable state in the wdev's chandef, meaning the
4129*4882a593Smuzhiyun * RADAR_DETECTED event has affected another wdev's state.
4130*4882a593Smuzhiyun * In both cases we should end the CAC on the wdev.
4131*4882a593Smuzhiyun */
4132*4882a593Smuzhiyun list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
4133*4882a593Smuzhiyun if (wdev->cac_started &&
4134*4882a593Smuzhiyun !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef))
4135*4882a593Smuzhiyun rdev_end_cac(rdev, wdev->netdev);
4136*4882a593Smuzhiyun }
4137*4882a593Smuzhiyun }
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun void regulatory_propagate_dfs_state(struct wiphy *wiphy,
4140*4882a593Smuzhiyun struct cfg80211_chan_def *chandef,
4141*4882a593Smuzhiyun enum nl80211_dfs_state dfs_state,
4142*4882a593Smuzhiyun enum nl80211_radar_event event)
4143*4882a593Smuzhiyun {
4144*4882a593Smuzhiyun struct cfg80211_registered_device *rdev;
4145*4882a593Smuzhiyun
4146*4882a593Smuzhiyun ASSERT_RTNL();
4147*4882a593Smuzhiyun
4148*4882a593Smuzhiyun if (WARN_ON(!cfg80211_chandef_valid(chandef)))
4149*4882a593Smuzhiyun return;
4150*4882a593Smuzhiyun
4151*4882a593Smuzhiyun list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
4152*4882a593Smuzhiyun if (wiphy == &rdev->wiphy)
4153*4882a593Smuzhiyun continue;
4154*4882a593Smuzhiyun
4155*4882a593Smuzhiyun if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
4156*4882a593Smuzhiyun continue;
4157*4882a593Smuzhiyun
4158*4882a593Smuzhiyun if (!ieee80211_get_channel(&rdev->wiphy,
4159*4882a593Smuzhiyun chandef->chan->center_freq))
4160*4882a593Smuzhiyun continue;
4161*4882a593Smuzhiyun
4162*4882a593Smuzhiyun cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state);
4163*4882a593Smuzhiyun
4164*4882a593Smuzhiyun if (event == NL80211_RADAR_DETECTED ||
4165*4882a593Smuzhiyun event == NL80211_RADAR_CAC_FINISHED) {
4166*4882a593Smuzhiyun cfg80211_sched_dfs_chan_update(rdev);
4167*4882a593Smuzhiyun cfg80211_check_and_end_cac(rdev);
4168*4882a593Smuzhiyun }
4169*4882a593Smuzhiyun
4170*4882a593Smuzhiyun nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL);
4171*4882a593Smuzhiyun }
4172*4882a593Smuzhiyun }
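
/*
 * Usage sketch (hypothetical call; the actual callers live in the
 * cfg80211 DFS/radar handling code): after radar is detected on one
 * wiphy, the unavailable state can be propagated to all other wiphys
 * that share the same DFS domain:
 *
 *	regulatory_propagate_dfs_state(wiphy, &chandef,
 *				       NL80211_DFS_UNAVAILABLE,
 *				       NL80211_RADAR_DETECTED);
 */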
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun static int __init regulatory_init_db(void)
4175*4882a593Smuzhiyun {
4176*4882a593Smuzhiyun int err;
4177*4882a593Smuzhiyun
4178*4882a593Smuzhiyun /*
4179*4882a593Smuzhiyun * It's possible that - due to other bugs/issues - cfg80211
4180*4882a593Smuzhiyun * never called regulatory_init() below, or that it failed;
4181*4882a593Smuzhiyun * in that case, don't try to do any further work here as
4182*4882a593Smuzhiyun * it's doomed to lead to crashes.
4183*4882a593Smuzhiyun */
4184*4882a593Smuzhiyun if (IS_ERR_OR_NULL(reg_pdev))
4185*4882a593Smuzhiyun return -EINVAL;
4186*4882a593Smuzhiyun
4187*4882a593Smuzhiyun err = load_builtin_regdb_keys();
4188*4882a593Smuzhiyun if (err)
4189*4882a593Smuzhiyun return err;
4190*4882a593Smuzhiyun
4191*4882a593Smuzhiyun /* We always try to get an update for the static regdomain */
4192*4882a593Smuzhiyun err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
4193*4882a593Smuzhiyun if (err) {
4194*4882a593Smuzhiyun if (err == -ENOMEM) {
4195*4882a593Smuzhiyun platform_device_unregister(reg_pdev);
4196*4882a593Smuzhiyun return err;
4197*4882a593Smuzhiyun }
4198*4882a593Smuzhiyun /*
4199*4882a593Smuzhiyun * N.B. kobject_uevent_env() can fail mainly when we're out of
4200*4882a593Smuzhiyun * memory, which is handled and propagated appropriately above,
4201*4882a593Smuzhiyun * but it can also fail during a netlink_broadcast() or during
4202*4882a593Smuzhiyun * early boot for call_usermodehelper(). For now treat these
4203*4882a593Smuzhiyun * errors as non-fatal.
4204*4882a593Smuzhiyun */
4205*4882a593Smuzhiyun pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
4206*4882a593Smuzhiyun }
4207*4882a593Smuzhiyun
4208*4882a593Smuzhiyun /*
4209*4882a593Smuzhiyun * Finally, if the user set the module parameter treat it
4210*4882a593Smuzhiyun * as a user hint.
4211*4882a593Smuzhiyun */
4212*4882a593Smuzhiyun if (!is_world_regdom(ieee80211_regdom))
4213*4882a593Smuzhiyun regulatory_hint_user(ieee80211_regdom,
4214*4882a593Smuzhiyun NL80211_USER_REG_HINT_USER);
4215*4882a593Smuzhiyun
4216*4882a593Smuzhiyun return 0;
4217*4882a593Smuzhiyun }
4218*4882a593Smuzhiyun #ifndef MODULE
4219*4882a593Smuzhiyun late_initcall(regulatory_init_db);
4220*4882a593Smuzhiyun #endif
4221*4882a593Smuzhiyun
4222*4882a593Smuzhiyun int __init regulatory_init(void)
4223*4882a593Smuzhiyun {
4224*4882a593Smuzhiyun reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
4225*4882a593Smuzhiyun if (IS_ERR(reg_pdev))
4226*4882a593Smuzhiyun return PTR_ERR(reg_pdev);
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun spin_lock_init(&reg_requests_lock);
4229*4882a593Smuzhiyun spin_lock_init(&reg_pending_beacons_lock);
4230*4882a593Smuzhiyun spin_lock_init(&reg_indoor_lock);
4231*4882a593Smuzhiyun
4232*4882a593Smuzhiyun rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
4233*4882a593Smuzhiyun
4234*4882a593Smuzhiyun user_alpha2[0] = '9';
4235*4882a593Smuzhiyun user_alpha2[1] = '7';
4236*4882a593Smuzhiyun
4237*4882a593Smuzhiyun #ifdef MODULE
4238*4882a593Smuzhiyun return regulatory_init_db();
4239*4882a593Smuzhiyun #else
4240*4882a593Smuzhiyun return 0;
4241*4882a593Smuzhiyun #endif
4242*4882a593Smuzhiyun }
4243*4882a593Smuzhiyun
4244*4882a593Smuzhiyun void regulatory_exit(void)
4245*4882a593Smuzhiyun {
4246*4882a593Smuzhiyun struct regulatory_request *reg_request, *tmp;
4247*4882a593Smuzhiyun struct reg_beacon *reg_beacon, *btmp;
4248*4882a593Smuzhiyun
4249*4882a593Smuzhiyun cancel_work_sync(&reg_work);
4250*4882a593Smuzhiyun cancel_crda_timeout_sync();
4251*4882a593Smuzhiyun cancel_delayed_work_sync(&reg_check_chans);
4252*4882a593Smuzhiyun
4253*4882a593Smuzhiyun /* Lock to suppress warnings */
4254*4882a593Smuzhiyun rtnl_lock();
4255*4882a593Smuzhiyun reset_regdomains(true, NULL);
4256*4882a593Smuzhiyun rtnl_unlock();
4257*4882a593Smuzhiyun
4258*4882a593Smuzhiyun dev_set_uevent_suppress(&reg_pdev->dev, true);
4259*4882a593Smuzhiyun
4260*4882a593Smuzhiyun platform_device_unregister(reg_pdev);
4261*4882a593Smuzhiyun
4262*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
4263*4882a593Smuzhiyun list_del(&reg_beacon->list);
4264*4882a593Smuzhiyun kfree(reg_beacon);
4265*4882a593Smuzhiyun }
4266*4882a593Smuzhiyun
4267*4882a593Smuzhiyun list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) {
4268*4882a593Smuzhiyun list_del(&reg_beacon->list);
4269*4882a593Smuzhiyun kfree(reg_beacon);
4270*4882a593Smuzhiyun }
4271*4882a593Smuzhiyun
4272*4882a593Smuzhiyun list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
4273*4882a593Smuzhiyun list_del(&reg_request->list);
4274*4882a593Smuzhiyun kfree(reg_request);
4275*4882a593Smuzhiyun }
4276*4882a593Smuzhiyun
4277*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(regdb))
4278*4882a593Smuzhiyun kfree(regdb);
4279*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(cfg80211_user_regdom))
4280*4882a593Smuzhiyun kfree(cfg80211_user_regdom);
4281*4882a593Smuzhiyun
4282*4882a593Smuzhiyun free_regdb_keyring();
4283*4882a593Smuzhiyun }
4284