1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2006 - 2007 Ivo van Doorn
4*4882a593Smuzhiyun * Copyright (C) 2007 Dmitry Torokhov
5*4882a593Smuzhiyun * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/workqueue.h>
12*4882a593Smuzhiyun #include <linux/capability.h>
13*4882a593Smuzhiyun #include <linux/list.h>
14*4882a593Smuzhiyun #include <linux/mutex.h>
15*4882a593Smuzhiyun #include <linux/rfkill.h>
16*4882a593Smuzhiyun #include <linux/sched.h>
17*4882a593Smuzhiyun #include <linux/spinlock.h>
18*4882a593Smuzhiyun #include <linux/device.h>
19*4882a593Smuzhiyun #include <linux/miscdevice.h>
20*4882a593Smuzhiyun #include <linux/wait.h>
21*4882a593Smuzhiyun #include <linux/poll.h>
22*4882a593Smuzhiyun #include <linux/fs.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include "rfkill.h"
26*4882a593Smuzhiyun
/* Interval between ops->poll() calls for polled devices (jiffies). */
#define POLL_INTERVAL (5 * HZ)

/* Bit flags kept in struct rfkill::state (protected by rfkill->lock): */
#define RFKILL_BLOCK_HW BIT(0)	/* hardware (rfkill line) block */
#define RFKILL_BLOCK_SW BIT(1)	/* software block */
#define RFKILL_BLOCK_SW_PREV BIT(2)	/* saved SW state, see rfkill_set_block() */
#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
			RFKILL_BLOCK_SW |\
			RFKILL_BLOCK_SW_PREV)
/* set while ops->set_block() is in flight; makes concurrent SW-state
 * updates land in RFKILL_BLOCK_SW_PREV instead of the live bit */
#define RFKILL_BLOCK_SW_SETCALL BIT(31)
36*4882a593Smuzhiyun
/*
 * Per-switch state. One instance per registered rfkill device; linked
 * into the global rfkill_list via @node.
 */
struct rfkill {
	spinlock_t lock;	/* protects @state */

	enum rfkill_type type;	/* radio type (wlan, bluetooth, ...) */

	unsigned long state;	/* RFKILL_BLOCK_* flags */

	u32 idx;		/* unique index, exposed via sysfs and /dev/rfkill */

	bool registered;	/* device registration completed */
	bool persistent;	/* initial state was set before registration */
	bool polling_paused;	/* presumably gates the poll_work; not used in
				 * this chunk — confirm against the poll code */
	bool suspended;		/* presumably set across suspend/resume — confirm */

	const struct rfkill_ops *ops;	/* driver callbacks (set_block, query, ...) */
	void *data;		/* driver-private cookie passed to @ops */

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger led_trigger;	/* per-switch LED trigger */
	const char *ledtrigname;	/* optional override for the trigger name */
#endif

	struct device dev;	/* embedded device; see to_rfkill() */
	struct list_head node;	/* entry in rfkill_list */

	struct delayed_work poll_work;	/* periodic ops->poll work */
	struct work_struct uevent_work;	/* deferred state-change uevent */
	struct work_struct sync_work;	/* deferred sync to global state */
	char name[];		/* human-readable name, allocated inline */
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)
68*4882a593Smuzhiyun
/* One queued event for a /dev/rfkill reader; freed after delivery. */
struct rfkill_int_event {
	struct list_head list;	/* entry in rfkill_data::events */
	struct rfkill_event ev;	/* the userspace-visible event payload */
};
73*4882a593Smuzhiyun
/* Per-open-file state for /dev/rfkill. */
struct rfkill_data {
	struct list_head list;		/* entry in the global rfkill_fds list */
	struct list_head events;	/* pending rfkill_int_event entries */
	struct mutex mtx;		/* protects @events */
	wait_queue_head_t read_wait;	/* readers blocked waiting for events */
	bool input_handler;		/* fd requested input-handler override */
};
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
84*4882a593Smuzhiyun MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
85*4882a593Smuzhiyun MODULE_DESCRIPTION("RF switch support");
86*4882a593Smuzhiyun MODULE_LICENSE("GPL");
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /*
90*4882a593Smuzhiyun * The locking here should be made much smarter, we currently have
91*4882a593Smuzhiyun * a bit of a stupid situation because drivers might want to register
92*4882a593Smuzhiyun * the rfkill struct under their own lock, and take this lock during
93*4882a593Smuzhiyun * rfkill method calls -- which will cause an AB-BA deadlock situation.
94*4882a593Smuzhiyun *
95*4882a593Smuzhiyun * To fix that, we need to rework this code here to be mostly lock-free
96*4882a593Smuzhiyun * and only use the mutex for list manipulations, not to protect the
97*4882a593Smuzhiyun * various other global variables. Then we can avoid holding the mutex
98*4882a593Smuzhiyun * around driver operations, and all is happy.
99*4882a593Smuzhiyun */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

/* Initial soft-block policy for all radio types; 0 means start blocked. */
static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

/* Per-type global soft-block state; @sav saves @cur across an EPO. */
static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

/* True while emergency-power-off locks out state changes. */
static bool rfkill_epo_lock_active;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun #ifdef CONFIG_RFKILL_LEDS
rfkill_led_trigger_event(struct rfkill * rfkill)117*4882a593Smuzhiyun static void rfkill_led_trigger_event(struct rfkill *rfkill)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun struct led_trigger *trigger;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (!rfkill->registered)
122*4882a593Smuzhiyun return;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun trigger = &rfkill->led_trigger;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun if (rfkill->state & RFKILL_BLOCK_ANY)
127*4882a593Smuzhiyun led_trigger_event(trigger, LED_OFF);
128*4882a593Smuzhiyun else
129*4882a593Smuzhiyun led_trigger_event(trigger, LED_FULL);
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
/*
 * LED-trigger activation callback: push the current block state to a
 * LED that has just been attached to this switch's trigger.
 */
static int rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	/* recover the owning switch from the embedded trigger */
	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);

	return 0;
}
142*4882a593Smuzhiyun
/* Return the name of this switch's LED trigger (valid after registration). */
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);
148*4882a593Smuzhiyun
/*
 * Override the LED trigger name; only the pointer is stored, so @name
 * must outlive the rfkill device. Takes effect when the trigger is
 * registered (see rfkill_led_trigger_register()).
 */
void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
156*4882a593Smuzhiyun
/* Register the per-switch LED trigger, named after the device unless
 * a name was supplied via rfkill_set_led_trigger_name(). */
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	/* GNU "?:" — fall back to dev_name() when ledtrigname is NULL */
	rfkill->led_trigger.name = rfkill->ledtrigname
			? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}
164*4882a593Smuzhiyun
/* Tear down the per-switch LED trigger registered above. */
static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun static struct led_trigger rfkill_any_led_trigger;
171*4882a593Smuzhiyun static struct led_trigger rfkill_none_led_trigger;
172*4882a593Smuzhiyun static struct work_struct rfkill_global_led_trigger_work;
173*4882a593Smuzhiyun
/*
 * Recompute the global "rfkill-any" / "rfkill-none" LED triggers:
 * "any" is lit when at least one switch is fully unblocked, "none" is
 * its complement. Runs in process context so it may take the mutex.
 */
static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
	enum led_brightness brightness = LED_OFF;
	struct rfkill *rfkill;

	mutex_lock(&rfkill_global_mutex);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		/* one unblocked switch is enough to light "rfkill-any" */
		if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
			brightness = LED_FULL;
			break;
		}
	}
	mutex_unlock(&rfkill_global_mutex);

	led_trigger_event(&rfkill_any_led_trigger, brightness);
	led_trigger_event(&rfkill_none_led_trigger,
			  brightness == LED_OFF ? LED_FULL : LED_OFF);
}
192*4882a593Smuzhiyun
/* Kick the global LED trigger recomputation; safe from atomic context
 * since the actual work (which takes a mutex) runs from a workqueue. */
static void rfkill_global_led_trigger_event(void)
{
	schedule_work(&rfkill_global_led_trigger_work);
}
197*4882a593Smuzhiyun
/*
 * Register the global "rfkill-any" and "rfkill-none" LED triggers.
 * On failure of the second registration the first is rolled back, so
 * this either registers both or neither. Returns 0 or a negative errno
 * from led_trigger_register().
 */
static int rfkill_global_led_trigger_register(void)
{
	int ret;

	INIT_WORK(&rfkill_global_led_trigger_work,
			rfkill_global_led_trigger_worker);

	rfkill_any_led_trigger.name = "rfkill-any";
	ret = led_trigger_register(&rfkill_any_led_trigger);
	if (ret)
		return ret;

	rfkill_none_led_trigger.name = "rfkill-none";
	ret = led_trigger_register(&rfkill_none_led_trigger);
	if (ret)
		led_trigger_unregister(&rfkill_any_led_trigger);
	else
		/* Delay activation until all global triggers are registered */
		rfkill_global_led_trigger_event();

	return ret;
}
220*4882a593Smuzhiyun
/* Unregister both global triggers, then make sure no worker instance
 * is still running before returning. */
static void rfkill_global_led_trigger_unregister(void)
{
	led_trigger_unregister(&rfkill_none_led_trigger);
	led_trigger_unregister(&rfkill_any_led_trigger);
	cancel_work_sync(&rfkill_global_led_trigger_work);
}
227*4882a593Smuzhiyun #else
/* No-op stubs used when CONFIG_RFKILL_LEDS is disabled, so callers
 * need no #ifdefs of their own. */
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
	return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
253*4882a593Smuzhiyun #endif /* CONFIG_RFKILL_LEDS */
254*4882a593Smuzhiyun
/*
 * Fill a userspace-visible rfkill_event for @rfkill. The state bits
 * are sampled under rfkill->lock so hard/soft are mutually consistent;
 * a pending _PREV bit counts as soft-blocked.
 */
static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
270*4882a593Smuzhiyun
/*
 * Queue an event for every open /dev/rfkill fd and wake its readers.
 * NOTE(review): rfkill_fds is walked without an explicit lock here —
 * presumably callers hold rfkill_global_mutex; confirm at call sites.
 * Allocation failures silently skip that fd (best effort delivery).
 */
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}
287*4882a593Smuzhiyun
/* Announce a state change: uevent for udev plus a CHANGE event on
 * /dev/rfkill. No-op until the device is registered. */
static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /**
300*4882a593Smuzhiyun * rfkill_set_block - wrapper for set_block method
301*4882a593Smuzhiyun *
302*4882a593Smuzhiyun * @rfkill: the rfkill struct to use
303*4882a593Smuzhiyun * @blocked: the new software state
304*4882a593Smuzhiyun *
305*4882a593Smuzhiyun * Calls the set_block method (when applicable) and handles notifications
306*4882a593Smuzhiyun * etc. as well.
307*4882a593Smuzhiyun */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	/* don't call into the driver while it is suspended */
	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	/* save the current SW state in _PREV so it can be restored if
	 * the driver call below fails */
	if (prev)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	/* SETCALL redirects concurrent __rfkill_set_sw_state() updates
	 * into the _PREV bit while set_block runs without the lock */
	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* driver callback, called unlocked -- may sleep */
	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we have set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	/* notify userspace only if the effective SW state changed */
	if (prev != curr)
		rfkill_event(rfkill);
}
366*4882a593Smuzhiyun
/*
 * Record @blocked as the global default soft-block state for @type;
 * RFKILL_TYPE_ALL fans the value out to every type.
 */
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
	int i;

	if (type == RFKILL_TYPE_ALL) {
		for (i = 0; i < NUM_RFKILL_TYPES; i++)
			rfkill_global_states[i].cur = blocked;
		return;
	}

	rfkill_global_states[type].cur = blocked;
}
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun #ifdef CONFIG_RFKILL_INPUT
381*4882a593Smuzhiyun static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun /**
384*4882a593Smuzhiyun * __rfkill_switch_all - Toggle state of all switches of given type
385*4882a593Smuzhiyun * @type: type of interfaces to be affected
386*4882a593Smuzhiyun * @blocked: the new state
387*4882a593Smuzhiyun *
388*4882a593Smuzhiyun * This function sets the state of all switches of given type,
389*4882a593Smuzhiyun * unless a specific switch is suspended.
390*4882a593Smuzhiyun *
391*4882a593Smuzhiyun * Caller must have acquired rfkill_global_mutex.
392*4882a593Smuzhiyun */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		/* act on matching switches; RFKILL_TYPE_ALL matches all */
		if (type == RFKILL_TYPE_ALL || rfkill->type == type)
			rfkill_set_block(rfkill, blocked);
	}
}
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun /**
407*4882a593Smuzhiyun * rfkill_switch_all - Toggle state of all switches of given type
408*4882a593Smuzhiyun * @type: type of interfaces to be affected
409*4882a593Smuzhiyun * @blocked: the new state
410*4882a593Smuzhiyun *
411*4882a593Smuzhiyun * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
412*4882a593Smuzhiyun * Please refer to __rfkill_switch_all() for details.
413*4882a593Smuzhiyun *
414*4882a593Smuzhiyun * Does nothing if the EPO lock is active.
415*4882a593Smuzhiyun */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	/* rfkill-input is disabled, e.g. while userspace overrides it */
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	/* silently ignored while an emergency power off is in force */
	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun /**
430*4882a593Smuzhiyun * rfkill_epo - emergency power off all transmitters
431*4882a593Smuzhiyun *
432*4882a593Smuzhiyun * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
433*4882a593Smuzhiyun * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
434*4882a593Smuzhiyun *
435*4882a593Smuzhiyun * The global state before the EPO is saved and can be restored later
436*4882a593Smuzhiyun * using rfkill_restore_states().
437*4882a593Smuzhiyun */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	/* lock out further state changes until rfkill_restore_states()
	 * or rfkill_remove_epo_lock() */
	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	/* save the pre-EPO global states so they can be restored */
	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun /**
461*4882a593Smuzhiyun * rfkill_restore_states - restore global states
462*4882a593Smuzhiyun *
463*4882a593Smuzhiyun * Restore (and sync switches to) the global state from the
464*4882a593Smuzhiyun * states in rfkill_default_states. This can undo the effects of
465*4882a593Smuzhiyun * a call to rfkill_epo().
466*4882a593Smuzhiyun */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	/* clear the EPO lock, then push the saved per-type states back
	 * out to every switch */
	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun /**
483*4882a593Smuzhiyun * rfkill_remove_epo_lock - unlock state changes
484*4882a593Smuzhiyun *
485*4882a593Smuzhiyun * Used by rfkill-input manually unlock state changes, when
486*4882a593Smuzhiyun * the EPO switch is deactivated.
487*4882a593Smuzhiyun */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	/* unlock state changes without restoring the saved states */
	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun /**
499*4882a593Smuzhiyun * rfkill_is_epo_lock_active - returns true EPO is active
500*4882a593Smuzhiyun *
501*4882a593Smuzhiyun * Returns 0 (false) if there is NOT an active EPO condition,
502*4882a593Smuzhiyun * and 1 (true) if there is an active EPO condition, which
503*4882a593Smuzhiyun * locks all radios in one of the BLOCKED states.
504*4882a593Smuzhiyun *
505*4882a593Smuzhiyun * Can be called in atomic context.
506*4882a593Smuzhiyun */
bool rfkill_is_epo_lock_active(void)
{
	/* lockless read; callers tolerate a racy snapshot (atomic context) */
	return rfkill_epo_lock_active;
}
511*4882a593Smuzhiyun
512*4882a593Smuzhiyun /**
513*4882a593Smuzhiyun * rfkill_get_global_sw_state - returns global state for a type
514*4882a593Smuzhiyun * @type: the type to get the global state of
515*4882a593Smuzhiyun *
516*4882a593Smuzhiyun * Returns the current global state for a given wireless
517*4882a593Smuzhiyun * device type.
518*4882a593Smuzhiyun */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	/* lockless read of the per-type default soft-block state */
	return rfkill_global_states[type].cur;
}
523*4882a593Smuzhiyun #endif
524*4882a593Smuzhiyun
/*
 * Driver-facing setter for the hardware block state. Returns the
 * combined blocked state (true if blocked by anything, HW or SW).
 * Notifies userspace via the deferred uevent work when the HW state
 * actually changed on a registered device.
 */
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool ret, prev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	/* schedule_work: this may be called from atomic context */
	if (rfkill->registered && prev != blocked)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
550*4882a593Smuzhiyun
/*
 * Update the software block bit; caller holds rfkill->lock. While an
 * ops->set_block() call is in flight (SETCALL set), the update is
 * stashed in _PREV instead, and rfkill_set_block() folds it back in.
 */
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = (rfkill->state & RFKILL_BLOCK_SW_SETCALL) ?
		  RFKILL_BLOCK_SW_PREV : RFKILL_BLOCK_SW;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}
564*4882a593Smuzhiyun
/*
 * Driver-facing setter for the software block state. Returns the
 * combined blocked state (SW request OR'ed with the HW block).
 * Userspace is only notified when the SW state changed and no HW
 * block is masking it.
 */
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* before registration just record the state, no notifications */
	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
591*4882a593Smuzhiyun
/*
 * Set the initial software state before registration and mark the
 * device persistent, so the recorded state survives registration
 * instead of being overwritten by the global default.
 */
void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	/* only valid before rfkill_register() */
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
605*4882a593Smuzhiyun
/*
 * Set both software and hardware block states in one locked update.
 * Before registration this also marks the device persistent (like
 * rfkill_init_sw_state()); afterwards it schedules a uevent if either
 * state changed and refreshes the LED triggers.
 */
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
		rfkill_global_led_trigger_event();
	}
}
EXPORT_SYMBOL(rfkill_set_states);
640*4882a593Smuzhiyun
/* Type names indexed by enum rfkill_type; must stay in sync with the
 * enum (checked by BUILD_BUG_ON in rfkill_find_type()). */
static const char * const rfkill_types[] = {
	NULL, /* RFKILL_TYPE_ALL */
	"wlan",
	"bluetooth",
	"ultrawideband",
	"wimax",
	"wwan",
	"gps",
	"fm",
	"nfc",
};
652*4882a593Smuzhiyun
rfkill_find_type(const char * name)653*4882a593Smuzhiyun enum rfkill_type rfkill_find_type(const char *name)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun int i;
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun if (!name)
660*4882a593Smuzhiyun return RFKILL_TYPE_ALL;
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun for (i = 1; i < NUM_RFKILL_TYPES; i++)
663*4882a593Smuzhiyun if (!strcmp(name, rfkill_types[i]))
664*4882a593Smuzhiyun return i;
665*4882a593Smuzhiyun return RFKILL_TYPE_ALL;
666*4882a593Smuzhiyun }
667*4882a593Smuzhiyun EXPORT_SYMBOL(rfkill_find_type);
668*4882a593Smuzhiyun
name_show(struct device * dev,struct device_attribute * attr,char * buf)669*4882a593Smuzhiyun static ssize_t name_show(struct device *dev, struct device_attribute *attr,
670*4882a593Smuzhiyun char *buf)
671*4882a593Smuzhiyun {
672*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun return sprintf(buf, "%s\n", rfkill->name);
675*4882a593Smuzhiyun }
676*4882a593Smuzhiyun static DEVICE_ATTR_RO(name);
677*4882a593Smuzhiyun
type_show(struct device * dev,struct device_attribute * attr,char * buf)678*4882a593Smuzhiyun static ssize_t type_show(struct device *dev, struct device_attribute *attr,
679*4882a593Smuzhiyun char *buf)
680*4882a593Smuzhiyun {
681*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
684*4882a593Smuzhiyun }
685*4882a593Smuzhiyun static DEVICE_ATTR_RO(type);
686*4882a593Smuzhiyun
index_show(struct device * dev,struct device_attribute * attr,char * buf)687*4882a593Smuzhiyun static ssize_t index_show(struct device *dev, struct device_attribute *attr,
688*4882a593Smuzhiyun char *buf)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun return sprintf(buf, "%d\n", rfkill->idx);
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun static DEVICE_ATTR_RO(index);
695*4882a593Smuzhiyun
persistent_show(struct device * dev,struct device_attribute * attr,char * buf)696*4882a593Smuzhiyun static ssize_t persistent_show(struct device *dev,
697*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
698*4882a593Smuzhiyun {
699*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun return sprintf(buf, "%d\n", rfkill->persistent);
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun static DEVICE_ATTR_RO(persistent);
704*4882a593Smuzhiyun
hard_show(struct device * dev,struct device_attribute * attr,char * buf)705*4882a593Smuzhiyun static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
706*4882a593Smuzhiyun char *buf)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun static DEVICE_ATTR_RO(hard);
713*4882a593Smuzhiyun
soft_show(struct device * dev,struct device_attribute * attr,char * buf)714*4882a593Smuzhiyun static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
715*4882a593Smuzhiyun char *buf)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun
soft_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)722*4882a593Smuzhiyun static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
723*4882a593Smuzhiyun const char *buf, size_t count)
724*4882a593Smuzhiyun {
725*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
726*4882a593Smuzhiyun unsigned long state;
727*4882a593Smuzhiyun int err;
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN))
730*4882a593Smuzhiyun return -EPERM;
731*4882a593Smuzhiyun
732*4882a593Smuzhiyun err = kstrtoul(buf, 0, &state);
733*4882a593Smuzhiyun if (err)
734*4882a593Smuzhiyun return err;
735*4882a593Smuzhiyun
736*4882a593Smuzhiyun if (state > 1 )
737*4882a593Smuzhiyun return -EINVAL;
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun mutex_lock(&rfkill_global_mutex);
740*4882a593Smuzhiyun rfkill_set_block(rfkill, state);
741*4882a593Smuzhiyun mutex_unlock(&rfkill_global_mutex);
742*4882a593Smuzhiyun
743*4882a593Smuzhiyun return count;
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun static DEVICE_ATTR_RW(soft);
746*4882a593Smuzhiyun
user_state_from_blocked(unsigned long state)747*4882a593Smuzhiyun static u8 user_state_from_blocked(unsigned long state)
748*4882a593Smuzhiyun {
749*4882a593Smuzhiyun if (state & RFKILL_BLOCK_HW)
750*4882a593Smuzhiyun return RFKILL_USER_STATE_HARD_BLOCKED;
751*4882a593Smuzhiyun if (state & RFKILL_BLOCK_SW)
752*4882a593Smuzhiyun return RFKILL_USER_STATE_SOFT_BLOCKED;
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun return RFKILL_USER_STATE_UNBLOCKED;
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun
state_show(struct device * dev,struct device_attribute * attr,char * buf)757*4882a593Smuzhiyun static ssize_t state_show(struct device *dev, struct device_attribute *attr,
758*4882a593Smuzhiyun char *buf)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
761*4882a593Smuzhiyun
762*4882a593Smuzhiyun return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
763*4882a593Smuzhiyun }
764*4882a593Smuzhiyun
state_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)765*4882a593Smuzhiyun static ssize_t state_store(struct device *dev, struct device_attribute *attr,
766*4882a593Smuzhiyun const char *buf, size_t count)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun struct rfkill *rfkill = to_rfkill(dev);
769*4882a593Smuzhiyun unsigned long state;
770*4882a593Smuzhiyun int err;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN))
773*4882a593Smuzhiyun return -EPERM;
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun err = kstrtoul(buf, 0, &state);
776*4882a593Smuzhiyun if (err)
777*4882a593Smuzhiyun return err;
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
780*4882a593Smuzhiyun state != RFKILL_USER_STATE_UNBLOCKED)
781*4882a593Smuzhiyun return -EINVAL;
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun mutex_lock(&rfkill_global_mutex);
784*4882a593Smuzhiyun rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
785*4882a593Smuzhiyun mutex_unlock(&rfkill_global_mutex);
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun return count;
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun static DEVICE_ATTR_RW(state);
790*4882a593Smuzhiyun
/* sysfs attributes created for every rfkill device */
static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);
802*4882a593Smuzhiyun
/*
 * Class release callback: frees the allocation made in rfkill_alloc()
 * once the embedded struct device's last reference is dropped.
 */
static void rfkill_release(struct device *dev)
{
	kfree(to_rfkill(dev));
}
809*4882a593Smuzhiyun
/*
 * Populate the uevent environment with RFKILL_NAME, RFKILL_TYPE and
 * RFKILL_STATE so udev rules can act on rfkill devices.
 */
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_types[rfkill->type]);
	if (error)
		return error;
	/* snapshot the state under the lock so we report a consistent value */
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}
831*4882a593Smuzhiyun
/*
 * Temporarily stop polling the hardware state; pairs with
 * rfkill_resume_polling(). No-op for drivers without a poll() op.
 */
void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	/* mark paused before cancelling the (possibly running) poll work */
	rfkill->polling_paused = true;
	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
843*4882a593Smuzhiyun
/*
 * Re-enable hardware state polling after rfkill_pause_polling().
 * No-op for drivers without a poll() op.
 */
void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = false;

	/* while suspended, polling is restarted from rfkill_resume() instead */
	if (rfkill->suspended)
		return;

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/* PM suspend: stop polling across suspend; rfkill_resume() restarts it. */
static int rfkill_suspend(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill->suspended = true;
	cancel_delayed_work_sync(&rfkill->poll_work);

	return 0;
}
871*4882a593Smuzhiyun
/*
 * PM resume: re-apply the software block state to the driver (unless the
 * device is persistent) and restart polling if it wasn't explicitly paused.
 */
static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	rfkill->suspended = false;

	/* nothing to sync for a device that was never (or no longer) registered */
	if (!rfkill->registered)
		return 0;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	if (rfkill->ops->poll && !rfkill->polling_paused)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work, 0);

	return 0;
}
893*4882a593Smuzhiyun
static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
/* no CONFIG_PM_SLEEP: install no PM callbacks on the class */
#define RFKILL_PM_OPS NULL
#endif
899*4882a593Smuzhiyun
/* device class backing /sys/class/rfkill */
static struct class rfkill_class = {
	.name = "rfkill",
	.dev_release = rfkill_release,
	.dev_groups = rfkill_dev_groups,
	.dev_uevent = rfkill_dev_uevent,
	.pm = RFKILL_PM_OPS,
};
907*4882a593Smuzhiyun
rfkill_blocked(struct rfkill * rfkill)908*4882a593Smuzhiyun bool rfkill_blocked(struct rfkill *rfkill)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun unsigned long flags;
911*4882a593Smuzhiyun u32 state;
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun spin_lock_irqsave(&rfkill->lock, flags);
914*4882a593Smuzhiyun state = rfkill->state;
915*4882a593Smuzhiyun spin_unlock_irqrestore(&rfkill->lock, flags);
916*4882a593Smuzhiyun
917*4882a593Smuzhiyun return !!(state & RFKILL_BLOCK_ANY);
918*4882a593Smuzhiyun }
919*4882a593Smuzhiyun EXPORT_SYMBOL(rfkill_blocked);
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun
/**
 * rfkill_alloc - allocate and initialize a new rfkill structure
 * @name: device name, copied into the allocation (must not be NULL)
 * @parent: parent device, may be NULL
 * @type: radio type; must be a concrete type, not RFKILL_TYPE_ALL
 * @ops: switch operations; ops->set_block is mandatory
 * @ops_data: opaque cookie handed back to the @ops callbacks
 *
 * Returns the new structure, or NULL on invalid arguments or OOM.
 * Release it with rfkill_destroy() after any rfkill_unregister().
 */
struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	/* one allocation covers the struct plus the trailing name copy
	 * (presumably name[] is a flexible array member at the end of
	 * struct rfkill -- the struct tail is not visible here)
	 */
	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	strcpy(rfkill->name, name);
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
962*4882a593Smuzhiyun
/*
 * Periodic worker: let the driver refresh the hardware state, then
 * re-arm the poll timer for another POLL_INTERVAL.
 */
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work,
			   round_jiffies_relative(POLL_INTERVAL));
}
980*4882a593Smuzhiyun
/*
 * Deferred notification worker: emit a state-change event via
 * rfkill_event() from process context, under the global mutex.
 */
static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}
991*4882a593Smuzhiyun
/*
 * Worker scheduled at registration time: apply the current global
 * state for this device's type to the new device.
 */
static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}
1004*4882a593Smuzhiyun
/**
 * rfkill_register - register an rfkill structure with the core
 * @rfkill: structure obtained from rfkill_alloc()
 *
 * Assigns a unique index, adds the device, registers its LED trigger,
 * starts polling (if the driver provides a poll op), syncs the block
 * state and announces the new device to /dev/rfkill listeners.
 *
 * Returns 0 on success, -EINVAL on a NULL argument, -EALREADY if the
 * structure is already registered, or the device/LED registration error.
 */
int __must_check rfkill_register(struct rfkill *rfkill)
{
	/* monotonically increasing index shared by all rfkill devices */
	static unsigned long rfkill_no;
	struct device *dev;
	int error;

	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work,
				   round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		/* adopt the current global state for this type */
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		/* persistent device: push its state out to the others */
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_global_led_trigger_event();
	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
1074*4882a593Smuzhiyun
/**
 * rfkill_unregister - remove an rfkill structure from the core
 * @rfkill: structure previously registered with rfkill_register()
 *
 * Cancels all pending work, deletes the device and notifies
 * /dev/rfkill listeners. The memory is released later via
 * rfkill_destroy().
 */
void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	rfkill_global_led_trigger_event();
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
1098*4882a593Smuzhiyun
rfkill_destroy(struct rfkill * rfkill)1099*4882a593Smuzhiyun void rfkill_destroy(struct rfkill *rfkill)
1100*4882a593Smuzhiyun {
1101*4882a593Smuzhiyun if (rfkill)
1102*4882a593Smuzhiyun put_device(&rfkill->dev);
1103*4882a593Smuzhiyun }
1104*4882a593Smuzhiyun EXPORT_SYMBOL(rfkill_destroy);
1105*4882a593Smuzhiyun
/*
 * Open /dev/rfkill: allocate the per-fd state and queue one
 * RFKILL_OP_ADD event for every currently registered device, so a new
 * listener starts with a complete picture.
 */
static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	/* lock order: rfkill_global_mutex, then data->mtx */
	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return stream_open(inode, file);

 free:
	/* allocation failure: discard the startup events queued so far */
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}
1151*4882a593Smuzhiyun
rfkill_fop_poll(struct file * file,poll_table * wait)1152*4882a593Smuzhiyun static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun struct rfkill_data *data = file->private_data;
1155*4882a593Smuzhiyun __poll_t res = EPOLLOUT | EPOLLWRNORM;
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun poll_wait(file, &data->read_wait, wait);
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun mutex_lock(&data->mtx);
1160*4882a593Smuzhiyun if (!list_empty(&data->events))
1161*4882a593Smuzhiyun res = EPOLLIN | EPOLLRDNORM;
1162*4882a593Smuzhiyun mutex_unlock(&data->mtx);
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun return res;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun
/*
 * Read one queued event from /dev/rfkill, blocking unless O_NONBLOCK is
 * set. At most sizeof(struct rfkill_event) bytes are returned; shorter
 * reads serve older userspace with a smaller event struct.
 */
static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		/* since we re-check and it just compares pointers,
		 * using !list_empty() without locking isn't a problem
		 */
		ret = wait_event_interruptible(data->read_wait,
					       !list_empty(&data->events));
		mutex_lock(&data->mtx);

		/* interrupted by a signal: bail out with -ERESTARTSYS */
		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	/* the event is consumed even if the copy to userspace failed */
	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}
1208*4882a593Smuzhiyun
/*
 * Write an rfkill_event to /dev/rfkill to soft-block or unblock devices.
 * Shorter (older-API) and longer (newer-API) event structs are accepted.
 */
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;
	int ret;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	switch (ev.op) {
	case RFKILL_OP_CHANGE_ALL:
		/* update the default for the type, then apply to all matches */
		rfkill_update_global_state(ev.type, ev.soft);
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->type == ev.type ||
			    ev.type == RFKILL_TYPE_ALL)
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	case RFKILL_OP_CHANGE:
		/* apply only to the device with the matching index */
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->idx == ev.idx &&
			    (rfkill->type == ev.type ||
			     ev.type == RFKILL_TYPE_ALL))
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rfkill_global_mutex);

	return ret ?: count;
}
1260*4882a593Smuzhiyun
/*
 * Close /dev/rfkill: unlink the per-fd state, discard undelivered
 * events and re-enable the input handler if this fd had disabled it.
 */
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	/* balance the atomic_inc done in rfkill_fop_ioctl() */
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun #ifdef CONFIG_RFKILL_INPUT
/*
 * ioctl on /dev/rfkill: only the RFKILL_IOC_NOINPUT command is
 * supported. It disables the in-kernel input handler for as long as
 * this fd stays open; the counter is global and idempotent per fd.
 */
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
1310*4882a593Smuzhiyun
/* file operations for the /dev/rfkill misc device */
static const struct file_operations rfkill_fops = {
	.owner = THIS_MODULE,
	.open = rfkill_fop_open,
	.read = rfkill_fop_read,
	.write = rfkill_fop_write,
	.poll = rfkill_fop_poll,
	.release = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl = rfkill_fop_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
#endif
	.llseek = no_llseek,
};
1324*4882a593Smuzhiyun
#define RFKILL_NAME "rfkill"

/* misc character device backing /dev/rfkill */
static struct miscdevice rfkill_miscdev = {
	.fops = &rfkill_fops,
	.name = RFKILL_NAME,
	.minor = RFKILL_MINOR,
};
1332*4882a593Smuzhiyun
/*
 * Subsystem init: set the global default state, then register the
 * class, the misc device, the global LED trigger and (optionally) the
 * input handler, unwinding in reverse order on failure.
 */
static int __init rfkill_init(void)
{
	int error;

	rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

	error = class_register(&rfkill_class);
	if (error)
		goto error_class;

	error = misc_register(&rfkill_miscdev);
	if (error)
		goto error_misc;

	error = rfkill_global_led_trigger_register();
	if (error)
		goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		goto error_input;
#endif

	return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
	rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
	misc_deregister(&rfkill_miscdev);
error_misc:
	class_unregister(&rfkill_class);
error_class:
	return error;
}
subsys_initcall(rfkill_init);
1371*4882a593Smuzhiyun
/* Module exit: tear everything down in the reverse order of rfkill_init(). */
static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	rfkill_global_led_trigger_unregister();
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
1384*4882a593Smuzhiyun MODULE_ALIAS("devname:" RFKILL_NAME);
1385