/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;
	bool			polling_paused;
	bool			suspended;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
	char			name[];
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct work_struct rfkill_any_work;

static void rfkill_any_led_trigger_worker(struct work_struct *work)
{
	enum led_brightness brightness = LED_OFF;
	struct rfkill *rfkill;

	mutex_lock(&rfkill_global_mutex);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
			brightness = LED_FULL;
			break;
		}
	}
	mutex_unlock(&rfkill_global_mutex);

	led_trigger_event(&rfkill_any_led_trigger, brightness);
}

static void rfkill_any_led_trigger_event(void)
{
	schedule_work(&rfkill_any_work);
}

static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev)
{
	rfkill_any_led_trigger_event();
}

static int rfkill_any_led_trigger_register(void)
{
	INIT_WORK(&rfkill_any_work, rfkill_any_led_trigger_worker);
	rfkill_any_led_trigger.name = "rfkill-any";
	rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate;
	return led_trigger_register(&rfkill_any_led_trigger);
}

static void rfkill_any_led_trigger_unregister(void)
{
	led_trigger_unregister(&rfkill_any_led_trigger);
	cancel_work_sync(&rfkill_any_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_any_led_trigger_event(void)
{
}

static int rfkill_any_led_trigger_register(void)
{
	return 0;
}

static void rfkill_any_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (prev)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we have set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_any_led_trigger_event();

	if (prev != curr)
		rfkill_event(rfkill);
}

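/*
 * Example (illustrative sketch, not part of this file): the shape of a
 * driver's set_block() callback as rfkill_set_block() above invokes it.
 * The foo_* names are hypothetical; a real driver issues whatever
 * register write or firmware command disables its transmitter and
 * returns 0 or a negative errno, which rfkill_set_block() uses to
 * decide whether to roll the SW bit back.
 *
 *	static int foo_set_block(void *data, bool blocked)
 *	{
 *		struct foo_dev *foo = data;
 *
 *		return foo_radio_power(foo, !blocked);
 *	}
 *
 *	static const struct rfkill_ops foo_rfkill_ops = {
 *		.set_block = foo_set_block,
 *	};
 */
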
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
	int i;

	if (type != RFKILL_TYPE_ALL) {
		rfkill_global_states[type].cur = blocked;
		return;
	}

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool ret, prev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_any_led_trigger_event();

	if (rfkill->registered && prev != blocked)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

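/*
 * Example (illustrative sketch, not part of this file): a driver
 * reporting a hardware kill switch from its interrupt handler. The
 * foo_* names are hypothetical. rfkill_set_hw_state() only takes the
 * rfkill spinlock, so calling it from atomic context like this is
 * fine; the resulting uevent is deferred to rfkill->uevent_work.
 *
 *	static irqreturn_t foo_rfkill_irq(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *
 *		rfkill_set_hw_state(foo->rfkill,
 *				    foo_killswitch_asserted(foo));
 *		return IRQ_HANDLED;
 *	}
 */
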
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
	rfkill_any_led_trigger_event();

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
		rfkill_any_led_trigger_event();
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
	NULL, /* RFKILL_TYPE_ALL */
	"wlan",
	"bluetooth",
	"ultrawideband",
	"wimax",
	"wwan",
	"gps",
	"fm",
	"nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

	if (!name)
		return RFKILL_TYPE_ALL;

	for (i = 1; i < NUM_RFKILL_TYPES; i++)
		if (!strcmp(name, rfkill_types[i]))
			return i;
	return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);

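/*
 * Example (illustrative sketch, not part of this file): mapping a type
 * string, e.g. from a device-tree property, to an enum rfkill_type
 * before allocation. The "radio-type" property name is hypothetical.
 * Note that NULL and unknown strings fall back to RFKILL_TYPE_ALL,
 * which rfkill_alloc() rejects, so callers should check the result.
 *
 *	const char *type_name = NULL;
 *	enum rfkill_type type;
 *
 *	of_property_read_string(np, "radio-type", &type_name);
 *	type = rfkill_find_type(type_name);
 */
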
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(soft);

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_types[rfkill->type]);
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = true;
	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = false;

	if (rfkill->suspended)
		return;

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static __maybe_unused int rfkill_suspend(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill->suspended = true;
	cancel_delayed_work_sync(&rfkill->poll_work);

	return 0;
}

static __maybe_unused int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	rfkill->suspended = false;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	if (rfkill->ops->poll && !rfkill->polling_paused)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work, 0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_groups	= rfkill_dev_groups,
	.dev_uevent	= rfkill_dev_uevent,
	.pm		= IS_ENABLED(CONFIG_RFKILL_PM) ? &rfkill_pm_ops : NULL,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	strcpy(rfkill->name, name);
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

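/*
 * Example (illustrative sketch, not part of this file): the usual
 * driver lifecycle around rfkill_alloc(), reusing the hypothetical
 * foo_* names from the sketches above. rfkill_register() schedules
 * sync_work, which may invoke foo_set_block() shortly afterwards, so
 * the device must be ready to service it by then.
 *
 *	foo->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *				   RFKILL_TYPE_WLAN, &foo_rfkill_ops, foo);
 *	if (!foo->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(foo->rfkill);
 *	if (err) {
 *		rfkill_destroy(foo->rfkill);
 *		return err;
 *	}
 *
 *	...and on teardown:
 *
 *	rfkill_unregister(foo->rfkill);
 *	rfkill_destroy(foo->rfkill);
 */
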
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	queue_delayed_work(system_power_efficient_wq,
		&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev;
	int error;

	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		queue_delayed_work(system_power_efficient_wq,
			&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_any_led_trigger_event();
	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	rfkill_any_led_trigger_event();
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		/* since we re-check and it just compares pointers,
		 * using !list_empty() without locking isn't a problem
		 */
		ret = wait_event_interruptible(data->read_wait,
					       !list_empty(&data->events));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

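/*
 * Example (illustrative sketch, not part of this file): the userspace
 * side of the read path above. Each read() returns at most one
 * struct rfkill_event (from <linux/rfkill.h>); short buffers are
 * accepted so that binaries built against an older, smaller event
 * struct keep working.
 *
 *	struct rfkill_event ev;
 *	int fd = open("/dev/rfkill", O_RDONLY);
 *
 *	while (read(fd, &ev, sizeof(ev)) > 0)
 *		printf("idx %u type %u op %u soft %u hard %u\n",
 *		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
 */
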
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;
	int ret;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	switch (ev.op) {
	case RFKILL_OP_CHANGE_ALL:
		rfkill_update_global_state(ev.type, ev.soft);
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->type == ev.type ||
			    ev.type == RFKILL_TYPE_ALL)
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	case RFKILL_OP_CHANGE:
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->idx == ev.idx &&
			    (rfkill->type == ev.type ||
			     ev.type == RFKILL_TYPE_ALL))
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rfkill_global_mutex);

	return ret ?: count;
}

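/*
 * Example (illustrative sketch, not part of this file): the userspace
 * side of the write path above, soft-blocking all WLAN devices in one
 * operation. Writes shorter than the current struct are accepted down
 * to RFKILL_EVENT_SIZE_V1 - 1 bytes, as checked above.
 *
 *	struct rfkill_event ev = {
 *		.op   = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *
 *	if (write(fd, &ev, sizeof(ev)) < 0)
 *		perror("rfkill write");
 */
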
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
	.llseek		= no_llseek,
};

#define RFKILL_NAME "rfkill"

static struct miscdevice rfkill_miscdev = {
	.fops	= &rfkill_fops,
	.name	= RFKILL_NAME,
	.minor	= RFKILL_MINOR,
};

static int __init rfkill_init(void)
{
	int error;

	rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

	error = class_register(&rfkill_class);
	if (error)
		goto error_class;

	error = misc_register(&rfkill_miscdev);
	if (error)
		goto error_misc;

	error = rfkill_any_led_trigger_register();
	if (error)
		goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		goto error_input;
#endif

	return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
	rfkill_any_led_trigger_unregister();
#endif
error_led_trigger:
	misc_deregister(&rfkill_miscdev);
error_misc:
	class_unregister(&rfkill_class);
error_class:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	rfkill_any_led_trigger_unregister();
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);