// SPDX-License-Identifier: GPL-2.0-only
/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * If you ever run into a situation in which you have a SW_ type rfkill
 * input device, then you can revive code that was removed in the patch
 * "rfkill-input: remove unused code".
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill.h"

enum rfkill_input_master_mode {
	RFKILL_INPUT_MASTER_UNLOCK = 0,
	RFKILL_INPUT_MASTER_RESTORE = 1,
	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
	NUM_RFKILL_INPUT_MASTER_MODES
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
					RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
	"SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");

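/*
 * Scheduling state, serialized by rfkill_op_lock: rfkill_op_pending and
 * rfkill_op describe a queued global operation, rfkill_sw_pending marks
 * rfkill types with a queued per-type toggle, and the matching bit in
 * rfkill_sw_state says whether that toggle should invert the current
 * global soft-block state (it is flipped once per queued toggle, so two
 * presses in a row cancel out).
 */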
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];

enum rfkill_sched_op {
	RFKILL_GLOBAL_OP_EPO = 0,
	RFKILL_GLOBAL_OP_RESTORE,
	RFKILL_GLOBAL_OP_UNLOCK,
	RFKILL_GLOBAL_OP_UNBLOCK,
};

static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;

static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
	unsigned int i;

	switch (op) {
	case RFKILL_GLOBAL_OP_EPO:
		rfkill_epo();
		break;
	case RFKILL_GLOBAL_OP_RESTORE:
		rfkill_restore_states();
		break;
	case RFKILL_GLOBAL_OP_UNLOCK:
		rfkill_remove_epo_lock();
		break;
	case RFKILL_GLOBAL_OP_UNBLOCK:
		rfkill_remove_epo_lock();
		for (i = 0; i < NUM_RFKILL_TYPES; i++)
			rfkill_switch_all(i, false);
		break;
	default:
		/* memory corruption or bug, fail safely */
		rfkill_epo();
		WARN(1, "Unknown requested operation %d! "
			"rfkill Emergency Power Off activated\n",
			op);
	}
}

static void __rfkill_handle_normal_op(const enum rfkill_type type,
				      const bool complement)
{
	bool blocked;

	blocked = rfkill_get_global_sw_state(type);
	if (complement)
		blocked = !blocked;

	rfkill_switch_all(type, blocked);
}

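/*
 * Worker that applies whatever was queued above.  A pending global
 * operation is handled first and clears any per-type toggles; the lock
 * is dropped around the rfkill core calls, so the loop re-checks
 * rfkill_op_pending afterwards in case a new global operation arrived
 * in the meantime.  Per-type toggles are skipped entirely while the
 * EPO lock is active.
 */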
static void rfkill_op_handler(struct work_struct *work)
{
	unsigned int i;
	bool c;

	spin_lock_irq(&rfkill_op_lock);
	do {
		if (rfkill_op_pending) {
			enum rfkill_sched_op op = rfkill_op;
			rfkill_op_pending = false;
			memset(rfkill_sw_pending, 0,
				sizeof(rfkill_sw_pending));
			spin_unlock_irq(&rfkill_op_lock);

			__rfkill_handle_global_op(op);

			spin_lock_irq(&rfkill_op_lock);

			/*
			 * handle global ops first -- during unlocked period
			 * we might have gotten a new global op.
			 */
			if (rfkill_op_pending)
				continue;
		}

		if (rfkill_is_epo_lock_active())
			continue;

		for (i = 0; i < NUM_RFKILL_TYPES; i++) {
			if (__test_and_clear_bit(i, rfkill_sw_pending)) {
				c = __test_and_clear_bit(i, rfkill_sw_state);
				spin_unlock_irq(&rfkill_op_lock);

				__rfkill_handle_normal_op(i, c);

				spin_lock_irq(&rfkill_op_lock);
			}
		}
	} while (rfkill_op_pending);
	spin_unlock_irq(&rfkill_op_lock);
}

static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;

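/*
 * Return the delay to use for the next scheduled operation: zero if at
 * least RFKILL_OPS_DELAY ms have passed since the last one, otherwise
 * the full delay.
 */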
static unsigned long rfkill_ratelimit(const unsigned long last)
{
	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
	return time_after(jiffies, last + delay) ? 0 : delay;
}

static void rfkill_schedule_ratelimited(void)
{
	if (schedule_delayed_work(&rfkill_op_work,
				  rfkill_ratelimit(rfkill_last_scheduled)))
		rfkill_last_scheduled = jiffies;
}

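/*
 * Queue a global operation.  An emergency power off is scheduled
 * immediately (unless the EPO lock is already active); everything else
 * goes through the rate limiter.
 */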
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
	unsigned long flags;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	rfkill_op = op;
	rfkill_op_pending = true;
	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
		/* bypass the limiter for EPO */
		mod_delayed_work(system_wq, &rfkill_op_work, 0);
		rfkill_last_scheduled = jiffies;
	} else
		rfkill_schedule_ratelimited();
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

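/*
 * Queue a soft-block toggle for one rfkill type.  Toggles are dropped
 * while the EPO lock is active or while a global operation is pending,
 * since the pending global operation clears them anyway.
 */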
static void rfkill_schedule_toggle(enum rfkill_type type)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	if (!rfkill_op_pending) {
		__set_bit(type, rfkill_sw_pending);
		__change_bit(type, rfkill_sw_state);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

static void rfkill_schedule_evsw_rfkillall(int state)
{
	if (state)
		rfkill_schedule_global_op(rfkill_master_switch_op);
	else
		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

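/*
 * Input event callback: radio hotkey presses are turned into per-type
 * toggles (KEY_RFKILL toggles all types), and SW_RFKILL_ALL changes are
 * forwarded to the master switch handling above.
 */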
static void rfkill_event(struct input_handle *handle, unsigned int type,
			unsigned int code, int data)
{
	if (type == EV_KEY && data == 1) {
		switch (code) {
		case KEY_WLAN:
			rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
			break;
		case KEY_BLUETOOTH:
			rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
			break;
		case KEY_UWB:
			rfkill_schedule_toggle(RFKILL_TYPE_UWB);
			break;
		case KEY_WIMAX:
			rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
			break;
		case KEY_RFKILL:
			rfkill_schedule_toggle(RFKILL_TYPE_ALL);
			break;
		}
	} else if (type == EV_SW && code == SW_RFKILL_ALL)
		rfkill_schedule_evsw_rfkillall(data);
}

static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
			  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "rfkill";

	/* causes rfkill_start() to be called */
	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

 err_unregister_handle:
	input_unregister_handle(handle);
 err_free_handle:
	kfree(handle);
	return error;
}

static void rfkill_start(struct input_handle *handle)
{
	/*
	 * Take event_lock to guard against configuration changes; we
	 * should be able to deal with concurrency with rfkill_event()
	 * just fine (which event_lock will also avoid).
	 */
	spin_lock_irq(&handle->dev->event_lock);

	if (test_bit(EV_SW, handle->dev->evbit) &&
	    test_bit(SW_RFKILL_ALL, handle->dev->swbit))
		rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
							handle->dev->sw));

	spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

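/*
 * Match any input device that can emit one of the radio hotkeys handled
 * above or the SW_RFKILL_ALL switch.
 */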
static const struct input_device_id rfkill_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
		.evbit = { BIT(EV_SW) },
		.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
	},
	{ }
};

static struct input_handler rfkill_handler = {
	.name = "rfkill",
	.event = rfkill_event,
	.connect = rfkill_connect,
	.start = rfkill_start,
	.disconnect = rfkill_disconnect,
	.id_table = rfkill_ids,
};

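/*
 * Translate the master_switch_mode module parameter into the global
 * operation used when SW_RFKILL_ALL reports "radios enabled", then
 * register the input handler.
 */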
int __init rfkill_handler_init(void)
{
	switch (rfkill_master_switch_mode) {
	case RFKILL_INPUT_MASTER_UNBLOCKALL:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
		break;
	case RFKILL_INPUT_MASTER_RESTORE:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
		break;
	case RFKILL_INPUT_MASTER_UNLOCK:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_init(&rfkill_op_lock);

	/* Avoid delay at first schedule */
	rfkill_last_scheduled =
			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
	return input_register_handler(&rfkill_handler);
}

void __exit rfkill_handler_exit(void)
{
	input_unregister_handler(&rfkill_handler);
	cancel_delayed_work_sync(&rfkill_op_work);
}