/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/eventpoll.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

static DEFINE_MUTEX(uhid_open_mutex);

struct uhid_device {
	struct mutex devlock;

	/* This flag tracks whether the HID device is usable for commands from
	 * userspace. The flag is already set before hid_add_device(), which
	 * runs in workqueue context, to allow hid_add_device() to communicate
	 * with userspace.
	 * However, if hid_add_device() fails, the flag is cleared without
	 * holding devlock.
	 * We guarantee that if @running changes from true to false while you're
	 * holding @devlock, it's still fine to access @hid.
	 */
	bool running;

	__u8 *rd_data;
	uint rd_size;

	/* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* We used to call hid_destroy_device() here, but that's really
		 * messy to get right because we have to coordinate with
		 * concurrent writes from userspace that might be in the middle
		 * of using uhid->hid.
		 * Just leave uhid->hid as-is for now, and clean it up when
		 * userspace tries to close or reinitialize the uhid instance.
		 *
		 * However, we do have to clear the ->running flag and do a
		 * wakeup to make sure userspace knows that the device is gone.
		 */
		uhid->running = false;
		wake_up_interruptible(&uhid->report_wait);
	}
}

static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}
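
/*
 * Note on the queue above: @outq is a fixed-size ring buffer indexed by
 * @head (producer) and @tail (consumer). One slot is always kept empty to
 * distinguish "full" from "empty", so with UHID_BUFSIZE == 32 at most 31
 * events can be pending; an additional event is dropped with a warning.
 */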

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	int retval = 0;

	mutex_lock(&uhid_open_mutex);
	if (!hid->open++) {
		retval = uhid_queue_event(uhid, UHID_OPEN);
		if (retval)
			hid->open--;
	}
	mutex_unlock(&uhid_open_mutex);
	return retval;
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	mutex_lock(&uhid_open_mutex);
	if (!--hid->open)
		uhid_queue_event(uhid, UHID_CLOSE);
	mutex_unlock(&uhid_open_mutex);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}
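
/*
 * Illustrative userspace sketch (not part of this driver): the wait above is
 * terminated by userspace writing a matching reply event to /dev/uhid. A
 * minimal sketch, assuming the UHID_GET_REPORT request was just read into
 * "req" and "fd" is the open uhid file descriptor (both hypothetical names):
 *
 *	struct uhid_event reply = { .type = UHID_GET_REPORT_REPLY };
 *	reply.u.get_report_reply.id   = req.u.get_report.id;
 *	reply.u.get_report_reply.err  = 0;
 *	reply.u.get_report_reply.size = report_len;
 *	memcpy(reply.u.get_report_reply.data, report, report_len);
 *	write(fd, &reply, sizeof(reply));
 */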

static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));
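
/*
 * Why the extra structure: the native struct uhid_create_req embeds a raw
 * userspace pointer (rd_data), which is 8 bytes on a 64-bit kernel but only
 * 4 bytes as seen by 32-bit userspace, so every field after it lands at a
 * different offset. The compat variant mirrors the 32-bit layout with a
 * compat_uptr_t, which uhid_event_from_user() below widens via compat_ptr()
 * while copying the remaining fields one by one.
 */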

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (is_compat_task()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->hid)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* The HID device is added through a worker, to allow HID drivers
	 * which issue feature requests during .probe to work; otherwise they
	 * would block on devlock, which is held by uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}
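
/*
 * Illustrative userspace sketch (not part of this driver): creating a device
 * through this path is a single write() of one struct uhid_event. The
 * descriptor bytes and IDs below are placeholders, and "fd" and "rdesc" are
 * hypothetical names.
 *
 *	struct uhid_event ev = { .type = UHID_CREATE2 };
 *	strcpy((char *)ev.u.create2.name, "example-uhid-device");
 *	ev.u.create2.rd_size = sizeof(rdesc);
 *	memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
 *	ev.u.create2.bus = BUS_USB;
 *	ev.u.create2.vendor = 0x1234;
 *	ev.u.create2.product = 0x5678;
 *	write(fd, &ev, sizeof(ev));	// fd = open("/dev/uhid", O_RDWR)
 */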

static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}
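
/*
 * Illustrative userspace sketch (not part of this driver): once the device
 * is running, input reports are injected by writing UHID_INPUT2 events; the
 * report bytes must match the report descriptor registered at create time.
 * "fd", "report" and "report_len" are hypothetical names.
 *
 *	struct uhid_event ev = { .type = UHID_INPUT2 };
 *	ev.u.input2.size = report_len;			// <= UHID_DATA_MAX
 *	memcpy(ev.u.input2.data, report, report_len);	// raw HID report
 *	write(fd, &ev, sizeof(ev));
 */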

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

static ssize_t uhid_char_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred() || uaccess_kernel()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
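
/*
 * Illustrative userspace sketch (not part of this driver): a typical event
 * loop polls the fd and reads one struct uhid_event per read(), dispatching
 * on ev.type (UHID_START, UHID_OPEN, UHID_OUTPUT, UHID_GET_REPORT, ...).
 * "fd" and handle_event() are hypothetical names.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0) {
 *		struct uhid_event ev;
 *		if (read(fd, &ev, sizeof(ev)) <= 0)
 *			break;
 *		handle_event(&ev);
 *	}
 */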

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};

static int __init uhid_init(void)
{
	return misc_register(&uhid_misc);
}

static void __exit uhid_exit(void)
{
	misc_deregister(&uhid_misc);
}

module_init(uhid_init);
module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);