// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

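/*
 * Rough userspace usage sketch (illustration only, error handling omitted;
 * "rdesc"/"rdesc_size" stand in for a real HID report descriptor): a client
 * opens /dev/uhid and exchanges struct uhid_event packets with plain
 * read()/write() calls, one event per call:
 *
 *	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
 *	struct uhid_event ev = { .type = UHID_CREATE2 };
 *
 *	strcpy((char *)ev.u.create2.name, "example-uhid-device");
 *	memcpy(ev.u.create2.rd_data, rdesc, rdesc_size);
 *	ev.u.create2.rd_size = rdesc_size;
 *	ev.u.create2.bus = BUS_USB;
 *	write(fd, &ev, sizeof(ev));
 *
 * Afterwards the client read()s UHID_START/UHID_OPEN/UHID_OUTPUT/... events
 * from the same fd and write()s UHID_INPUT2 events to inject reports. See
 * Documentation/hid/uhid.rst and samples/uhid/ for a complete example.
 */
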
struct uhid_device {
        struct mutex devlock;

        /* This flag tracks whether the HID device is usable for commands from
         * userspace. The flag is already set before hid_add_device(), which
         * runs in workqueue context, to allow hid_add_device() to communicate
         * with userspace.
         * However, if hid_add_device() fails, the flag is cleared without
         * holding devlock.
         * We guarantee that if @running changes from true to false while you're
         * holding @devlock, it's still fine to access @hid.
         */
        bool running;

        __u8 *rd_data;
        uint rd_size;

        /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
        struct hid_device *hid;
        struct uhid_event input_buf;

        wait_queue_head_t waitq;
        spinlock_t qlock;
        __u8 head;
        __u8 tail;
        struct uhid_event *outq[UHID_BUFSIZE];

        /* blocking GET_REPORT support; state changes protected by qlock */
        struct mutex report_lock;
        wait_queue_head_t report_wait;
        bool report_running;
        u32 report_id;
        u32 report_type;
        struct uhid_event report_buf;
        struct work_struct worker;
};
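
/*
 * Rough locking overview, derived from the code below: @devlock serializes the
 * character-device read/write paths and device setup/teardown, @report_lock
 * serializes blocking GET_REPORT/SET_REPORT round-trips, and @qlock (a
 * spinlock) protects the @outq ring buffer and the report_* completion state;
 * @qlock is taken nested inside either mutex where needed.
 */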

static struct miscdevice uhid_misc;

static void uhid_device_add_worker(struct work_struct *work)
{
        struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
        int ret;

        ret = hid_add_device(uhid->hid);
        if (ret) {
                hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

                /* We used to call hid_destroy_device() here, but that's really
                 * messy to get right because we have to coordinate with
                 * concurrent writes from userspace that might be in the middle
                 * of using uhid->hid.
                 * Just leave uhid->hid as-is for now, and clean it up when
                 * userspace tries to close or reinitialize the uhid instance.
                 *
                 * However, we do have to clear the ->running flag and do a
                 * wakeup to make sure userspace knows that the device is gone.
                 */
                uhid->running = false;
                wake_up_interruptible(&uhid->report_wait);
        }
}

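/* Enqueue @ev on the outgoing ring buffer; the caller must hold @qlock.
 * Ownership of @ev passes to the queue: it is freed here if the queue is
 * full, otherwise it is freed once uhid_char_read() has delivered it to
 * userspace (or when the character device is released).
 */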
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
        __u8 newhead;

        newhead = (uhid->head + 1) % UHID_BUFSIZE;

        if (newhead != uhid->tail) {
                uhid->outq[uhid->head] = ev;
                uhid->head = newhead;
                wake_up_interruptible(&uhid->waitq);
        } else {
                hid_warn(uhid->hid, "Output queue is full\n");
                kfree(ev);
        }
}

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
        unsigned long flags;
        struct uhid_event *ev;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->type = event;

        spin_lock_irqsave(&uhid->qlock, flags);
        uhid_queue(uhid, ev);
        spin_unlock_irqrestore(&uhid->qlock, flags);

        return 0;
}

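/* ->start() callback of the low-level driver: queue a UHID_START event for
 * userspace and report, via dev_flags, which report types use numbered
 * reports.
 */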
static int uhid_hid_start(struct hid_device *hid)
{
        struct uhid_device *uhid = hid->driver_data;
        struct uhid_event *ev;
        unsigned long flags;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->type = UHID_START;

        if (hid->report_enum[HID_FEATURE_REPORT].numbered)
                ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
        if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
                ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
        if (hid->report_enum[HID_INPUT_REPORT].numbered)
                ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

        spin_lock_irqsave(&uhid->qlock, flags);
        uhid_queue(uhid, ev);
        spin_unlock_irqrestore(&uhid->qlock, flags);

        return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
        struct uhid_device *uhid = hid->driver_data;

        hid->claimed = 0;
        uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
        struct uhid_device *uhid = hid->driver_data;

        return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
        struct uhid_device *uhid = hid->driver_data;

        uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
        struct uhid_device *uhid = hid->driver_data;

        return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
                                        struct uhid_event *ev,
                                        __u32 *report_id)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&uhid->qlock, flags);
        *report_id = ++uhid->report_id;
        uhid->report_type = ev->type + 1;
        uhid->report_running = true;
        uhid_queue(uhid, ev);
        spin_unlock_irqrestore(&uhid->qlock, flags);

        ret = wait_event_interruptible_timeout(uhid->report_wait,
                        !uhid->report_running || !uhid->running,
                        5 * HZ);
        if (!ret || !uhid->running || uhid->report_running)
                ret = -EIO;
        else if (ret < 0)
                ret = -ERESTARTSYS;
        else
                ret = 0;

        uhid->report_running = false;

        return ret;
}

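/* Called when userspace delivers a GET/SET_REPORT reply: if the reply matches
 * the currently pending request (same type and id), stash it in @report_buf
 * and wake the waiter in __uhid_report_queue_and_wait(); stale replies are
 * dropped silently.
 */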
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
                                const struct uhid_event *ev)
{
        unsigned long flags;

        spin_lock_irqsave(&uhid->qlock, flags);

        /* id for old report; drop it silently */
        if (uhid->report_type != ev->type || uhid->report_id != id)
                goto unlock;
        if (!uhid->report_running)
                goto unlock;

        memcpy(&uhid->report_buf, ev, sizeof(*ev));
        uhid->report_running = false;
        wake_up_interruptible(&uhid->report_wait);

unlock:
        spin_unlock_irqrestore(&uhid->qlock, flags);
}

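/* Synchronous GET_REPORT: queue a UHID_GET_REPORT request and block (up to
 * the 5 second timeout in __uhid_report_queue_and_wait()) until userspace
 * answers with UHID_GET_REPORT_REPLY, then copy the returned data into @buf.
 */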
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
                               u8 *buf, size_t count, u8 rtype)
{
        struct uhid_device *uhid = hid->driver_data;
        struct uhid_get_report_reply_req *req;
        struct uhid_event *ev;
        int ret;

        if (!uhid->running)
                return -EIO;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->type = UHID_GET_REPORT;
        ev->u.get_report.rnum = rnum;
        ev->u.get_report.rtype = rtype;

        ret = mutex_lock_interruptible(&uhid->report_lock);
        if (ret) {
                kfree(ev);
                return ret;
        }

        /* this _always_ takes ownership of @ev */
        ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
        if (ret)
                goto unlock;

        req = &uhid->report_buf.u.get_report_reply;
        if (req->err) {
                ret = -EIO;
        } else {
                ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
                memcpy(buf, req->data, ret);
        }

unlock:
        mutex_unlock(&uhid->report_lock);
        return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
                               const u8 *buf, size_t count, u8 rtype)
{
        struct uhid_device *uhid = hid->driver_data;
        struct uhid_event *ev;
        int ret;

        if (!uhid->running || count > UHID_DATA_MAX)
                return -EIO;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->type = UHID_SET_REPORT;
        ev->u.set_report.rnum = rnum;
        ev->u.set_report.rtype = rtype;
        ev->u.set_report.size = count;
        memcpy(ev->u.set_report.data, buf, count);

        ret = mutex_lock_interruptible(&uhid->report_lock);
        if (ret) {
                kfree(ev);
                return ret;
        }

        /* this _always_ takes ownership of @ev */
        ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
        if (ret)
                goto unlock;

        if (uhid->report_buf.u.set_report_reply.err)
                ret = -EIO;
        else
                ret = count;

unlock:
        mutex_unlock(&uhid->report_lock);
        return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
                                __u8 *buf, size_t len, unsigned char rtype,
                                int reqtype)
{
        u8 u_rtype;

        switch (rtype) {
        case HID_FEATURE_REPORT:
                u_rtype = UHID_FEATURE_REPORT;
                break;
        case HID_OUTPUT_REPORT:
                u_rtype = UHID_OUTPUT_REPORT;
                break;
        case HID_INPUT_REPORT:
                u_rtype = UHID_INPUT_REPORT;
                break;
        default:
                return -EINVAL;
        }

        switch (reqtype) {
        case HID_REQ_GET_REPORT:
                return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
        case HID_REQ_SET_REPORT:
                return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
        default:
                return -EIO;
        }
}

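/* Output and feature reports sent towards the device are simply queued as
 * UHID_OUTPUT events; there is no acknowledgement from userspace, so the
 * full @count is returned optimistically once the event has been queued.
 */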
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
                               unsigned char report_type)
{
        struct uhid_device *uhid = hid->driver_data;
        __u8 rtype;
        unsigned long flags;
        struct uhid_event *ev;

        switch (report_type) {
        case HID_FEATURE_REPORT:
                rtype = UHID_FEATURE_REPORT;
                break;
        case HID_OUTPUT_REPORT:
                rtype = UHID_OUTPUT_REPORT;
                break;
        default:
                return -EINVAL;
        }

        if (count < 1 || count > UHID_DATA_MAX)
                return -EINVAL;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->type = UHID_OUTPUT;
        ev->u.output.size = count;
        ev->u.output.rtype = rtype;
        memcpy(ev->u.output.data, buf, count);

        spin_lock_irqsave(&uhid->qlock, flags);
        uhid_queue(uhid, ev);
        spin_unlock_irqrestore(&uhid->qlock, flags);

        return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
                                  size_t count)
{
        return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

struct hid_ll_driver uhid_hid_driver = {
        .start = uhid_hid_start,
        .stop = uhid_hid_stop,
        .open = uhid_hid_open,
        .close = uhid_hid_close,
        .parse = uhid_hid_parse,
        .raw_request = uhid_hid_raw_request,
        .output_report = uhid_hid_output_report,
        .max_buffer_size = UHID_DATA_MAX,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
        __u8 name[128];
        __u8 phys[64];
        __u8 uniq[64];

        compat_uptr_t rd_data;
        __u16 rd_size;

        __u16 bus;
        __u32 vendor;
        __u32 product;
        __u32 version;
        __u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
                                struct uhid_event *event)
{
        if (in_compat_syscall()) {
                u32 type;

                if (get_user(type, buffer))
                        return -EFAULT;

                if (type == UHID_CREATE) {
                        /*
                         * This is our messed up request with compat pointer.
                         * It is largish (more than 256 bytes) so we better
                         * allocate it from the heap.
                         */
                        struct uhid_create_req_compat *compat;

                        compat = kzalloc(sizeof(*compat), GFP_KERNEL);
                        if (!compat)
                                return -ENOMEM;

                        buffer += sizeof(type);
                        len -= sizeof(type);
                        if (copy_from_user(compat, buffer,
                                           min(len, sizeof(*compat)))) {
                                kfree(compat);
                                return -EFAULT;
                        }

                        /* Shuffle the data over to proper structure */
                        event->type = type;

                        memcpy(event->u.create.name, compat->name,
                                sizeof(compat->name));
                        memcpy(event->u.create.phys, compat->phys,
                                sizeof(compat->phys));
                        memcpy(event->u.create.uniq, compat->uniq,
                                sizeof(compat->uniq));

                        event->u.create.rd_data = compat_ptr(compat->rd_data);
                        event->u.create.rd_size = compat->rd_size;

                        event->u.create.bus = compat->bus;
                        event->u.create.vendor = compat->vendor;
                        event->u.create.product = compat->product;
                        event->u.create.version = compat->version;
                        event->u.create.country = compat->country;

                        kfree(compat);
                        return 0;
                }
                /* All others can be copied directly */
        }

        if (copy_from_user(event, buffer, min(len, sizeof(*event))))
                return -EFAULT;

        return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
                                struct uhid_event *event)
{
        if (copy_from_user(event, buffer, min(len, sizeof(*event))))
                return -EFAULT;

        return 0;
}
#endif

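/* Handle UHID_CREATE2: duplicate the report descriptor, allocate and fill the
 * hid_device, and defer the actual hid_add_device() call to the worker so
 * that it does not run under @devlock (see the comment before
 * schedule_work() below).
 */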
static int uhid_dev_create2(struct uhid_device *uhid,
                            const struct uhid_event *ev)
{
        struct hid_device *hid;
        size_t rd_size, len;
        void *rd_data;
        int ret;

        if (uhid->hid)
                return -EALREADY;

        rd_size = ev->u.create2.rd_size;
        if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
                return -EINVAL;

        rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
        if (!rd_data)
                return -ENOMEM;

        uhid->rd_size = rd_size;
        uhid->rd_data = rd_data;

        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
                ret = PTR_ERR(hid);
                goto err_free;
        }

        /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
        len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
        strncpy(hid->name, ev->u.create2.name, len);
        len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
        strncpy(hid->phys, ev->u.create2.phys, len);
        len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
        strncpy(hid->uniq, ev->u.create2.uniq, len);

        hid->ll_driver = &uhid_hid_driver;
        hid->bus = ev->u.create2.bus;
        hid->vendor = ev->u.create2.vendor;
        hid->product = ev->u.create2.product;
        hid->version = ev->u.create2.version;
        hid->country = ev->u.create2.country;
        hid->driver_data = uhid;
        hid->dev.parent = uhid_misc.this_device;

        uhid->hid = hid;
        uhid->running = true;

        /* Adding the HID device is done through a worker so that HID drivers
         * which issue feature requests during .probe keep working; otherwise
         * they would block on devlock, which is held by uhid_char_write().
         */
        schedule_work(&uhid->worker);

        return 0;

err_free:
        kfree(uhid->rd_data);
        uhid->rd_data = NULL;
        uhid->rd_size = 0;
        return ret;
}

static int uhid_dev_create(struct uhid_device *uhid,
                           struct uhid_event *ev)
{
        struct uhid_create_req orig;

        orig = ev->u.create;

        if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
                return -EINVAL;
        if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
                return -EFAULT;

        memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
        memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
        memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
        ev->u.create2.rd_size = orig.rd_size;
        ev->u.create2.bus = orig.bus;
        ev->u.create2.vendor = orig.vendor;
        ev->u.create2.product = orig.product;
        ev->u.create2.version = orig.version;
        ev->u.create2.country = orig.country;

        return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
        if (!uhid->hid)
                return -EINVAL;

        uhid->running = false;
        wake_up_interruptible(&uhid->report_wait);

        cancel_work_sync(&uhid->worker);

        hid_destroy_device(uhid->hid);
        uhid->hid = NULL;
        kfree(uhid->rd_data);

        return 0;
}

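/* UHID_INPUT and UHID_INPUT2 feed input reports from userspace into HID core
 * via hid_input_report(); the two variants differ only in the layout of their
 * payload inside struct uhid_event.
 */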
static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
        if (!uhid->running)
                return -EINVAL;

        hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
                         min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

        return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
        if (!uhid->running)
                return -EINVAL;

        hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
                         min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

        return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
                                     struct uhid_event *ev)
{
        if (!uhid->running)
                return -EINVAL;

        uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
        return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
                                     struct uhid_event *ev)
{
        if (!uhid->running)
                return -EINVAL;

        uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
        return 0;
}

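/* Each open() of /dev/uhid gets its own independent uhid_device instance; the
 * HID device itself is only created once userspace sends UHID_CREATE2 (or the
 * legacy UHID_CREATE) on that file descriptor.
 */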
static int uhid_char_open(struct inode *inode, struct file *file)
{
        struct uhid_device *uhid;

        uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
        if (!uhid)
                return -ENOMEM;

        mutex_init(&uhid->devlock);
        mutex_init(&uhid->report_lock);
        spin_lock_init(&uhid->qlock);
        init_waitqueue_head(&uhid->waitq);
        init_waitqueue_head(&uhid->report_wait);
        uhid->running = false;
        INIT_WORK(&uhid->worker, uhid_device_add_worker);

        file->private_data = uhid;
        stream_open(inode, file);

        return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
        struct uhid_device *uhid = file->private_data;
        unsigned int i;

        uhid_dev_destroy(uhid);

        for (i = 0; i < UHID_BUFSIZE; ++i)
                kfree(uhid->outq[i]);

        kfree(uhid);

        return 0;
}

static ssize_t uhid_char_read(struct file *file, char __user *buffer,
                              size_t count, loff_t *ppos)
{
        struct uhid_device *uhid = file->private_data;
        int ret;
        unsigned long flags;
        size_t len;

        /* they need at least the "type" member of uhid_event */
        if (count < sizeof(__u32))
                return -EINVAL;

try_again:
        if (file->f_flags & O_NONBLOCK) {
                if (uhid->head == uhid->tail)
                        return -EAGAIN;
        } else {
                ret = wait_event_interruptible(uhid->waitq,
                                               uhid->head != uhid->tail);
                if (ret)
                        return ret;
        }

        ret = mutex_lock_interruptible(&uhid->devlock);
        if (ret)
                return ret;

        if (uhid->head == uhid->tail) {
                mutex_unlock(&uhid->devlock);
                goto try_again;
        } else {
                len = min(count, sizeof(**uhid->outq));
                if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
                        ret = -EFAULT;
                } else {
                        kfree(uhid->outq[uhid->tail]);
                        uhid->outq[uhid->tail] = NULL;

                        spin_lock_irqsave(&uhid->qlock, flags);
                        uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
                        spin_unlock_irqrestore(&uhid->qlock, flags);
                }
        }

        mutex_unlock(&uhid->devlock);
        return ret ? ret : len;
}

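/* Each write() carries exactly one struct uhid_event (larger writes are
 * truncated to sizeof(uhid_event)); the event is parsed into @input_buf and
 * dispatched under @devlock.
 */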
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
                               size_t count, loff_t *ppos)
{
        struct uhid_device *uhid = file->private_data;
        int ret;
        size_t len;

        /* we need at least the "type" member of uhid_event */
        if (count < sizeof(__u32))
                return -EINVAL;

        ret = mutex_lock_interruptible(&uhid->devlock);
        if (ret)
                return ret;

        memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
        len = min(count, sizeof(uhid->input_buf));

        ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
        if (ret)
                goto unlock;

        switch (uhid->input_buf.type) {
        case UHID_CREATE:
                /*
                 * 'struct uhid_create_req' contains a __user pointer which is
                 * copied from, so it's unsafe to allow this with elevated
                 * privileges (e.g. from a setuid binary) or via kernel_write().
                 */
                if (file->f_cred != current_cred() || uaccess_kernel()) {
                        pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
                                    task_tgid_vnr(current), current->comm);
                        ret = -EACCES;
                        goto unlock;
                }
                ret = uhid_dev_create(uhid, &uhid->input_buf);
                break;
        case UHID_CREATE2:
                ret = uhid_dev_create2(uhid, &uhid->input_buf);
                break;
        case UHID_DESTROY:
                ret = uhid_dev_destroy(uhid);
                break;
        case UHID_INPUT:
                ret = uhid_dev_input(uhid, &uhid->input_buf);
                break;
        case UHID_INPUT2:
                ret = uhid_dev_input2(uhid, &uhid->input_buf);
                break;
        case UHID_GET_REPORT_REPLY:
                ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
                break;
        case UHID_SET_REPORT_REPLY:
                ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
                break;
        default:
                ret = -EOPNOTSUPP;
        }

unlock:
        mutex_unlock(&uhid->devlock);

        /* return "count" not "len" to not confuse the caller */
        return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
        struct uhid_device *uhid = file->private_data;
        __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

        poll_wait(file, &uhid->waitq, wait);

        if (uhid->head != uhid->tail)
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}

static const struct file_operations uhid_fops = {
        .owner		= THIS_MODULE,
        .open		= uhid_char_open,
        .release	= uhid_char_release,
        .read		= uhid_char_read,
        .write		= uhid_char_write,
        .poll		= uhid_char_poll,
        .llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
        .fops		= &uhid_fops,
        .minor		= UHID_MINOR,
        .name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);