1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11 /*
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47
48 /*
49 * Convert a signed n-bit integer to signed 32-bit integer.
50 */
51
52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 if (!value || !n)
55 return 0;
56
57 if (n > 32)
58 n = 32;
59
60 return sign_extend32(value, n - 1);
61 }
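/*
* For example (illustrative values only), a 4-bit two's complement
* nibble sign-extends as expected:
*
*	snto32(0x7, 4) ==  7
*	snto32(0x8, 4) == -8
*	snto32(0xF, 4) == -1
*/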
62
63 /*
64 * Convert a signed 32-bit integer to a signed n-bit integer.
65 */
66
67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 s32 a;
70
71 if (!value || !n)
72 return 0;
73
74 a = value >> (n - 1);
75 if (a && a != -1)
76 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
77 return value & ((1 << n) - 1);
78 }
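/*
* Values that do not fit in n bits are clamped to the nearest
* representable extreme, e.g. (illustrative values only):
*
*	s32ton(100, 8)  == 0x64
*	s32ton(-5, 8)   == 0xfb
*	s32ton(300, 8)  == 0x7f	(clamped to the largest positive value)
*	s32ton(-200, 8) == 0x80	(clamped to the most negative value)
*/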
79
80 /*
81 * Register a new report for a device.
82 */
83
84 struct hid_report *hid_register_report(struct hid_device *device,
85 enum hid_report_type type, unsigned int id,
86 unsigned int application)
87 {
88 struct hid_report_enum *report_enum = device->report_enum + type;
89 struct hid_report *report;
90
91 if (id >= HID_MAX_IDS)
92 return NULL;
93 if (report_enum->report_id_hash[id])
94 return report_enum->report_id_hash[id];
95
96 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
97 if (!report)
98 return NULL;
99
100 if (id != 0)
101 report_enum->numbered = 1;
102
103 report->id = id;
104 report->type = type;
105 report->size = 0;
106 report->device = device;
107 report->application = application;
108 report_enum->report_id_hash[id] = report;
109
110 list_add_tail(&report->list, &report_enum->report_list);
111 INIT_LIST_HEAD(&report->field_entry_list);
112
113 return report;
114 }
115 EXPORT_SYMBOL_GPL(hid_register_report);
116
117 /*
118 * Register a new field for this report.
119 */
120
121 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
122 {
123 struct hid_field *field;
124
125 if (report->maxfield == HID_MAX_FIELDS) {
126 hid_err(report->device, "too many fields in report\n");
127 return NULL;
128 }
129
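/*
* A single allocation holds the field itself, followed by the usage
* array and the three per-usage s32 arrays (value, new_value and
* usages_priorities) that are carved out of it below.
*/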
130 field = kvzalloc((sizeof(struct hid_field) +
131 usages * sizeof(struct hid_usage) +
132 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
133 if (!field)
134 return NULL;
135
136 field->index = report->maxfield++;
137 report->field[field->index] = field;
138 field->usage = (struct hid_usage *)(field + 1);
139 field->value = (s32 *)(field->usage + usages);
140 field->new_value = (s32 *)(field->value + usages);
141 field->usages_priorities = (s32 *)(field->new_value + usages);
142 field->report = report;
143
144 return field;
145 }
146
147 /*
148 * Open a collection. The type/usage is pushed on the stack.
149 */
150
151 static int open_collection(struct hid_parser *parser, unsigned type)
152 {
153 struct hid_collection *collection;
154 unsigned usage;
155 int collection_index;
156
157 usage = parser->local.usage[0];
158
159 if (parser->collection_stack_ptr == parser->collection_stack_size) {
160 unsigned int *collection_stack;
161 unsigned int new_size = parser->collection_stack_size +
162 HID_COLLECTION_STACK_SIZE;
163
164 collection_stack = krealloc(parser->collection_stack,
165 new_size * sizeof(unsigned int),
166 GFP_KERNEL);
167 if (!collection_stack)
168 return -ENOMEM;
169
170 parser->collection_stack = collection_stack;
171 parser->collection_stack_size = new_size;
172 }
173
174 if (parser->device->maxcollection == parser->device->collection_size) {
175 collection = kmalloc(
176 array3_size(sizeof(struct hid_collection),
177 parser->device->collection_size,
178 2),
179 GFP_KERNEL);
180 if (collection == NULL) {
181 hid_err(parser->device, "failed to reallocate collection array\n");
182 return -ENOMEM;
183 }
184 memcpy(collection, parser->device->collection,
185 sizeof(struct hid_collection) *
186 parser->device->collection_size);
187 memset(collection + parser->device->collection_size, 0,
188 sizeof(struct hid_collection) *
189 parser->device->collection_size);
190 kfree(parser->device->collection);
191 parser->device->collection = collection;
192 parser->device->collection_size *= 2;
193 }
194
195 parser->collection_stack[parser->collection_stack_ptr++] =
196 parser->device->maxcollection;
197
198 collection_index = parser->device->maxcollection++;
199 collection = parser->device->collection + collection_index;
200 collection->type = type;
201 collection->usage = usage;
202 collection->level = parser->collection_stack_ptr - 1;
203 collection->parent_idx = (collection->level == 0) ? -1 :
204 parser->collection_stack[collection->level - 1];
205
206 if (type == HID_COLLECTION_APPLICATION)
207 parser->device->maxapplication++;
208
209 return 0;
210 }
211
212 /*
213 * Close a collection.
214 */
215
216 static int close_collection(struct hid_parser *parser)
217 {
218 if (!parser->collection_stack_ptr) {
219 hid_err(parser->device, "collection stack underflow\n");
220 return -EINVAL;
221 }
222 parser->collection_stack_ptr--;
223 return 0;
224 }
225
226 /*
227 * Climb up the stack, search for the specified collection type
228 * and return the usage.
229 */
230
231 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
232 {
233 struct hid_collection *collection = parser->device->collection;
234 int n;
235
236 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
237 unsigned index = parser->collection_stack[n];
238 if (collection[index].type == type)
239 return collection[index].usage;
240 }
241 return 0; /* we know nothing about this usage type */
242 }
243
244 /*
245 * Concatenate usage which defines 16 bits or less with the
246 * currently defined usage page to form a 32 bit usage
247 */
248
249 static void complete_usage(struct hid_parser *parser, unsigned int index)
250 {
251 parser->local.usage[index] &= 0xFFFF;
252 parser->local.usage[index] |=
253 (parser->global.usage_page & 0xFFFF) << 16;
254 }
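/*
* E.g. with the current Usage Page set to 0x0001 (Generic Desktop),
* a 16-bit (or shorter) Usage item of 0x0030 (X) is completed to the
* 32-bit usage 0x00010030.
*/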
255
256 /*
257 * Add a usage to the temporary parser table.
258 */
259
260 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
261 {
262 if (parser->local.usage_index >= HID_MAX_USAGES) {
263 hid_err(parser->device, "usage index exceeded\n");
264 return -1;
265 }
266 parser->local.usage[parser->local.usage_index] = usage;
267
268 /*
269 * If Usage item only includes usage id, concatenate it with
270 * currently defined usage page
271 */
272 if (size <= 2)
273 complete_usage(parser, parser->local.usage_index);
274
275 parser->local.usage_size[parser->local.usage_index] = size;
276 parser->local.collection_index[parser->local.usage_index] =
277 parser->collection_stack_ptr ?
278 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
279 parser->local.usage_index++;
280 return 0;
281 }
282
283 /*
284 * Register a new field for this report.
285 */
286
287 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
288 {
289 struct hid_report *report;
290 struct hid_field *field;
291 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
292 unsigned int usages;
293 unsigned int offset;
294 unsigned int i;
295 unsigned int application;
296
297 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
298
299 report = hid_register_report(parser->device, report_type,
300 parser->global.report_id, application);
301 if (!report) {
302 hid_err(parser->device, "hid_register_report failed\n");
303 return -1;
304 }
305
306 /* Handle both signed and unsigned cases properly */
307 if ((parser->global.logical_minimum < 0 &&
308 parser->global.logical_maximum <
309 parser->global.logical_minimum) ||
310 (parser->global.logical_minimum >= 0 &&
311 (__u32)parser->global.logical_maximum <
312 (__u32)parser->global.logical_minimum)) {
313 dbg_hid("logical range invalid 0x%x 0x%x\n",
314 parser->global.logical_minimum,
315 parser->global.logical_maximum);
316 return -1;
317 }
318
319 offset = report->size;
320 report->size += parser->global.report_size * parser->global.report_count;
321
322 if (parser->device->ll_driver->max_buffer_size)
323 max_buffer_size = parser->device->ll_driver->max_buffer_size;
324
325 /* Total size check: Allow for possible report index byte */
326 if (report->size > (max_buffer_size - 1) << 3) {
327 hid_err(parser->device, "report is too long\n");
328 return -1;
329 }
330
331 if (!parser->local.usage_index) /* Ignore padding fields */
332 return 0;
333
334 usages = max_t(unsigned, parser->local.usage_index,
335 parser->global.report_count);
336
337 field = hid_register_field(report, usages);
338 if (!field)
339 return 0;
340
341 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
342 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
343 field->application = application;
344
345 for (i = 0; i < usages; i++) {
346 unsigned j = i;
347 /* Duplicate the last usage we parsed if we have excess values */
348 if (i >= parser->local.usage_index)
349 j = parser->local.usage_index - 1;
350 field->usage[i].hid = parser->local.usage[j];
351 field->usage[i].collection_index =
352 parser->local.collection_index[j];
353 field->usage[i].usage_index = i;
354 field->usage[i].resolution_multiplier = 1;
355 }
356
357 field->maxusage = usages;
358 field->flags = flags;
359 field->report_offset = offset;
360 field->report_type = report_type;
361 field->report_size = parser->global.report_size;
362 field->report_count = parser->global.report_count;
363 field->logical_minimum = parser->global.logical_minimum;
364 field->logical_maximum = parser->global.logical_maximum;
365 field->physical_minimum = parser->global.physical_minimum;
366 field->physical_maximum = parser->global.physical_maximum;
367 field->unit_exponent = parser->global.unit_exponent;
368 field->unit = parser->global.unit;
369
370 return 0;
371 }
372
373 /*
374 * Read data value from item.
375 */
376
377 static u32 item_udata(struct hid_item *item)
378 {
379 switch (item->size) {
380 case 1: return item->data.u8;
381 case 2: return item->data.u16;
382 case 4: return item->data.u32;
383 }
384 return 0;
385 }
386
387 static s32 item_sdata(struct hid_item *item)
388 {
389 switch (item->size) {
390 case 1: return item->data.s8;
391 case 2: return item->data.s16;
392 case 4: return item->data.s32;
393 }
394 return 0;
395 }
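/*
* The signed/unsigned distinction matters for short items: a one-byte
* Logical Minimum carrying 0x81 reads as 129 through item_udata() but
* as -127 through item_sdata(), which is why the parser below picks
* the accessor based on the sign of the already-parsed minimum.
*/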
396
397 /*
398 * Process a global item.
399 */
400
401 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
402 {
403 __s32 raw_value;
404 switch (item->tag) {
405 case HID_GLOBAL_ITEM_TAG_PUSH:
406
407 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
408 hid_err(parser->device, "global environment stack overflow\n");
409 return -1;
410 }
411
412 memcpy(parser->global_stack + parser->global_stack_ptr++,
413 &parser->global, sizeof(struct hid_global));
414 return 0;
415
416 case HID_GLOBAL_ITEM_TAG_POP:
417
418 if (!parser->global_stack_ptr) {
419 hid_err(parser->device, "global environment stack underflow\n");
420 return -1;
421 }
422
423 memcpy(&parser->global, parser->global_stack +
424 --parser->global_stack_ptr, sizeof(struct hid_global));
425 return 0;
426
427 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
428 parser->global.usage_page = item_udata(item);
429 return 0;
430
431 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
432 parser->global.logical_minimum = item_sdata(item);
433 return 0;
434
435 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
436 if (parser->global.logical_minimum < 0)
437 parser->global.logical_maximum = item_sdata(item);
438 else
439 parser->global.logical_maximum = item_udata(item);
440 return 0;
441
442 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
443 parser->global.physical_minimum = item_sdata(item);
444 return 0;
445
446 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
447 if (parser->global.physical_minimum < 0)
448 parser->global.physical_maximum = item_sdata(item);
449 else
450 parser->global.physical_maximum = item_udata(item);
451 return 0;
452
453 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
454 /* Many devices provide unit exponent as a two's complement
455 * nibble due to the common misunderstanding of HID
456 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
457 * both this and the standard encoding. */
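/*
* E.g. for an exponent of -1: a nibble-encoding device sends 0x0f,
* which has no bits set outside the low nibble and is sign-extended
* to -1 below, while a device using the standard encoding sends
* 0xff, which already reads as -1 via item_sdata() and is used as-is.
*/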
458 raw_value = item_sdata(item);
459 if (!(raw_value & 0xfffffff0))
460 parser->global.unit_exponent = snto32(raw_value, 4);
461 else
462 parser->global.unit_exponent = raw_value;
463 return 0;
464
465 case HID_GLOBAL_ITEM_TAG_UNIT:
466 parser->global.unit = item_udata(item);
467 return 0;
468
469 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
470 parser->global.report_size = item_udata(item);
471 if (parser->global.report_size > 256) {
472 hid_err(parser->device, "invalid report_size %d\n",
473 parser->global.report_size);
474 return -1;
475 }
476 return 0;
477
478 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
479 parser->global.report_count = item_udata(item);
480 if (parser->global.report_count > HID_MAX_USAGES) {
481 hid_err(parser->device, "invalid report_count %d\n",
482 parser->global.report_count);
483 return -1;
484 }
485 return 0;
486
487 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
488 parser->global.report_id = item_udata(item);
489 if (parser->global.report_id == 0 ||
490 parser->global.report_id >= HID_MAX_IDS) {
491 hid_err(parser->device, "report_id %u is invalid\n",
492 parser->global.report_id);
493 return -1;
494 }
495 return 0;
496
497 default:
498 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
499 return -1;
500 }
501 }
502
503 /*
504 * Process a local item.
505 */
506
507 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
508 {
509 __u32 data;
510 unsigned n;
511 __u32 count;
512
513 data = item_udata(item);
514
515 switch (item->tag) {
516 case HID_LOCAL_ITEM_TAG_DELIMITER:
517
518 if (data) {
519 /*
520 * We treat items before the first delimiter
521 * as global to all usage sets (branch 0).
522 * At the moment we process only these global
523 * items and the first delimiter set.
524 */
525 if (parser->local.delimiter_depth != 0) {
526 hid_err(parser->device, "nested delimiters\n");
527 return -1;
528 }
529 parser->local.delimiter_depth++;
530 parser->local.delimiter_branch++;
531 } else {
532 if (parser->local.delimiter_depth < 1) {
533 hid_err(parser->device, "bogus close delimiter\n");
534 return -1;
535 }
536 parser->local.delimiter_depth--;
537 }
538 return 0;
539
540 case HID_LOCAL_ITEM_TAG_USAGE:
541
542 if (parser->local.delimiter_branch > 1) {
543 dbg_hid("alternative usage ignored\n");
544 return 0;
545 }
546
547 return hid_add_usage(parser, data, item->size);
548
549 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
550
551 if (parser->local.delimiter_branch > 1) {
552 dbg_hid("alternative usage ignored\n");
553 return 0;
554 }
555
556 parser->local.usage_minimum = data;
557 return 0;
558
559 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
560
561 if (parser->local.delimiter_branch > 1) {
562 dbg_hid("alternative usage ignored\n");
563 return 0;
564 }
565
566 count = data - parser->local.usage_minimum;
567 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
568 /*
569 * We do not warn if the name is not set; we are
570 * actually pre-scanning the device.
571 */
572 if (dev_name(&parser->device->dev))
573 hid_warn(parser->device,
574 "ignoring exceeding usage max\n");
575 data = HID_MAX_USAGES - parser->local.usage_index +
576 parser->local.usage_minimum - 1;
577 if (data <= 0) {
578 hid_err(parser->device,
579 "no more usage index available\n");
580 return -1;
581 }
582 }
583
584 for (n = parser->local.usage_minimum; n <= data; n++)
585 if (hid_add_usage(parser, n, item->size)) {
586 dbg_hid("hid_add_usage failed\n");
587 return -1;
588 }
589 return 0;
590
591 default:
592
593 dbg_hid("unknown local item tag 0x%x\n", item->tag);
594 return 0;
595 }
596 return 0;
597 }
598
599 /*
600 * Concatenate Usage Pages into Usages where relevant:
601 * As per specification, 6.2.2.8: "When the parser encounters a main item it
602 * concatenates the last declared Usage Page with a Usage to form a complete
603 * usage value."
604 */
605
606 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
607 {
608 int i;
609 unsigned int usage_page;
610 unsigned int current_page;
611
612 if (!parser->local.usage_index)
613 return;
614
615 usage_page = parser->global.usage_page;
616
617 /*
618 * Concatenate usage page again only if last declared Usage Page
619 * has not been already used in previous usages concatenation
620 */
621 for (i = parser->local.usage_index - 1; i >= 0; i--) {
622 if (parser->local.usage_size[i] > 2)
623 /* Ignore extended usages */
624 continue;
625
626 current_page = parser->local.usage[i] >> 16;
627 if (current_page == usage_page)
628 break;
629
630 complete_usage(parser, i);
631 }
632 }
633
634 /*
635 * Process a main item.
636 */
637
638 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
639 {
640 __u32 data;
641 int ret;
642
643 hid_concatenate_last_usage_page(parser);
644
645 data = item_udata(item);
646
647 switch (item->tag) {
648 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
649 ret = open_collection(parser, data & 0xff);
650 break;
651 case HID_MAIN_ITEM_TAG_END_COLLECTION:
652 ret = close_collection(parser);
653 break;
654 case HID_MAIN_ITEM_TAG_INPUT:
655 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
656 break;
657 case HID_MAIN_ITEM_TAG_OUTPUT:
658 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
659 break;
660 case HID_MAIN_ITEM_TAG_FEATURE:
661 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
662 break;
663 default:
664 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
665 ret = 0;
666 }
667
668 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
669
670 return ret;
671 }
672
673 /*
674 * Process a reserved item.
675 */
676
677 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
678 {
679 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
680 return 0;
681 }
682
683 /*
684 * Free a report and all registered fields. The field->usage and
685 * field->value tables are allocated behind the field, so we need
686 * only to free(field) itself.
687 */
688
689 static void hid_free_report(struct hid_report *report)
690 {
691 unsigned n;
692
693 kfree(report->field_entries);
694
695 for (n = 0; n < report->maxfield; n++)
696 kvfree(report->field[n]);
697 kfree(report);
698 }
699
700 /*
701 * Close report. This function returns the device
702 * state to the point prior to hid_open_report().
703 */
704 static void hid_close_report(struct hid_device *device)
705 {
706 unsigned i, j;
707
708 for (i = 0; i < HID_REPORT_TYPES; i++) {
709 struct hid_report_enum *report_enum = device->report_enum + i;
710
711 for (j = 0; j < HID_MAX_IDS; j++) {
712 struct hid_report *report = report_enum->report_id_hash[j];
713 if (report)
714 hid_free_report(report);
715 }
716 memset(report_enum, 0, sizeof(*report_enum));
717 INIT_LIST_HEAD(&report_enum->report_list);
718 }
719
720 kfree(device->rdesc);
721 device->rdesc = NULL;
722 device->rsize = 0;
723
724 kfree(device->collection);
725 device->collection = NULL;
726 device->collection_size = 0;
727 device->maxcollection = 0;
728 device->maxapplication = 0;
729
730 device->status &= ~HID_STAT_PARSED;
731 }
732
733 /*
734 * Free a device structure, all reports, and all fields.
735 */
736
737 void hiddev_free(struct kref *ref)
738 {
739 struct hid_device *hid = container_of(ref, struct hid_device, ref);
740
741 hid_close_report(hid);
742 kfree(hid->dev_rdesc);
743 kfree(hid);
744 }
745
746 static void hid_device_release(struct device *dev)
747 {
748 struct hid_device *hid = to_hid_device(dev);
749
750 kref_put(&hid->ref, hiddev_free);
751 }
752
753 /*
754 * Fetch a report description item from the data stream. We support long
755 * items, though they are not used yet.
756 */
757
758 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
759 {
760 u8 b;
761
762 if ((end - start) <= 0)
763 return NULL;
764
765 b = *start++;
766
767 item->type = (b >> 2) & 3;
768 item->tag = (b >> 4) & 15;
769
770 if (item->tag == HID_ITEM_TAG_LONG) {
771
772 item->format = HID_ITEM_FORMAT_LONG;
773
774 if ((end - start) < 2)
775 return NULL;
776
777 item->size = *start++;
778 item->tag = *start++;
779
780 if ((end - start) < item->size)
781 return NULL;
782
783 item->data.longdata = start;
784 start += item->size;
785 return start;
786 }
787
788 item->format = HID_ITEM_FORMAT_SHORT;
789 item->size = b & 3;
790
791 switch (item->size) {
792 case 0:
793 return start;
794
795 case 1:
796 if ((end - start) < 1)
797 return NULL;
798 item->data.u8 = *start++;
799 return start;
800
801 case 2:
802 if ((end - start) < 2)
803 return NULL;
804 item->data.u16 = get_unaligned_le16(start);
805 start = (__u8 *)((__le16 *)start + 1);
806 return start;
807
808 case 3:
809 item->size++;
810 if ((end - start) < 4)
811 return NULL;
812 item->data.u32 = get_unaligned_le32(start);
813 start = (__u8 *)((__le32 *)start + 1);
814 return start;
815 }
816
817 return NULL;
818 }
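/*
* Short item example (illustrative): the prefix byte 0x75 decodes to
* size 1 (bits 0-1), type 1/global (bits 2-3) and tag 0x7, i.e. a
* Report Size item, so the common descriptor fragment "0x75, 0x08"
* means Report Size (8).
*/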
819
820 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
821 {
822 struct hid_device *hid = parser->device;
823
824 if (usage == HID_DG_CONTACTID)
825 hid->group = HID_GROUP_MULTITOUCH;
826 }
827
828 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
829 {
830 if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
831 parser->global.report_size == 8)
832 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
833
834 if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
835 parser->global.report_size == 8)
836 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
837 }
838
839 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
840 {
841 struct hid_device *hid = parser->device;
842 int i;
843
844 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
845 (type == HID_COLLECTION_PHYSICAL ||
846 type == HID_COLLECTION_APPLICATION))
847 hid->group = HID_GROUP_SENSOR_HUB;
848
849 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
850 hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
851 hid->group == HID_GROUP_MULTITOUCH)
852 hid->group = HID_GROUP_GENERIC;
853
854 if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
855 for (i = 0; i < parser->local.usage_index; i++)
856 if (parser->local.usage[i] == HID_GD_POINTER)
857 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
858
859 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
860 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
861
862 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
863 for (i = 0; i < parser->local.usage_index; i++)
864 if (parser->local.usage[i] ==
865 (HID_UP_GOOGLEVENDOR | 0x0001))
866 parser->device->group =
867 HID_GROUP_VIVALDI;
868 }
869
870 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
871 {
872 __u32 data;
873 int i;
874
875 hid_concatenate_last_usage_page(parser);
876
877 data = item_udata(item);
878
879 switch (item->tag) {
880 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
881 hid_scan_collection(parser, data & 0xff);
882 break;
883 case HID_MAIN_ITEM_TAG_END_COLLECTION:
884 break;
885 case HID_MAIN_ITEM_TAG_INPUT:
886 /* ignore constant inputs, they will be ignored by hid-input */
887 if (data & HID_MAIN_ITEM_CONSTANT)
888 break;
889 for (i = 0; i < parser->local.usage_index; i++)
890 hid_scan_input_usage(parser, parser->local.usage[i]);
891 break;
892 case HID_MAIN_ITEM_TAG_OUTPUT:
893 break;
894 case HID_MAIN_ITEM_TAG_FEATURE:
895 for (i = 0; i < parser->local.usage_index; i++)
896 hid_scan_feature_usage(parser, parser->local.usage[i]);
897 break;
898 }
899
900 /* Reset the local parser environment */
901 memset(&parser->local, 0, sizeof(parser->local));
902
903 return 0;
904 }
905
906 /*
907 * Scan a report descriptor before the device is added to the bus.
908 * Sets device groups and other properties that determine what driver
909 * to load.
910 */
911 static int hid_scan_report(struct hid_device *hid)
912 {
913 struct hid_parser *parser;
914 struct hid_item item;
915 const __u8 *start = hid->dev_rdesc;
916 const __u8 *end = start + hid->dev_rsize;
917 static int (*dispatch_type[])(struct hid_parser *parser,
918 struct hid_item *item) = {
919 hid_scan_main,
920 hid_parser_global,
921 hid_parser_local,
922 hid_parser_reserved
923 };
924
925 parser = vzalloc(sizeof(struct hid_parser));
926 if (!parser)
927 return -ENOMEM;
928
929 parser->device = hid;
930 hid->group = HID_GROUP_GENERIC;
931
932 /*
933 * The parsing is simpler than the one in hid_open_report() as we should
934 * be robust against hid errors. Those errors will be raised by
935 * hid_open_report() anyway.
936 */
937 while ((start = fetch_item(start, end, &item)) != NULL)
938 dispatch_type[item.type](parser, &item);
939
940 /*
941 * Handle special flags set during scanning.
942 */
943 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
944 (hid->group == HID_GROUP_MULTITOUCH))
945 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
946
947 /*
948 * Vendor specific handlings
949 */
950 switch (hid->vendor) {
951 case USB_VENDOR_ID_WACOM:
952 hid->group = HID_GROUP_WACOM;
953 break;
954 case USB_VENDOR_ID_SYNAPTICS:
955 if (hid->group == HID_GROUP_GENERIC)
956 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
957 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
958 /*
959 * hid-rmi should take care of them,
960 * not hid-generic
961 */
962 hid->group = HID_GROUP_RMI;
963 break;
964 }
965
966 kfree(parser->collection_stack);
967 vfree(parser);
968 return 0;
969 }
970
971 /**
972 * hid_parse_report - parse device report
973 *
974 * @hid: hid device
975 * @start: report start
976 * @size: report size
977 *
978 * Allocate the device report as read by the bus driver. This function should
979 * only be called from parse() in ll drivers.
980 */
981 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
982 {
983 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
984 if (!hid->dev_rdesc)
985 return -ENOMEM;
986 hid->dev_rsize = size;
987 return 0;
988 }
989 EXPORT_SYMBOL_GPL(hid_parse_report);
990
991 static const char * const hid_report_names[] = {
992 "HID_INPUT_REPORT",
993 "HID_OUTPUT_REPORT",
994 "HID_FEATURE_REPORT",
995 };
996 /**
997 * hid_validate_values - validate existing device report's value indexes
998 *
999 * @hid: hid device
1000 * @type: which report type to examine
1001 * @id: which report ID to examine (0 for first)
1002 * @field_index: which report field to examine
1003 * @report_counts: expected number of values
1004 *
1005 * Validate the number of values in a given field of a given report, after
1006 * parsing.
1007 */
1008 struct hid_report *hid_validate_values(struct hid_device *hid,
1009 enum hid_report_type type, unsigned int id,
1010 unsigned int field_index,
1011 unsigned int report_counts)
1012 {
1013 struct hid_report *report;
1014
1015 if (type > HID_FEATURE_REPORT) {
1016 hid_err(hid, "invalid HID report type %u\n", type);
1017 return NULL;
1018 }
1019
1020 if (id >= HID_MAX_IDS) {
1021 hid_err(hid, "invalid HID report id %u\n", id);
1022 return NULL;
1023 }
1024
1025 /*
1026 * Explicitly not using hid_get_report() here since it depends on
1027 * ->numbered being checked, which may not always be the case when
1028 * drivers go to access report values.
1029 */
1030 if (id == 0) {
1031 /*
1032 * Validating on id 0 means we should examine the first
1033 * report in the list.
1034 */
1035 report = list_first_entry_or_null(
1036 &hid->report_enum[type].report_list,
1037 struct hid_report, list);
1038 } else {
1039 report = hid->report_enum[type].report_id_hash[id];
1040 }
1041 if (!report) {
1042 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1043 return NULL;
1044 }
1045 if (report->maxfield <= field_index) {
1046 hid_err(hid, "not enough fields in %s %u\n",
1047 hid_report_names[type], id);
1048 return NULL;
1049 }
1050 if (report->field[field_index]->report_count < report_counts) {
1051 hid_err(hid, "not enough values in %s %u field %u\n",
1052 hid_report_names[type], id, field_index);
1053 return NULL;
1054 }
1055 return report;
1056 }
1057 EXPORT_SYMBOL_GPL(hid_validate_values);
1058
1059 static int hid_calculate_multiplier(struct hid_device *hid,
1060 struct hid_field *multiplier)
1061 {
1062 int m;
1063 __s32 v = *multiplier->value;
1064 __s32 lmin = multiplier->logical_minimum;
1065 __s32 lmax = multiplier->logical_maximum;
1066 __s32 pmin = multiplier->physical_minimum;
1067 __s32 pmax = multiplier->physical_maximum;
1068
1069 /*
1070 * "Because OS implementations will generally divide the control's
1071 * reported count by the Effective Resolution Multiplier, designers
1072 * should take care not to establish a potential Effective
1073 * Resolution Multiplier of zero."
1074 * HID Usage Table, v1.12, Section 4.3.1, p31
1075 */
1076 if (lmax - lmin == 0)
1077 return 1;
1078 /*
1079 * Handling the unit exponent is left as an exercise to whoever
1080 * finds a device where that exponent is not 0.
1081 */
1082 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1083 if (unlikely(multiplier->unit_exponent != 0)) {
1084 hid_warn(hid,
1085 "unsupported Resolution Multiplier unit exponent %d\n",
1086 multiplier->unit_exponent);
1087 }
1088
1089 /* There are no devices with an effective multiplier > 255 */
1090 if (unlikely(m == 0 || m > 255 || m < -255)) {
1091 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1092 m = 1;
1093 }
1094
1095 return m;
1096 }
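/*
* Worked example (illustrative): for a multiplier field with logical
* range 0..1 and physical range 1..4, a reported value of 0 yields
* ((0 - 0) / (1 - 0)) * (4 - 1) + 1 = 1, and a value of 1 yields
* ((1 - 0) / (1 - 0)) * (4 - 1) + 1 = 4. The integer division is
* harmless here because the logical range is 0..1.
*/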
1097
1098 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1099 struct hid_field *field,
1100 struct hid_collection *multiplier_collection,
1101 int effective_multiplier)
1102 {
1103 struct hid_collection *collection;
1104 struct hid_usage *usage;
1105 int i;
1106
1107 /*
1108 * If multiplier_collection is NULL, the multiplier applies
1109 * to all fields in the report.
1110 * Otherwise, it is the Logical Collection the multiplier applies to
1111 * but our field may be in a subcollection of that collection.
1112 */
1113 for (i = 0; i < field->maxusage; i++) {
1114 usage = &field->usage[i];
1115
1116 collection = &hid->collection[usage->collection_index];
1117 while (collection->parent_idx != -1 &&
1118 collection != multiplier_collection)
1119 collection = &hid->collection[collection->parent_idx];
1120
1121 if (collection->parent_idx != -1 ||
1122 multiplier_collection == NULL)
1123 usage->resolution_multiplier = effective_multiplier;
1124
1125 }
1126 }
1127
1128 static void hid_apply_multiplier(struct hid_device *hid,
1129 struct hid_field *multiplier)
1130 {
1131 struct hid_report_enum *rep_enum;
1132 struct hid_report *rep;
1133 struct hid_field *field;
1134 struct hid_collection *multiplier_collection;
1135 int effective_multiplier;
1136 int i;
1137
1138 /*
1139 * "The Resolution Multiplier control must be contained in the same
1140 * Logical Collection as the control(s) to which it is to be applied.
1141 * If no Resolution Multiplier is defined, then the Resolution
1142 * Multiplier defaults to 1. If more than one control exists in a
1143 * Logical Collection, the Resolution Multiplier is associated with
1144 * all controls in the collection. If no Logical Collection is
1145 * defined, the Resolution Multiplier is associated with all
1146 * controls in the report."
1147 * HID Usage Table, v1.12, Section 4.3.1, p30
1148 *
1149 * Thus, search from the current collection upwards until we find a
1150 * logical collection. Then search all fields for that same parent
1151 * collection. Those are the fields the multiplier applies to.
1152 *
1153 * If we have more than one multiplier, it will overwrite the
1154 * applicable fields later.
1155 */
1156 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1157 while (multiplier_collection->parent_idx != -1 &&
1158 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1159 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1160 if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1161 multiplier_collection = NULL;
1162
1163 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1164
1165 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1166 list_for_each_entry(rep, &rep_enum->report_list, list) {
1167 for (i = 0; i < rep->maxfield; i++) {
1168 field = rep->field[i];
1169 hid_apply_multiplier_to_field(hid, field,
1170 multiplier_collection,
1171 effective_multiplier);
1172 }
1173 }
1174 }
1175
1176 /*
1177 * hid_setup_resolution_multiplier - set up all resolution multipliers
1178 *
1179 * @device: hid device
1180 *
1181 * Search for all Resolution Multiplier Feature Reports and apply their
1182 * value to all matching Input items. This only updates the internal struct
1183 * fields.
1184 *
1185 * The Resolution Multiplier is applied by the hardware. If the multiplier
1186 * is anything other than 1, the hardware will send pre-multiplied events
1187 * so that the same physical interaction generates an accumulated
1188 * accumulated_value = value * multiplier
1189 * This may be achieved by sending
1190 * - "value * multiplier" for each event, or
1191 * - "value" but "multiplier" times as frequently, or
1192 * - a combination of the above
1193 * The only guarantee is that the same physical interaction always generates
1194 * an accumulated 'value * multiplier'.
1195 *
1196 * This function must be called before any event processing and after
1197 * any SetRequest to the Resolution Multiplier.
1198 */
1199 void hid_setup_resolution_multiplier(struct hid_device *hid)
1200 {
1201 struct hid_report_enum *rep_enum;
1202 struct hid_report *rep;
1203 struct hid_usage *usage;
1204 int i, j;
1205
1206 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1207 list_for_each_entry(rep, &rep_enum->report_list, list) {
1208 for (i = 0; i < rep->maxfield; i++) {
1209 /* Ignore if report count is out of bounds. */
1210 if (rep->field[i]->report_count < 1)
1211 continue;
1212
1213 for (j = 0; j < rep->field[i]->maxusage; j++) {
1214 usage = &rep->field[i]->usage[j];
1215 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1216 hid_apply_multiplier(hid,
1217 rep->field[i]);
1218 }
1219 }
1220 }
1221 }
1222 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1223
1224 /**
1225 * hid_open_report - open a driver-specific device report
1226 *
1227 * @device: hid device
1228 *
1229 * Parse a report description into a hid_device structure. Reports are
1230 * enumerated, fields are attached to these reports.
1231 * 0 returned on success, otherwise nonzero error value.
1232 *
1233 * This function (or the equivalent hid_parse() macro) should only be
1234 * called from probe() in drivers, before starting the device.
1235 */
1236 int hid_open_report(struct hid_device *device)
1237 {
1238 struct hid_parser *parser;
1239 struct hid_item item;
1240 unsigned int size;
1241 const __u8 *start;
1242 __u8 *buf;
1243 const __u8 *end;
1244 const __u8 *next;
1245 int ret;
1246 int i;
1247 static int (*dispatch_type[])(struct hid_parser *parser,
1248 struct hid_item *item) = {
1249 hid_parser_main,
1250 hid_parser_global,
1251 hid_parser_local,
1252 hid_parser_reserved
1253 };
1254
1255 if (WARN_ON(device->status & HID_STAT_PARSED))
1256 return -EBUSY;
1257
1258 start = device->dev_rdesc;
1259 if (WARN_ON(!start))
1260 return -ENODEV;
1261 size = device->dev_rsize;
1262
1263 /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
1264 buf = call_hid_bpf_rdesc_fixup(device, start, &size);
1265 if (buf == NULL)
1266 return -ENOMEM;
1267
1268 if (device->driver->report_fixup)
1269 start = device->driver->report_fixup(device, buf, &size);
1270 else
1271 start = buf;
1272
1273 start = kmemdup(start, size, GFP_KERNEL);
1274 kfree(buf);
1275 if (start == NULL)
1276 return -ENOMEM;
1277
1278 device->rdesc = start;
1279 device->rsize = size;
1280
1281 parser = vzalloc(sizeof(struct hid_parser));
1282 if (!parser) {
1283 ret = -ENOMEM;
1284 goto alloc_err;
1285 }
1286
1287 parser->device = device;
1288
1289 end = start + size;
1290
1291 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1292 sizeof(struct hid_collection), GFP_KERNEL);
1293 if (!device->collection) {
1294 ret = -ENOMEM;
1295 goto err;
1296 }
1297 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1298 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1299 device->collection[i].parent_idx = -1;
1300
1301 ret = -EINVAL;
1302 while ((next = fetch_item(start, end, &item)) != NULL) {
1303 start = next;
1304
1305 if (item.format != HID_ITEM_FORMAT_SHORT) {
1306 hid_err(device, "unexpected long global item\n");
1307 goto err;
1308 }
1309
1310 if (dispatch_type[item.type](parser, &item)) {
1311 hid_err(device, "item %u %u %u %u parsing failed\n",
1312 item.format, (unsigned)item.size,
1313 (unsigned)item.type, (unsigned)item.tag);
1314 goto err;
1315 }
1316
1317 if (start == end) {
1318 if (parser->collection_stack_ptr) {
1319 hid_err(device, "unbalanced collection at end of report description\n");
1320 goto err;
1321 }
1322 if (parser->local.delimiter_depth) {
1323 hid_err(device, "unbalanced delimiter at end of report description\n");
1324 goto err;
1325 }
1326
1327 /*
1328 * fetch initial values in case the device's
1329 * default multiplier isn't the recommended 1
1330 */
1331 hid_setup_resolution_multiplier(device);
1332
1333 kfree(parser->collection_stack);
1334 vfree(parser);
1335 device->status |= HID_STAT_PARSED;
1336
1337 return 0;
1338 }
1339 }
1340
1341 hid_err(device, "item fetching failed at offset %u/%u\n",
1342 size - (unsigned int)(end - start), size);
1343 err:
1344 kfree(parser->collection_stack);
1345 alloc_err:
1346 vfree(parser);
1347 hid_close_report(device);
1348 return ret;
1349 }
1350 EXPORT_SYMBOL_GPL(hid_open_report);
1351
1352 /*
1353 * Extract/implement a data field from/to a little endian report (bit array).
1354 *
1355 * Code sort-of follows HID spec:
1356 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1357 *
1358 * While the USB HID spec allows unlimited length bit fields in "report
1359 * descriptors", most devices never use more than 16 bits.
1360 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1361 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1362 */
1363
1364 static u32 __extract(u8 *report, unsigned offset, int n)
1365 {
1366 unsigned int idx = offset / 8;
1367 unsigned int bit_nr = 0;
1368 unsigned int bit_shift = offset % 8;
1369 int bits_to_copy = 8 - bit_shift;
1370 u32 value = 0;
1371 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1372
1373 while (n > 0) {
1374 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1375 n -= bits_to_copy;
1376 bit_nr += bits_to_copy;
1377 bits_to_copy = 8;
1378 bit_shift = 0;
1379 idx++;
1380 }
1381
1382 return value & mask;
1383 }
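/*
* Worked example (illustrative): with report bytes { 0x34, 0x12 },
* __extract(report, 4, 8) takes the top nibble of byte 0 (0x3), adds
* the low nibble of byte 1 shifted up (0x20), masks the result to
* 8 bits and returns 0x23.
*/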
1384
1385 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1386 unsigned offset, unsigned n)
1387 {
1388 if (n > 32) {
1389 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1390 __func__, n, current->comm);
1391 n = 32;
1392 }
1393
1394 return __extract(report, offset, n);
1395 }
1396 EXPORT_SYMBOL_GPL(hid_field_extract);
1397
1398 /*
1399 * "implement" : set bits in a little endian bit stream.
1400 * Same concepts as "extract" (see comments above).
1401 * The data mangled in the bit stream remains in little endian
1402 * order the whole time. It makes more sense to talk about
1403 * endianness of register values by considering a register
1404 * a "cached" copy of the little endian bit stream.
1405 */
1406
1407 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1408 {
1409 unsigned int idx = offset / 8;
1410 unsigned int bit_shift = offset % 8;
1411 int bits_to_set = 8 - bit_shift;
1412
1413 while (n - bits_to_set >= 0) {
1414 report[idx] &= ~(0xff << bit_shift);
1415 report[idx] |= value << bit_shift;
1416 value >>= bits_to_set;
1417 n -= bits_to_set;
1418 bits_to_set = 8;
1419 bit_shift = 0;
1420 idx++;
1421 }
1422
1423 /* last nibble */
1424 if (n) {
1425 u8 bit_mask = ((1U << n) - 1);
1426 report[idx] &= ~(bit_mask << bit_shift);
1427 report[idx] |= value << bit_shift;
1428 }
1429 }
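/*
* This is the inverse of __extract() above: starting from a zeroed
* two-byte buffer, __implement(report, 4, 8, 0x23) leaves
* { 0x30, 0x02 } behind, and __extract(report, 4, 8) reads 0x23 back.
*/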
1430
1431 static void implement(const struct hid_device *hid, u8 *report,
1432 unsigned offset, unsigned n, u32 value)
1433 {
1434 if (unlikely(n > 32)) {
1435 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1436 __func__, n, current->comm);
1437 n = 32;
1438 } else if (n < 32) {
1439 u32 m = (1U << n) - 1;
1440
1441 if (unlikely(value > m)) {
1442 hid_warn(hid,
1443 "%s() called with too large value %d (n: %d)! (%s)\n",
1444 __func__, value, n, current->comm);
1445 value &= m;
1446 }
1447 }
1448
1449 __implement(report, offset, n, value);
1450 }
1451
1452 /*
1453 * Search an array for a value.
1454 */
1455
1456 static int search(__s32 *array, __s32 value, unsigned n)
1457 {
1458 while (n--) {
1459 if (*array++ == value)
1460 return 0;
1461 }
1462 return -1;
1463 }
1464
1465 /**
1466 * hid_match_report - check if driver's raw_event should be called
1467 *
1468 * @hid: hid device
1469 * @report: hid report to match against
1470 *
1471 * compare hid->driver->report_table->report_type to report->type
1472 */
1473 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1474 {
1475 const struct hid_report_id *id = hid->driver->report_table;
1476
1477 if (!id) /* NULL means all */
1478 return 1;
1479
1480 for (; id->report_type != HID_TERMINATOR; id++)
1481 if (id->report_type == HID_ANY_ID ||
1482 id->report_type == report->type)
1483 return 1;
1484 return 0;
1485 }
1486
1487 /**
1488 * hid_match_usage - check if driver's event should be called
1489 *
1490 * @hid: hid device
1491 * @usage: usage to match against
1492 *
1493 * compare hid->driver->usage_table->usage_{type,code} to
1494 * usage->usage_{type,code}
1495 */
1496 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1497 {
1498 const struct hid_usage_id *id = hid->driver->usage_table;
1499
1500 if (!id) /* NULL means all */
1501 return 1;
1502
1503 for (; id->usage_type != HID_ANY_ID - 1; id++)
1504 if ((id->usage_hid == HID_ANY_ID ||
1505 id->usage_hid == usage->hid) &&
1506 (id->usage_type == HID_ANY_ID ||
1507 id->usage_type == usage->type) &&
1508 (id->usage_code == HID_ANY_ID ||
1509 id->usage_code == usage->code))
1510 return 1;
1511 return 0;
1512 }
1513
1514 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1515 struct hid_usage *usage, __s32 value, int interrupt)
1516 {
1517 struct hid_driver *hdrv = hid->driver;
1518 int ret;
1519
1520 if (!list_empty(&hid->debug_list))
1521 hid_dump_input(hid, usage, value);
1522
1523 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1524 ret = hdrv->event(hid, field, usage, value);
1525 if (ret != 0) {
1526 if (ret < 0)
1527 hid_err(hid, "%s's event failed with %d\n",
1528 hdrv->name, ret);
1529 return;
1530 }
1531 }
1532
1533 if (hid->claimed & HID_CLAIMED_INPUT)
1534 hidinput_hid_event(hid, field, usage, value);
1535 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1536 hid->hiddev_hid_event(hid, field, usage, value);
1537 }
1538
1539 /*
1540 * Checks if the given value is valid within this field
1541 */
1542 static inline int hid_array_value_is_valid(struct hid_field *field,
1543 __s32 value)
1544 {
1545 __s32 min = field->logical_minimum;
1546
1547 /*
1548 * Value needs to be between logical min and max, and
1549 * (value - min) is used as an index in the usage array.
1550 * This array is of size field->maxusage
1551 */
1552 return value >= min &&
1553 value <= field->logical_maximum &&
1554 value - min < field->maxusage;
1555 }
1556
1557 /*
1558 * Fetch the field from the data. The field content is stored for next
1559 * report processing (we do differential reporting to the layer).
1560 */
1561 static void hid_input_fetch_field(struct hid_device *hid,
1562 struct hid_field *field,
1563 __u8 *data)
1564 {
1565 unsigned n;
1566 unsigned count = field->report_count;
1567 unsigned offset = field->report_offset;
1568 unsigned size = field->report_size;
1569 __s32 min = field->logical_minimum;
1570 __s32 *value;
1571
1572 value = field->new_value;
1573 memset(value, 0, count * sizeof(__s32));
1574 field->ignored = false;
1575
1576 for (n = 0; n < count; n++) {
1577
1578 value[n] = min < 0 ?
1579 snto32(hid_field_extract(hid, data, offset + n * size,
1580 size), size) :
1581 hid_field_extract(hid, data, offset + n * size, size);
1582
1583 /* Ignore report if ErrorRollOver */
1584 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1585 hid_array_value_is_valid(field, value[n]) &&
1586 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1587 field->ignored = true;
1588 return;
1589 }
1590 }
1591 }
1592
1593 /*
1594 * Process a received variable field.
1595 */
1596
1597 static void hid_input_var_field(struct hid_device *hid,
1598 struct hid_field *field,
1599 int interrupt)
1600 {
1601 unsigned int count = field->report_count;
1602 __s32 *value = field->new_value;
1603 unsigned int n;
1604
1605 for (n = 0; n < count; n++)
1606 hid_process_event(hid,
1607 field,
1608 &field->usage[n],
1609 value[n],
1610 interrupt);
1611
1612 memcpy(field->value, value, count * sizeof(__s32));
1613 }
1614
1615 /*
1616 * Process a received array field. The field content is stored for
1617 * next report processing (we do differential reporting to the layer).
1618 */
1619
1620 static void hid_input_array_field(struct hid_device *hid,
1621 struct hid_field *field,
1622 int interrupt)
1623 {
1624 unsigned int n;
1625 unsigned int count = field->report_count;
1626 __s32 min = field->logical_minimum;
1627 __s32 *value;
1628
1629 value = field->new_value;
1630
1631 /* ErrorRollOver */
1632 if (field->ignored)
1633 return;
1634
1635 for (n = 0; n < count; n++) {
1636 if (hid_array_value_is_valid(field, field->value[n]) &&
1637 search(value, field->value[n], count))
1638 hid_process_event(hid,
1639 field,
1640 &field->usage[field->value[n] - min],
1641 0,
1642 interrupt);
1643
1644 if (hid_array_value_is_valid(field, value[n]) &&
1645 search(field->value, value[n], count))
1646 hid_process_event(hid,
1647 field,
1648 &field->usage[value[n] - min],
1649 1,
1650 interrupt);
1651 }
1652
1653 memcpy(field->value, value, count * sizeof(__s32));
1654 }
1655
1656 /*
1657 * Analyse a received report, and fetch the data from it. The field
1658 * content is stored for next report processing (we do differential
1659 * reporting to the layer).
1660 */
1661 static void hid_process_report(struct hid_device *hid,
1662 struct hid_report *report,
1663 __u8 *data,
1664 int interrupt)
1665 {
1666 unsigned int a;
1667 struct hid_field_entry *entry;
1668 struct hid_field *field;
1669
1670 /* first retrieve all incoming values in data */
1671 for (a = 0; a < report->maxfield; a++)
1672 hid_input_fetch_field(hid, report->field[a], data);
1673
1674 if (!list_empty(&report->field_entry_list)) {
1675 /* INPUT_REPORT, we have a priority list of fields */
1676 list_for_each_entry(entry,
1677 &report->field_entry_list,
1678 list) {
1679 field = entry->field;
1680
1681 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1682 hid_process_event(hid,
1683 field,
1684 &field->usage[entry->index],
1685 field->new_value[entry->index],
1686 interrupt);
1687 else
1688 hid_input_array_field(hid, field, interrupt);
1689 }
1690
1691 /* we need to do the memcpy at the end for var items */
1692 for (a = 0; a < report->maxfield; a++) {
1693 field = report->field[a];
1694
1695 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1696 memcpy(field->value, field->new_value,
1697 field->report_count * sizeof(__s32));
1698 }
1699 } else {
1700 /* FEATURE_REPORT, regular processing */
1701 for (a = 0; a < report->maxfield; a++) {
1702 field = report->field[a];
1703
1704 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1705 hid_input_var_field(hid, field, interrupt);
1706 else
1707 hid_input_array_field(hid, field, interrupt);
1708 }
1709 }
1710 }
1711
1712 /*
1713 * Insert a given usage_index in a field in the list
1714 * of processed usages in the report.
1715 *
1716 * The elements of lower priority score are processed
1717 * first.
1718 */
1719 static void __hid_insert_field_entry(struct hid_device *hid,
1720 struct hid_report *report,
1721 struct hid_field_entry *entry,
1722 struct hid_field *field,
1723 unsigned int usage_index)
1724 {
1725 struct hid_field_entry *next;
1726
1727 entry->field = field;
1728 entry->index = usage_index;
1729 entry->priority = field->usages_priorities[usage_index];
1730
1731 /* insert the element at the correct position */
1732 list_for_each_entry(next,
1733 &report->field_entry_list,
1734 list) {
1735 /*
1736 * the priority of our element is strictly higher
1737 * than the next one, insert it before
1738 */
1739 if (entry->priority > next->priority) {
1740 list_add_tail(&entry->list, &next->list);
1741 return;
1742 }
1743 }
1744
1745 /* lowest priority score: insert at the end */
1746 list_add_tail(&entry->list, &report->field_entry_list);
1747 }
1748
1749 static void hid_report_process_ordering(struct hid_device *hid,
1750 struct hid_report *report)
1751 {
1752 struct hid_field *field;
1753 struct hid_field_entry *entries;
1754 unsigned int a, u, usages;
1755 unsigned int count = 0;
1756
1757 /* count the number of individual fields in the report */
1758 for (a = 0; a < report->maxfield; a++) {
1759 field = report->field[a];
1760
1761 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1762 count += field->report_count;
1763 else
1764 count++;
1765 }
1766
1767 /* allocate the memory to process the fields */
1768 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1769 if (!entries)
1770 return;
1771
1772 report->field_entries = entries;
1773
1774 /*
1775 * walk through all fields in the report and
1776 * store them by priority order in report->field_entry_list
1777 *
1778 * - Var elements are individualized (field + usage_index)
1779 * - Arrays are taken as one, we can not chose an order for them
1780 */
1781 usages = 0;
1782 for (a = 0; a < report->maxfield; a++) {
1783 field = report->field[a];
1784
1785 if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1786 for (u = 0; u < field->report_count; u++) {
1787 __hid_insert_field_entry(hid, report,
1788 &entries[usages],
1789 field, u);
1790 usages++;
1791 }
1792 } else {
1793 __hid_insert_field_entry(hid, report, &entries[usages],
1794 field, 0);
1795 usages++;
1796 }
1797 }
1798 }
1799
1800 static void hid_process_ordering(struct hid_device *hid)
1801 {
1802 struct hid_report *report;
1803 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1804
1805 list_for_each_entry(report, &report_enum->report_list, list)
1806 hid_report_process_ordering(hid, report);
1807 }
1808
1809 /*
1810 * Output the field into the report.
1811 */
1812
1813 static void hid_output_field(const struct hid_device *hid,
1814 struct hid_field *field, __u8 *data)
1815 {
1816 unsigned count = field->report_count;
1817 unsigned offset = field->report_offset;
1818 unsigned size = field->report_size;
1819 unsigned n;
1820
1821 for (n = 0; n < count; n++) {
1822 if (field->logical_minimum < 0) /* signed values */
1823 implement(hid, data, offset + n * size, size,
1824 s32ton(field->value[n], size));
1825 else /* unsigned values */
1826 implement(hid, data, offset + n * size, size,
1827 field->value[n]);
1828 }
1829 }
1830
1831 /*
1832 * Compute the size of a report.
1833 */
1834 static size_t hid_compute_report_size(struct hid_report *report)
1835 {
1836 if (report->size)
1837 return ((report->size - 1) >> 3) + 1;
1838
1839 return 0;
1840 }
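/*
* E.g. a report whose fields add up to 12 bits occupies
* ((12 - 1) >> 3) + 1 = 2 bytes on the wire (excluding the report ID).
*/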
1841
1842 /*
1843 * Create a report. 'data' has to be allocated using
1844 * hid_alloc_report_buf() so that it has proper size.
1845 */
1846
1847 void hid_output_report(struct hid_report *report, __u8 *data)
1848 {
1849 unsigned n;
1850
1851 if (report->id > 0)
1852 *data++ = report->id;
1853
1854 memset(data, 0, hid_compute_report_size(report));
1855 for (n = 0; n < report->maxfield; n++)
1856 hid_output_field(report->device, report->field[n], data);
1857 }
1858 EXPORT_SYMBOL_GPL(hid_output_report);
1859
1860 /*
1861 * Allocator for buffer that is going to be passed to hid_output_report()
1862 */
1863 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1864 {
1865 /*
1866 * 7 extra bytes are necessary to achieve proper functionality
1867 * of implement() working on 8 byte chunks
1868 * 1 extra byte for the report ID if it is null (not used) so
1869 * we can reserve that extra byte in the first position of the buffer
1870 * when sending it to .raw_request()
1871 */
1872
1873 u32 len = hid_report_len(report) + 7 + (report->id == 0);
1874
1875 return kzalloc(len, flags);
1876 }
1877 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
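/*
 * Illustrative sketch (not part of the original source): a typical caller
 * pairs hid_alloc_report_buf() with hid_output_report() and a raw request.
 * The extra padding described above is handled by the allocator, so only
 * hid_report_len() bytes need to be transferred:
 *
 *	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	ret = hid_hw_raw_request(hdev, report->id, buf, hid_report_len(report),
 *				 report->type, HID_REQ_SET_REPORT);
 *	kfree(buf);
 */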
1878
1879 /*
1880 * Set a field value. The report this field belongs to has to be
1881 * created and transferred to the device, to set this value in the
1882 * device.
1883 */
1884
1885 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1886 {
1887 unsigned size;
1888
1889 if (!field)
1890 return -1;
1891
1892 size = field->report_size;
1893
1894 hid_dump_input(field->report->device, field->usage + offset, value);
1895
1896 if (offset >= field->report_count) {
1897 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1898 offset, field->report_count);
1899 return -1;
1900 }
1901 if (field->logical_minimum < 0) {
1902 if (value != snto32(s32ton(value, size), size)) {
1903 hid_err(field->report->device, "value %d is out of range\n", value);
1904 return -1;
1905 }
1906 }
1907 field->value[offset] = value;
1908 return 0;
1909 }
1910 EXPORT_SYMBOL_GPL(hid_set_field);
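/*
 * Illustrative sketch (not part of the original source): assuming the driver
 * already holds a struct hid_report *report, the usual pattern is to fill the
 * field value(s) and then push the report to the device:
 *
 *	hid_set_field(report->field[0], 0, value);
 *	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
 */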
1911
1912 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1913 unsigned int application, unsigned int usage)
1914 {
1915 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1916 struct hid_report *report;
1917 int i, j;
1918
1919 list_for_each_entry(report, report_list, list) {
1920 if (report->application != application)
1921 continue;
1922
1923 for (i = 0; i < report->maxfield; i++) {
1924 struct hid_field *field = report->field[i];
1925
1926 for (j = 0; j < field->maxusage; j++) {
1927 if (field->usage[j].hid == usage)
1928 return field;
1929 }
1930 }
1931 }
1932
1933 return NULL;
1934 }
1935 EXPORT_SYMBOL_GPL(hid_find_field);
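/*
 * Illustrative sketch (not part of the original source): looking up the field
 * that carries a given usage within an application collection. HID_GD_GAMEPAD
 * is a real usage; the 0xff000001 value stands in for a hypothetical
 * vendor-defined usage:
 *
 *	struct hid_field *field;
 *
 *	field = hid_find_field(hdev, HID_OUTPUT_REPORT, HID_GD_GAMEPAD,
 *			       0xff000001);
 *	if (!field)
 *		return -ENODEV;
 */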
1936
1937 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1938 const u8 *data)
1939 {
1940 struct hid_report *report;
1941 unsigned int n = 0; /* Normally report number is 0 */
1942
1943 /* Device uses numbered reports, data[0] is report number */
1944 if (report_enum->numbered)
1945 n = *data;
1946
1947 report = report_enum->report_id_hash[n];
1948 if (report == NULL)
1949 dbg_hid("undefined report_id %u received\n", n);
1950
1951 return report;
1952 }
1953
1954 /*
1955 * Implement a generic .request() callback, using .raw_request()
1956 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1957 */
1958 int __hid_request(struct hid_device *hid, struct hid_report *report,
1959 enum hid_class_request reqtype)
1960 {
1961 char *buf, *data_buf;
1962 int ret;
1963 u32 len;
1964
1965 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1966 if (!buf)
1967 return -ENOMEM;
1968
1969 data_buf = buf;
1970 len = hid_report_len(report);
1971
1972 if (report->id == 0) {
1973 /* reserve the first byte for the report ID */
1974 data_buf++;
1975 len++;
1976 }
1977
1978 if (reqtype == HID_REQ_SET_REPORT)
1979 hid_output_report(report, data_buf);
1980
1981 ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
1982 if (ret < 0) {
1983 dbg_hid("unable to complete request: %d\n", ret);
1984 goto out;
1985 }
1986
1987 if (reqtype == HID_REQ_GET_REPORT)
1988 hid_input_report(hid, report->type, buf, ret, 0);
1989
1990 ret = 0;
1991
1992 out:
1993 kfree(buf);
1994 return ret;
1995 }
1996 EXPORT_SYMBOL_GPL(__hid_request);
1997
1998 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
1999 int interrupt)
2000 {
2001 struct hid_report_enum *report_enum = hid->report_enum + type;
2002 struct hid_report *report;
2003 struct hid_driver *hdrv;
2004 int max_buffer_size = HID_MAX_BUFFER_SIZE;
2005 u32 rsize, csize = size;
2006 u8 *cdata = data;
2007 int ret = 0;
2008
2009 report = hid_get_report(report_enum, data);
2010 if (!report)
2011 goto out;
2012
2013 if (report_enum->numbered) {
2014 cdata++;
2015 csize--;
2016 }
2017
2018 rsize = hid_compute_report_size(report);
2019
2020 if (hid->ll_driver->max_buffer_size)
2021 max_buffer_size = hid->ll_driver->max_buffer_size;
2022
2023 if (report_enum->numbered && rsize >= max_buffer_size)
2024 rsize = max_buffer_size - 1;
2025 else if (rsize > max_buffer_size)
2026 rsize = max_buffer_size;
2027
2028 if (csize < rsize) {
2029 dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2030 csize, rsize);
2031 memset(cdata + csize, 0, rsize - csize);
2032 }
2033
2034 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2035 hid->hiddev_report_event(hid, report);
2036 if (hid->claimed & HID_CLAIMED_HIDRAW) {
2037 ret = hidraw_report_event(hid, data, size);
2038 if (ret)
2039 goto out;
2040 }
2041
2042 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2043 hid_process_report(hid, report, cdata, interrupt);
2044 hdrv = hid->driver;
2045 if (hdrv && hdrv->report)
2046 hdrv->report(hid, report);
2047 }
2048
2049 if (hid->claimed & HID_CLAIMED_INPUT)
2050 hidinput_report_event(hid, report);
2051 out:
2052 return ret;
2053 }
2054 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2055
2056
2057 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2058 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2059 bool lock_already_taken)
2060 {
2061 struct hid_report_enum *report_enum;
2062 struct hid_driver *hdrv;
2063 struct hid_report *report;
2064 int ret = 0;
2065
2066 if (!hid)
2067 return -ENODEV;
2068
2069 ret = down_trylock(&hid->driver_input_lock);
2070 if (lock_already_taken && !ret) {
2071 up(&hid->driver_input_lock);
2072 return -EINVAL;
2073 } else if (!lock_already_taken && ret) {
2074 return -EBUSY;
2075 }
2076
2077 if (!hid->driver) {
2078 ret = -ENODEV;
2079 goto unlock;
2080 }
2081 report_enum = hid->report_enum + type;
2082 hdrv = hid->driver;
2083
2084 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2085 if (IS_ERR(data)) {
2086 ret = PTR_ERR(data);
2087 goto unlock;
2088 }
2089
2090 if (!size) {
2091 dbg_hid("empty report\n");
2092 ret = -1;
2093 goto unlock;
2094 }
2095
2096 /* Avoid unnecessary overhead if debugfs is disabled */
2097 if (!list_empty(&hid->debug_list))
2098 hid_dump_report(hid, type, data, size);
2099
2100 report = hid_get_report(report_enum, data);
2101
2102 if (!report) {
2103 ret = -1;
2104 goto unlock;
2105 }
2106
2107 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2108 ret = hdrv->raw_event(hid, report, data, size);
2109 if (ret < 0)
2110 goto unlock;
2111 }
2112
2113 ret = hid_report_raw_event(hid, type, data, size, interrupt);
2114
2115 unlock:
2116 if (!lock_already_taken)
2117 up(&hid->driver_input_lock);
2118 return ret;
2119 }
2120
2121 /**
2122 * hid_input_report - report data from lower layer (usb, bt...)
2123 *
2124 * @hid: hid device
2125 * @type: HID report type (HID_*_REPORT)
2126 * @data: report contents
2127 * @size: size of data parameter
2128 * @interrupt: distinguish between interrupt and control transfers
2129 *
2130 * This is the data entry point for lower layers.
2131 */
2132 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2133 int interrupt)
2134 {
2135 return __hid_input_report(hid, type, data, size, interrupt, 0,
2136 false, /* from_bpf */
2137 false /* lock_already_taken */);
2138 }
2139 EXPORT_SYMBOL_GPL(hid_input_report);
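/*
 * Illustrative sketch (not part of the original source): a transport driver
 * typically calls hid_input_report() from its interrupt-IN completion handler
 * with the raw bytes it just received:
 *
 *	hid_input_report(hdev, HID_INPUT_REPORT, data, len, 1);
 */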
2140
2141 bool hid_match_one_id(const struct hid_device *hdev,
2142 const struct hid_device_id *id)
2143 {
2144 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2145 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2146 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2147 (id->product == HID_ANY_ID || id->product == hdev->product);
2148 }
2149
2150 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2151 const struct hid_device_id *id)
2152 {
2153 for (; id->bus; id++)
2154 if (hid_match_one_id(hdev, id))
2155 return id;
2156
2157 return NULL;
2158 }
2159 EXPORT_SYMBOL_GPL(hid_match_id);
2160
2161 static const struct hid_device_id hid_hiddev_list[] = {
2162 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2163 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2164 { }
2165 };
2166
2167 static bool hid_hiddev(struct hid_device *hdev)
2168 {
2169 return !!hid_match_id(hdev, hid_hiddev_list);
2170 }
2171
2172
2173 static ssize_t
2174 read_report_descriptor(struct file *filp, struct kobject *kobj,
2175 struct bin_attribute *attr,
2176 char *buf, loff_t off, size_t count)
2177 {
2178 struct device *dev = kobj_to_dev(kobj);
2179 struct hid_device *hdev = to_hid_device(dev);
2180
2181 if (off >= hdev->rsize)
2182 return 0;
2183
2184 if (off + count > hdev->rsize)
2185 count = hdev->rsize - off;
2186
2187 memcpy(buf, hdev->rdesc + off, count);
2188
2189 return count;
2190 }
2191
2192 static ssize_t
2193 show_country(struct device *dev, struct device_attribute *attr,
2194 char *buf)
2195 {
2196 struct hid_device *hdev = to_hid_device(dev);
2197
2198 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2199 }
2200
2201 static struct bin_attribute dev_bin_attr_report_desc = {
2202 .attr = { .name = "report_descriptor", .mode = 0444 },
2203 .read = read_report_descriptor,
2204 .size = HID_MAX_DESCRIPTOR_SIZE,
2205 };
2206
2207 static const struct device_attribute dev_attr_country = {
2208 .attr = { .name = "country", .mode = 0444 },
2209 .show = show_country,
2210 };
2211
2212 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2213 {
2214 static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2215 "Joystick", "Gamepad", "Keyboard", "Keypad",
2216 "Multi-Axis Controller"
2217 };
2218 const char *type, *bus;
2219 char buf[64] = "";
2220 unsigned int i;
2221 int len;
2222 int ret;
2223
2224 ret = hid_bpf_connect_device(hdev);
2225 if (ret)
2226 return ret;
2227
2228 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2229 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2230 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2231 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2232 if (hdev->bus != BUS_USB)
2233 connect_mask &= ~HID_CONNECT_HIDDEV;
2234 if (hid_hiddev(hdev))
2235 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2236
2237 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2238 connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2239 hdev->claimed |= HID_CLAIMED_INPUT;
2240
2241 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2242 !hdev->hiddev_connect(hdev,
2243 connect_mask & HID_CONNECT_HIDDEV_FORCE))
2244 hdev->claimed |= HID_CLAIMED_HIDDEV;
2245 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2246 hdev->claimed |= HID_CLAIMED_HIDRAW;
2247
2248 if (connect_mask & HID_CONNECT_DRIVER)
2249 hdev->claimed |= HID_CLAIMED_DRIVER;
2250
2251 /* Drivers with the ->raw_event callback set are not required to connect
2252 * to any other listener. */
2253 if (!hdev->claimed && !hdev->driver->raw_event) {
2254 hid_err(hdev, "device has no listeners, quitting\n");
2255 return -ENODEV;
2256 }
2257
2258 hid_process_ordering(hdev);
2259
2260 if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2261 (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2262 hdev->ff_init(hdev);
2263
2264 len = 0;
2265 if (hdev->claimed & HID_CLAIMED_INPUT)
2266 len += sprintf(buf + len, "input");
2267 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2268 len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2269 ((struct hiddev *)hdev->hiddev)->minor);
2270 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2271 len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2272 ((struct hidraw *)hdev->hidraw)->minor);
2273
2274 type = "Device";
2275 for (i = 0; i < hdev->maxcollection; i++) {
2276 struct hid_collection *col = &hdev->collection[i];
2277 if (col->type == HID_COLLECTION_APPLICATION &&
2278 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2279 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2280 type = types[col->usage & 0xffff];
2281 break;
2282 }
2283 }
2284
2285 switch (hdev->bus) {
2286 case BUS_USB:
2287 bus = "USB";
2288 break;
2289 case BUS_BLUETOOTH:
2290 bus = "BLUETOOTH";
2291 break;
2292 case BUS_I2C:
2293 bus = "I2C";
2294 break;
2295 case BUS_VIRTUAL:
2296 bus = "VIRTUAL";
2297 break;
2298 case BUS_INTEL_ISHTP:
2299 case BUS_AMD_SFH:
2300 bus = "SENSOR HUB";
2301 break;
2302 default:
2303 bus = "<UNKNOWN>";
2304 }
2305
2306 ret = device_create_file(&hdev->dev, &dev_attr_country);
2307 if (ret)
2308 hid_warn(hdev,
2309 "can't create sysfs country code attribute err: %d\n", ret);
2310
2311 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2312 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2313 type, hdev->name, hdev->phys);
2314
2315 return 0;
2316 }
2317 EXPORT_SYMBOL_GPL(hid_connect);
2318
2319 void hid_disconnect(struct hid_device *hdev)
2320 {
2321 device_remove_file(&hdev->dev, &dev_attr_country);
2322 if (hdev->claimed & HID_CLAIMED_INPUT)
2323 hidinput_disconnect(hdev);
2324 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2325 hdev->hiddev_disconnect(hdev);
2326 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2327 hidraw_disconnect(hdev);
2328 hdev->claimed = 0;
2329
2330 hid_bpf_disconnect_device(hdev);
2331 }
2332 EXPORT_SYMBOL_GPL(hid_disconnect);
2333
2334 /**
2335 * hid_hw_start - start underlying HW
2336 * @hdev: hid device
2337 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2338 *
2339 * Call this in the probe function *after* hid_parse. This will set up HW
2340 * buffers and start the device (if not deferred to device open).
2341 * hid_hw_stop must be called if this was successful.
2342 */
2343 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2344 {
2345 int error;
2346
2347 error = hdev->ll_driver->start(hdev);
2348 if (error)
2349 return error;
2350
2351 if (connect_mask) {
2352 error = hid_connect(hdev, connect_mask);
2353 if (error) {
2354 hdev->ll_driver->stop(hdev);
2355 return error;
2356 }
2357 }
2358
2359 return 0;
2360 }
2361 EXPORT_SYMBOL_GPL(hid_hw_start);
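/*
 * Illustrative sketch (not part of the original source): the canonical probe
 * sequence parses the report descriptor first and only then starts the
 * hardware; example_probe() is a hypothetical driver callback:
 *
 *	static int example_probe(struct hid_device *hdev,
 *				 const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */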
2362
2363 /**
2364 * hid_hw_stop - stop underlying HW
2365 * @hdev: hid device
2366 *
2367 * This is usually called from the remove function, or from probe when
2368 * something failed after hid_hw_start had already been called.
2369 */
2370 void hid_hw_stop(struct hid_device *hdev)
2371 {
2372 hid_disconnect(hdev);
2373 hdev->ll_driver->stop(hdev);
2374 }
2375 EXPORT_SYMBOL_GPL(hid_hw_stop);
2376
2377 /**
2378 * hid_hw_open - signal underlying HW to start delivering events
2379 * @hdev: hid device
2380 *
2381 * Tell underlying HW to start delivering events from the device.
2382 * This function should be called sometime after successful call
2383 * to hid_hw_start().
2384 */
2385 int hid_hw_open(struct hid_device *hdev)
2386 {
2387 int ret;
2388
2389 ret = mutex_lock_killable(&hdev->ll_open_lock);
2390 if (ret)
2391 return ret;
2392
2393 if (!hdev->ll_open_count++) {
2394 ret = hdev->ll_driver->open(hdev);
2395 if (ret)
2396 hdev->ll_open_count--;
2397 }
2398
2399 mutex_unlock(&hdev->ll_open_lock);
2400 return ret;
2401 }
2402 EXPORT_SYMBOL_GPL(hid_hw_open);
2403
2404 /**
2405 * hid_hw_close - signal underlying HW to stop delivering events
2406 *
2407 * @hdev: hid device
2408 *
2409 * This function indicates that we are not interested in the events
2410 * from this device anymore. Delivery of events may or may not stop,
2411 * depending on the number of users still outstanding.
2412 */
2413 void hid_hw_close(struct hid_device *hdev)
2414 {
2415 mutex_lock(&hdev->ll_open_lock);
2416 if (!--hdev->ll_open_count)
2417 hdev->ll_driver->close(hdev);
2418 mutex_unlock(&hdev->ll_open_lock);
2419 }
2420 EXPORT_SYMBOL_GPL(hid_hw_close);
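/*
 * Illustrative sketch (not part of the original source): hid_hw_open() and
 * hid_hw_close() are reference counted, so a driver that needs event delivery
 * while no hidinput/hidraw client is open simply brackets that window:
 *
 *	ret = hid_hw_open(hdev);
 *	if (ret)
 *		return ret;
 *	... events are now delivered, e.g. to the driver's raw_event() ...
 *	hid_hw_close(hdev);
 */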
2421
2422 /**
2423 * hid_hw_request - send report request to device
2424 *
2425 * @hdev: hid device
2426 * @report: report to send
2427 * @reqtype: hid request type
2428 */
2429 void hid_hw_request(struct hid_device *hdev,
2430 struct hid_report *report, enum hid_class_request reqtype)
2431 {
2432 if (hdev->ll_driver->request)
2433 return hdev->ll_driver->request(hdev, report, reqtype);
2434
2435 __hid_request(hdev, report, reqtype);
2436 }
2437 EXPORT_SYMBOL_GPL(hid_hw_request);
2438
2439 int __hid_hw_raw_request(struct hid_device *hdev,
2440 unsigned char reportnum, __u8 *buf,
2441 size_t len, enum hid_report_type rtype,
2442 enum hid_class_request reqtype,
2443 u64 source, bool from_bpf)
2444 {
2445 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2446 int ret;
2447
2448 if (hdev->ll_driver->max_buffer_size)
2449 max_buffer_size = hdev->ll_driver->max_buffer_size;
2450
2451 if (len < 1 || len > max_buffer_size || !buf)
2452 return -EINVAL;
2453
2454 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2455 reqtype, source, from_bpf);
2456 if (ret)
2457 return ret;
2458
2459 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2460 rtype, reqtype);
2461 }
2462
2463 /**
2464 * hid_hw_raw_request - send report request to device
2465 *
2466 * @hdev: hid device
2467 * @reportnum: report ID
2468 * @buf: in/out data to transfer
2469 * @len: length of buf
2470 * @rtype: HID report type
2471 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2472 *
2473 * Return: count of data transferred, negative if error
2474 *
2475 * Same behavior as hid_hw_request, but with raw buffers instead.
2476 */
2477 int hid_hw_raw_request(struct hid_device *hdev,
2478 unsigned char reportnum, __u8 *buf,
2479 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2480 {
2481 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2482 }
2483 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
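/*
 * Illustrative sketch (not part of the original source): fetching a feature
 * report into a caller-supplied kmalloc'ed (DMA-safe) buffer; the report ID
 * and length below are hypothetical:
 *
 *	u8 *buf = kmalloc(16, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x02, buf, 16,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	kfree(buf);
 */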
2484
2485 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2486 bool from_bpf)
2487 {
2488 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2489 int ret;
2490
2491 if (hdev->ll_driver->max_buffer_size)
2492 max_buffer_size = hdev->ll_driver->max_buffer_size;
2493
2494 if (len < 1 || len > max_buffer_size || !buf)
2495 return -EINVAL;
2496
2497 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2498 if (ret)
2499 return ret;
2500
2501 if (hdev->ll_driver->output_report)
2502 return hdev->ll_driver->output_report(hdev, buf, len);
2503
2504 return -ENOSYS;
2505 }
2506
2507 /**
2508 * hid_hw_output_report - send output report to device
2509 *
2510 * @hdev: hid device
2511 * @buf: raw data to transfer
2512 * @len: length of buf
2513 *
2514 * Return: count of data transferred, negative if error
2515 */
2516 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2517 {
2518 return __hid_hw_output_report(hdev, buf, len, 0, false);
2519 }
2520 EXPORT_SYMBOL_GPL(hid_hw_output_report);
2521
2522 #ifdef CONFIG_PM
2523 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2524 {
2525 if (hdev->driver && hdev->driver->suspend)
2526 return hdev->driver->suspend(hdev, state);
2527
2528 return 0;
2529 }
2530 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2531
2532 int hid_driver_reset_resume(struct hid_device *hdev)
2533 {
2534 if (hdev->driver && hdev->driver->reset_resume)
2535 return hdev->driver->reset_resume(hdev);
2536
2537 return 0;
2538 }
2539 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2540
2541 int hid_driver_resume(struct hid_device *hdev)
2542 {
2543 if (hdev->driver && hdev->driver->resume)
2544 return hdev->driver->resume(hdev);
2545
2546 return 0;
2547 }
2548 EXPORT_SYMBOL_GPL(hid_driver_resume);
2549 #endif /* CONFIG_PM */
2550
2551 struct hid_dynid {
2552 struct list_head list;
2553 struct hid_device_id id;
2554 };
2555
2556 /**
2557 * new_id_store - add a new HID device ID to this driver and re-probe devices
2558 * @drv: target device driver
2559 * @buf: buffer for scanning device ID data
2560 * @count: input size
2561 *
2562 * Adds a new dynamic hid device ID to this driver,
2563 * and causes the driver to probe for all devices again.
2564 */
2565 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2566 size_t count)
2567 {
2568 struct hid_driver *hdrv = to_hid_driver(drv);
2569 struct hid_dynid *dynid;
2570 __u32 bus, vendor, product;
2571 unsigned long driver_data = 0;
2572 int ret;
2573
2574 ret = sscanf(buf, "%x %x %x %lx",
2575 &bus, &vendor, &product, &driver_data);
2576 if (ret < 3)
2577 return -EINVAL;
2578
2579 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2580 if (!dynid)
2581 return -ENOMEM;
2582
2583 dynid->id.bus = bus;
2584 dynid->id.group = HID_GROUP_ANY;
2585 dynid->id.vendor = vendor;
2586 dynid->id.product = product;
2587 dynid->id.driver_data = driver_data;
2588
2589 spin_lock(&hdrv->dyn_lock);
2590 list_add_tail(&dynid->list, &hdrv->dyn_list);
2591 spin_unlock(&hdrv->dyn_lock);
2592
2593 ret = driver_attach(&hdrv->driver);
2594
2595 return ret ? : count;
2596 }
2597 static DRIVER_ATTR_WO(new_id);
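/*
 * Illustrative sketch (not part of the original source): from userspace the
 * attribute takes "bus vendor product [driver_data]" in hex. Binding a
 * hypothetical USB (bus 0003) device 046d:c52b to a driver named "foo":
 *
 *	echo "0003 046d c52b" > /sys/bus/hid/drivers/foo/new_id
 */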
2598
2599 static struct attribute *hid_drv_attrs[] = {
2600 &driver_attr_new_id.attr,
2601 NULL,
2602 };
2603 ATTRIBUTE_GROUPS(hid_drv);
2604
2605 static void hid_free_dynids(struct hid_driver *hdrv)
2606 {
2607 struct hid_dynid *dynid, *n;
2608
2609 spin_lock(&hdrv->dyn_lock);
2610 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2611 list_del(&dynid->list);
2612 kfree(dynid);
2613 }
2614 spin_unlock(&hdrv->dyn_lock);
2615 }
2616
2617 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2618 struct hid_driver *hdrv)
2619 {
2620 struct hid_dynid *dynid;
2621
2622 spin_lock(&hdrv->dyn_lock);
2623 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2624 if (hid_match_one_id(hdev, &dynid->id)) {
2625 spin_unlock(&hdrv->dyn_lock);
2626 return &dynid->id;
2627 }
2628 }
2629 spin_unlock(&hdrv->dyn_lock);
2630
2631 return hid_match_id(hdev, hdrv->id_table);
2632 }
2633 EXPORT_SYMBOL_GPL(hid_match_device);
2634
2635 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2636 {
2637 struct hid_driver *hdrv = to_hid_driver(drv);
2638 struct hid_device *hdev = to_hid_device(dev);
2639
2640 return hid_match_device(hdev, hdrv) != NULL;
2641 }
2642
2643 /**
2644 * hid_compare_device_paths - check if both devices share the same path
2645 * @hdev_a: hid device
2646 * @hdev_b: hid device
2647 * @separator: char to use as separator
2648 *
2649 * Check if two devices share the same path up to the last occurrence of
2650 * the separator char. Both paths must exist (i.e., zero-length paths
2651 * don't match).
2652 */
2653 bool hid_compare_device_paths(struct hid_device *hdev_a,
2654 struct hid_device *hdev_b, char separator)
2655 {
2656 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2657 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2658
2659 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2660 return false;
2661
2662 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2663 }
2664 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
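/*
 * Illustrative example (not part of the original source): with '/' as the
 * separator, "usb-0000:00:14.0-2/input0" and "usb-0000:00:14.0-2/input1"
 * compare equal (same parent path), whereas "usb-0000:00:14.0-2/input0" and
 * "usb-0000:00:14.0-3/input0" do not.
 */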
2665
2666 static bool hid_check_device_match(struct hid_device *hdev,
2667 struct hid_driver *hdrv,
2668 const struct hid_device_id **id)
2669 {
2670 *id = hid_match_device(hdev, hdrv);
2671 if (!*id)
2672 return false;
2673
2674 if (hdrv->match)
2675 return hdrv->match(hdev, hid_ignore_special_drivers);
2676
2677 /*
2678 * hid-generic implements .match(), so we must be dealing with a
2679 * different HID driver here, and can simply check if
2680 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2681 * are set or not.
2682 */
2683 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2684 }
2685
2686 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2687 {
2688 const struct hid_device_id *id;
2689 int ret;
2690
2691 if (!hid_check_device_match(hdev, hdrv, &id))
2692 return -ENODEV;
2693
2694 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2695 if (!hdev->devres_group_id)
2696 return -ENOMEM;
2697
2698 /* reset the quirks that have been previously set */
2699 hdev->quirks = hid_lookup_quirk(hdev);
2700 hdev->driver = hdrv;
2701
2702 if (hdrv->probe) {
2703 ret = hdrv->probe(hdev, id);
2704 } else { /* default probe */
2705 ret = hid_open_report(hdev);
2706 if (!ret)
2707 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2708 }
2709
2710 /*
2711 * Note that we are not closing the devres group opened above so
2712 * even resources that were attached to the device after probe is
2713 * run are released when hid_device_remove() is executed. This is
2714 * needed as some drivers would allocate additional resources,
2715 * for example when updating firmware.
2716 */
2717
2718 if (ret) {
2719 devres_release_group(&hdev->dev, hdev->devres_group_id);
2720 hid_close_report(hdev);
2721 hdev->driver = NULL;
2722 }
2723
2724 return ret;
2725 }
2726
2727 static int hid_device_probe(struct device *dev)
2728 {
2729 struct hid_device *hdev = to_hid_device(dev);
2730 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2731 int ret = 0;
2732
2733 if (down_interruptible(&hdev->driver_input_lock))
2734 return -EINTR;
2735
2736 hdev->io_started = false;
2737 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2738
2739 if (!hdev->driver)
2740 ret = __hid_device_probe(hdev, hdrv);
2741
2742 if (!hdev->io_started)
2743 up(&hdev->driver_input_lock);
2744
2745 return ret;
2746 }
2747
2748 static void hid_device_remove(struct device *dev)
2749 {
2750 struct hid_device *hdev = to_hid_device(dev);
2751 struct hid_driver *hdrv;
2752
2753 down(&hdev->driver_input_lock);
2754 hdev->io_started = false;
2755
2756 hdrv = hdev->driver;
2757 if (hdrv) {
2758 if (hdrv->remove)
2759 hdrv->remove(hdev);
2760 else /* default remove */
2761 hid_hw_stop(hdev);
2762
2763 /* Release all devres resources allocated by the driver */
2764 devres_release_group(&hdev->dev, hdev->devres_group_id);
2765
2766 hid_close_report(hdev);
2767 hdev->driver = NULL;
2768 }
2769
2770 if (!hdev->io_started)
2771 up(&hdev->driver_input_lock);
2772 }
2773
2774 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2775 char *buf)
2776 {
2777 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2778
2779 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2780 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2781 }
2782 static DEVICE_ATTR_RO(modalias);
2783
2784 static struct attribute *hid_dev_attrs[] = {
2785 &dev_attr_modalias.attr,
2786 NULL,
2787 };
2788 static struct bin_attribute *hid_dev_bin_attrs[] = {
2789 &dev_bin_attr_report_desc,
2790 NULL
2791 };
2792 static const struct attribute_group hid_dev_group = {
2793 .attrs = hid_dev_attrs,
2794 .bin_attrs = hid_dev_bin_attrs,
2795 };
2796 __ATTRIBUTE_GROUPS(hid_dev);
2797
2798 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2799 {
2800 const struct hid_device *hdev = to_hid_device(dev);
2801
2802 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2803 hdev->bus, hdev->vendor, hdev->product))
2804 return -ENOMEM;
2805
2806 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2807 return -ENOMEM;
2808
2809 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2810 return -ENOMEM;
2811
2812 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2813 return -ENOMEM;
2814
2815 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2816 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2817 return -ENOMEM;
2818
2819 return 0;
2820 }
2821
2822 const struct bus_type hid_bus_type = {
2823 .name = "hid",
2824 .dev_groups = hid_dev_groups,
2825 .drv_groups = hid_drv_groups,
2826 .match = hid_bus_match,
2827 .probe = hid_device_probe,
2828 .remove = hid_device_remove,
2829 .uevent = hid_uevent,
2830 };
2831 EXPORT_SYMBOL(hid_bus_type);
2832
2833 int hid_add_device(struct hid_device *hdev)
2834 {
2835 static atomic_t id = ATOMIC_INIT(0);
2836 int ret;
2837
2838 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2839 return -EBUSY;
2840
2841 hdev->quirks = hid_lookup_quirk(hdev);
2842
2843 /* we need to reject ignored devices here, otherwise they will stay
2844 * allocated, waiting for a driver that will never come */
2845 if (hid_ignore(hdev))
2846 return -ENODEV;
2847
2848 /*
2849 * Check for the mandatory transport channel.
2850 */
2851 if (!hdev->ll_driver->raw_request) {
2852 hid_err(hdev, "transport driver missing .raw_request()\n");
2853 return -EINVAL;
2854 }
2855
2856 /*
2857 * Read the device report descriptor once and use as template
2858 * for the driver-specific modifications.
2859 */
2860 ret = hdev->ll_driver->parse(hdev);
2861 if (ret)
2862 return ret;
2863 if (!hdev->dev_rdesc)
2864 return -ENODEV;
2865
2866 /*
2867 * Scan generic devices for group information
2868 */
2869 if (hid_ignore_special_drivers) {
2870 hdev->group = HID_GROUP_GENERIC;
2871 } else if (!hdev->group &&
2872 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2873 ret = hid_scan_report(hdev);
2874 if (ret)
2875 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2876 }
2877
2878 hdev->id = atomic_inc_return(&id);
2879
2880 /* XXX hack, any other cleaner solution after the driver core
2881 * is converted to allow more than 20 bytes as the device name? */
2882 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2883 hdev->vendor, hdev->product, hdev->id);
2884
2885 hid_debug_register(hdev, dev_name(&hdev->dev));
2886 ret = device_add(&hdev->dev);
2887 if (!ret)
2888 hdev->status |= HID_STAT_ADDED;
2889 else
2890 hid_debug_unregister(hdev);
2891
2892 return ret;
2893 }
2894 EXPORT_SYMBOL_GPL(hid_add_device);
2895
2896 /**
2897 * hid_allocate_device - allocate new hid device descriptor
2898 *
2899 * Allocate and initialize hid device, so that hid_destroy_device might be
2900 * used to free it.
2901 *
2902 * A new hid_device pointer is returned on success, otherwise an
2903 * ERR_PTR-encoded error value.
2904 */
2905 struct hid_device *hid_allocate_device(void)
2906 {
2907 struct hid_device *hdev;
2908 int ret = -ENOMEM;
2909
2910 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2911 if (hdev == NULL)
2912 return ERR_PTR(ret);
2913
2914 device_initialize(&hdev->dev);
2915 hdev->dev.release = hid_device_release;
2916 hdev->dev.bus = &hid_bus_type;
2917 device_enable_async_suspend(&hdev->dev);
2918
2919 hid_close_report(hdev);
2920
2921 init_waitqueue_head(&hdev->debug_wait);
2922 INIT_LIST_HEAD(&hdev->debug_list);
2923 spin_lock_init(&hdev->debug_list_lock);
2924 sema_init(&hdev->driver_input_lock, 1);
2925 mutex_init(&hdev->ll_open_lock);
2926 kref_init(&hdev->ref);
2927
2928 ret = hid_bpf_device_init(hdev);
2929 if (ret)
2930 goto out_err;
2931
2932 return hdev;
2933
2934 out_err:
2935 hid_destroy_device(hdev);
2936 return ERR_PTR(ret);
2937 }
2938 EXPORT_SYMBOL_GPL(hid_allocate_device);
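/*
 * Illustrative sketch (not part of the original source): a transport driver
 * creates and registers a device roughly like this; the IDs and
 * example_ll_driver are hypothetical and error handling is abbreviated:
 *
 *	struct hid_device *hdev = hid_allocate_device();
 *
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->bus = BUS_VIRTUAL;
 *	hdev->vendor = 0x1234;
 *	hdev->product = 0x5678;
 *	hdev->ll_driver = &example_ll_driver;
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);
 */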
2939
2940 static void hid_remove_device(struct hid_device *hdev)
2941 {
2942 if (hdev->status & HID_STAT_ADDED) {
2943 device_del(&hdev->dev);
2944 hid_debug_unregister(hdev);
2945 hdev->status &= ~HID_STAT_ADDED;
2946 }
2947 kfree(hdev->dev_rdesc);
2948 hdev->dev_rdesc = NULL;
2949 hdev->dev_rsize = 0;
2950 }
2951
2952 /**
2953 * hid_destroy_device - free previously allocated device
2954 *
2955 * @hdev: hid device
2956 *
2957 * If you allocate hid_device through hid_allocate_device, you should only
2958 * ever free it with this function.
2959 */
2960 void hid_destroy_device(struct hid_device *hdev)
2961 {
2962 hid_bpf_destroy_device(hdev);
2963 hid_remove_device(hdev);
2964 put_device(&hdev->dev);
2965 }
2966 EXPORT_SYMBOL_GPL(hid_destroy_device);
2967
2968
2969 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2970 {
2971 struct hid_driver *hdrv = data;
2972 struct hid_device *hdev = to_hid_device(dev);
2973
2974 if (hdev->driver == hdrv &&
2975 !hdrv->match(hdev, hid_ignore_special_drivers) &&
2976 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2977 return device_reprobe(dev);
2978
2979 return 0;
2980 }
2981
2982 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
2983 {
2984 struct hid_driver *hdrv = to_hid_driver(drv);
2985
2986 if (hdrv->match) {
2987 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
2988 __hid_bus_reprobe_drivers);
2989 }
2990
2991 return 0;
2992 }
2993
2994 static int __bus_removed_driver(struct device_driver *drv, void *data)
2995 {
2996 return bus_rescan_devices(&hid_bus_type);
2997 }
2998
2999 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3000 const char *mod_name)
3001 {
3002 int ret;
3003
3004 hdrv->driver.name = hdrv->name;
3005 hdrv->driver.bus = &hid_bus_type;
3006 hdrv->driver.owner = owner;
3007 hdrv->driver.mod_name = mod_name;
3008
3009 INIT_LIST_HEAD(&hdrv->dyn_list);
3010 spin_lock_init(&hdrv->dyn_lock);
3011
3012 ret = driver_register(&hdrv->driver);
3013
3014 if (ret == 0)
3015 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3016 __hid_bus_driver_added);
3017
3018 return ret;
3019 }
3020 EXPORT_SYMBOL_GPL(__hid_register_driver);
3021
3022 void hid_unregister_driver(struct hid_driver *hdrv)
3023 {
3024 driver_unregister(&hdrv->driver);
3025 hid_free_dynids(hdrv);
3026
3027 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3028 }
3029 EXPORT_SYMBOL_GPL(hid_unregister_driver);
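/*
 * Illustrative sketch (not part of the original source): most HID drivers do
 * not call __hid_register_driver()/hid_unregister_driver() directly but use
 * the module_hid_driver() helper; the driver below is hypothetical:
 *
 *	static const struct hid_device_id example_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, example_devices);
 *
 *	static struct hid_driver example_driver = {
 *		.name = "example-hid",
 *		.id_table = example_devices,
 *	};
 *	module_hid_driver(example_driver);
 */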
3030
3031 int hid_check_keys_pressed(struct hid_device *hid)
3032 {
3033 struct hid_input *hidinput;
3034 int i;
3035
3036 if (!(hid->claimed & HID_CLAIMED_INPUT))
3037 return 0;
3038
3039 list_for_each_entry(hidinput, &hid->inputs, list) {
3040 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3041 if (hidinput->input->key[i])
3042 return 1;
3043 }
3044
3045 return 0;
3046 }
3047 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3048
3049 #ifdef CONFIG_HID_BPF
3050 static struct hid_ops __hid_ops = {
3051 .hid_get_report = hid_get_report,
3052 .hid_hw_raw_request = __hid_hw_raw_request,
3053 .hid_hw_output_report = __hid_hw_output_report,
3054 .hid_input_report = __hid_input_report,
3055 .owner = THIS_MODULE,
3056 .bus_type = &hid_bus_type,
3057 };
3058 #endif
3059
3060 static int __init hid_init(void)
3061 {
3062 int ret;
3063
3064 ret = bus_register(&hid_bus_type);
3065 if (ret) {
3066 pr_err("can't register hid bus\n");
3067 goto err;
3068 }
3069
3070 #ifdef CONFIG_HID_BPF
3071 hid_ops = &__hid_ops;
3072 #endif
3073
3074 ret = hidraw_init();
3075 if (ret)
3076 goto err_bus;
3077
3078 hid_debug_init();
3079
3080 return 0;
3081 err_bus:
3082 bus_unregister(&hid_bus_type);
3083 err:
3084 return ret;
3085 }
3086
3087 static void __exit hid_exit(void)
3088 {
3089 #ifdef CONFIG_HID_BPF
3090 hid_ops = NULL;
3091 #endif
3092 hid_debug_exit();
3093 hidraw_exit();
3094 bus_unregister(&hid_bus_type);
3095 hid_quirks_exit(HID_BUS_ANY);
3096 }
3097
3098 module_init(hid_init);
3099 module_exit(hid_exit);
3100
3101 MODULE_AUTHOR("Andreas Gal");
3102 MODULE_AUTHOR("Vojtech Pavlik");
3103 MODULE_AUTHOR("Jiri Kosina");
3104 MODULE_DESCRIPTION("HID support for Linux");
3105 MODULE_LICENSE("GPL");
3106