1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <asm/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43 #define HID_COLLECTION_SIZE_OF 2
44 #define HID_COMPLETE_USAGE_MASK 0xFFFF
45 #define HID_COMPLETE_USAGE_SHIFT_MASK 16
46 #define HID_ADD_USAGE_SIZE 2
47 #define HID_MAX_BUFFER_SIZE_SHIFT_MASK 3
48 #define HID_ITEM_UDATA_BYTE 1
49 #define HID_ITEM_UDATA_WORD 2
50 #define HID_ITEM_UDATA_DWORD 4
51 #define HID_ITEM_SDATA_BYTE 1
52 #define HID_ITEM_SDATA_WORD 2
53 #define HID_ITEM_SDATA_DWORD 4
54 #define HID_REPORT_SIZE 256
55 #define HID_OPEN_COLLECTION_TYPE_MASK 0xff
56 #define HID_FETCH_ITEM_SHIFT_MASK_TWO 2
57 #define HID_FETCH_ITEM_SHIFT_MASK_FOUR 4
58 #define HID_FETCH_ITEM_SHIFT_BIT_MASK_THREE 3
59 #define HID_FETCH_ITEM_SHIFT_BIT_MASK_FIFTEEN 15
60 #define HID_FETCH_ITEM_SIZE_ZERO 0
61 #define HID_FETCH_ITEM_SIZE_ONE 1
62 #define HID_FETCH_ITEM_SIZE_TWO 2
63 #define HID_FETCH_ITEM_SIZE_THREE 3
64 #define HID_FETCH_ITEM_SIZE_FOUR 4
65 #define HID_SCAN_FEATURE_USAGE_REG_C5 0xff0000c5
66 #define HID_SCAN_FEATURE_USAGE_REG_C6 0xff0000c6
67 #define HID_REPORT_COUNT_SIZE_ONE 1
68 #define HID_GLOBAL_REPORT_SIZE 8
69 #define HID_GLOBAL_USAGE_PAGE_SHIFT_MASK 16
70 #define HID_UP_VENDOR_MASK 0x0001
71 #define HID_COLLECTION_TYPE_MASK 0xff
72 #define HID_MULTIPLIER_MAX 255
73 #define HID_BYTE_BIT_WIDTH 8
74 #define HID_WORD_BIT_WIDTH 16
75 #define HID_DWORD_BIT_WIDTH 32
76 #define HID_BYTE_BIT_MASK 0xff
77 #define HID_REPORT_SIZE_MASK 3
78 #define HID_BYTE_CHUNK 7
79 #define HID_FETCH_ITEM_SIZE_WORD 2
80
81 int hid_debug = 0;
82 module_param_named(debug, hid_debug, int, 0600);
83 MODULE_PARM_DESC(debug, "toggle HID debugging messages");
84 EXPORT_SYMBOL_GPL(hid_debug);
85
86 static int hid_ignore_special_drivers = 0;
87 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
88 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
89
90 /*
91 * Register a new report for a device.
92 */
93
94 struct hid_report *hid_register_report(struct hid_device *device, unsigned int type, unsigned int id,
95 unsigned int application)
96 {
97 struct hid_report_enum *report_enum = device->report_enum + type;
98 struct hid_report *report;
99
100 if (id >= HID_MAX_IDS) {
101 return NULL;
102 }
103 if (report_enum->report_id_hash[id]) {
104 return report_enum->report_id_hash[id];
105 }
106
107 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
108 if (!report) {
109 return NULL;
110 }
111
112 if (id != 0) {
113 report_enum->numbered = 1;
114 }
115
116 report->id = id;
117 report->type = type;
118 report->size = 0;
119 report->device = device;
120 report->application = application;
121 report_enum->report_id_hash[id] = report;
122
123 list_add_tail(&report->list, &report_enum->report_list);
124
125 return report;
126 }
127 EXPORT_SYMBOL_GPL(hid_register_report);
128
129 /*
130 * Register a new field for this report.
131 */
132
133 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
134 {
135 struct hid_field *field;
136
137 if (report->maxfield == HID_MAX_FIELDS) {
138 return NULL;
139 }
140
141 field =
142 kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + usages * sizeof(unsigned)), GFP_KERNEL);
143 if (!field) {
144 return NULL;
145 }
146
147 field->index = report->maxfield++;
148 report->field[field->index] = field;
149 field->usage = (struct hid_usage *)(field + 1);
150 field->value = (s32 *)(field->usage + usages);
151 field->report = report;
152
153 return field;
154 }
155
156 /*
157 * Open a collection. The type/usage is pushed on the stack.
158 */
159
160 static int open_collection(struct hid_parser *parser, unsigned type)
161 {
162 struct hid_collection *collection;
163 unsigned usage;
164 int collection_index;
165
166 usage = parser->local.usage[0];
167
168 if (parser->collection_stack_ptr == parser->collection_stack_size) {
169 unsigned int *collection_stack;
170 unsigned int new_size = parser->collection_stack_size + HID_COLLECTION_STACK_SIZE;
171
172 collection_stack = krealloc(parser->collection_stack, new_size * sizeof(unsigned int), GFP_KERNEL);
173 if (!collection_stack) {
174 return -ENOMEM;
175 }
176
177 parser->collection_stack = collection_stack;
178 parser->collection_stack_size = new_size;
179 }
180
181 if (parser->device->maxcollection == parser->device->collection_size) {
182 collection =
183 kmalloc(array3_size(sizeof(struct hid_collection), parser->device->collection_size, HID_COLLECTION_SIZE_OF),
184 GFP_KERNEL);
185 if (collection == NULL) {
186 return -ENOMEM;
187 }
188 memcpy(collection, parser->device->collection, sizeof(struct hid_collection) * parser->device->collection_size);
189 memset(collection + parser->device->collection_size, 0,
190 sizeof(struct hid_collection) * parser->device->collection_size);
191 kfree(parser->device->collection);
192 parser->device->collection = collection;
193 parser->device->collection_size *= HID_COLLECTION_SIZE_OF;
194 }
195
196 parser->collection_stack[parser->collection_stack_ptr++] = parser->device->maxcollection;
197
198 collection_index = parser->device->maxcollection++;
199 collection = parser->device->collection + collection_index;
200 collection->type = type;
201 collection->usage = usage;
202 collection->level = parser->collection_stack_ptr - 1;
203 collection->parent_idx = (collection->level == 0) ? -1 : parser->collection_stack[collection->level - 1];
204
205 if (type == HID_COLLECTION_APPLICATION) {
206 parser->device->maxapplication++;
207 }
208
209 return 0;
210 }
211
212 /*
213 * Close a collection.
214 */
215
216 static int close_collection(struct hid_parser *parser)
217 {
218 if (!parser->collection_stack_ptr) {
219 return -EINVAL;
220 }
221 parser->collection_stack_ptr--;
222 return 0;
223 }
224
225 /*
226 * Climb up the stack, search for the specified collection type
227 * and return the usage.
228 */
229
230 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
231 {
232 struct hid_collection *collection = parser->device->collection;
233 int n;
234
235 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
236 unsigned index = parser->collection_stack[n];
237 if (collection[index].type == type) {
238 return collection[index].usage;
239 }
240 }
241 return 0; /* we know nothing about this usage type */
242 }
243
244 /*
245 * Concatenate usage which defines 16 bits or less with the
246 * currently defined usage page to form a 32 bit usage
247 */
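/*
 * Illustrative example (values assumed, not taken from a real device):
 * with the Generic Desktop usage page (0x0001) in effect, a 16-bit
 * Usage item of 0x0030 (X axis) becomes the full usage 0x00010030.
 */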
248
249 static void complete_usage(struct hid_parser *parser, unsigned int index)
250 {
251 parser->local.usage[index] &= HID_COMPLETE_USAGE_MASK;
252 parser->local.usage[index] |= (parser->global.usage_page & HID_COMPLETE_USAGE_MASK)
253 << HID_COMPLETE_USAGE_SHIFT_MASK;
254 }
255
256 /*
257 * Add a usage to the temporary parser table.
258 */
259
260 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
261 {
262 if (parser->local.usage_index >= HID_MAX_USAGES) {
263 return -EPERM;
264 }
265 parser->local.usage[parser->local.usage_index] = usage;
266
267 /*
268 * If Usage item only includes usage id, concatenate it with
269 * currently defined usage page
270 */
271 if (size <= HID_ADD_USAGE_SIZE) {
272 complete_usage(parser, parser->local.usage_index);
273 }
274
275 parser->local.usage_size[parser->local.usage_index] = size;
276 parser->local.collection_index[parser->local.usage_index] =
277 parser->collection_stack_ptr ? parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
278 parser->local.usage_index++;
279 return 0;
280 }
281
282 /*
283 * Register a new field for this report.
284 */
285
286 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
287 {
288 struct hid_report *report;
289 struct hid_field *field;
290 unsigned int usages;
291 unsigned int offset;
292 unsigned int i;
293 unsigned int application;
294 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
295 report = hid_register_report(parser->device, report_type, parser->global.report_id, application);
296 if (!report) {
297 return -EPERM;
298 }
299 /* Handle both signed and unsigned cases properly */
300 if ((parser->global.logical_minimum < 0 && parser->global.logical_maximum < parser->global.logical_minimum) ||
301 (parser->global.logical_minimum >= 0 &&
302 (u32)parser->global.logical_maximum < (u32)parser->global.logical_minimum)) {
303 return -EPERM;
304 }
305 offset = report->size;
306 report->size += parser->global.report_size * parser->global.report_count;
307 /* Total size check: Allow for possible report index byte */
308 if (report->size > (HID_MAX_BUFFER_SIZE - 1) << HID_MAX_BUFFER_SIZE_SHIFT_MASK) {
309 return -EPERM;
310 }
311 if (!parser->local.usage_index) { /* Ignore padding fields */
312 return 0;
313 }
314 usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count);
315 field = hid_register_field(report, usages);
316 if (!field) {
317 return 0;
318 }
319 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
320 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
321 field->application = application;
322 for (i = 0; i < usages; i++) {
323 unsigned j = i;
324 /* Duplicate the last usage we parsed if we have excess values */
325 if (i >= parser->local.usage_index) {
326 j = parser->local.usage_index - 1;
327 }
328 field->usage[i].hid = parser->local.usage[j];
329 field->usage[i].collection_index = parser->local.collection_index[j];
330 field->usage[i].usage_index = i;
331 field->usage[i].resolution_multiplier = 1;
332 }
333 field->maxusage = usages;
334 field->flags = flags;
335 field->report_offset = offset;
336 field->report_type = report_type;
337 field->report_size = parser->global.report_size;
338 field->report_count = parser->global.report_count;
339 field->logical_minimum = parser->global.logical_minimum;
340 field->logical_maximum = parser->global.logical_maximum;
341 field->physical_minimum = parser->global.physical_minimum;
342 field->physical_maximum = parser->global.physical_maximum;
343 field->unit_exponent = parser->global.unit_exponent;
344 field->unit = parser->global.unit;
345 return 0;
346 }
347
348 /*
349 * Read data value from item.
350 */
351 static u32 item_udata(struct hid_item *item)
352 {
353 switch (item->size) {
354 case HID_ITEM_UDATA_BYTE:
355 return item->data.u8;
356 case HID_ITEM_UDATA_WORD:
357 return item->data.u16;
358 case HID_ITEM_UDATA_DWORD:
359 return item->data.u32;
360 }
361 return 0;
362 }
363
364 static s32 item_sdata(struct hid_item *item)
365 {
366 switch (item->size) {
367 case HID_ITEM_SDATA_BYTE:
368 return item->data.s8;
369 case HID_ITEM_SDATA_WORD:
370 return item->data.s16;
371 case HID_ITEM_SDATA_DWORD:
372 return item->data.s32;
373 }
374 return 0;
375 }
376
377 /*
378 * Process a global item.
379 */
380
381 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
382 {
383 s32 raw_value;
384 switch (item->tag) {
385 case HID_GLOBAL_ITEM_TAG_PUSH:
386
387 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
388 return -EPERM;
389 }
390
391 memcpy(parser->global_stack + parser->global_stack_ptr++, &parser->global, sizeof(struct hid_global));
392 return 0;
393
394 case HID_GLOBAL_ITEM_TAG_POP:
395
396 if (!parser->global_stack_ptr) {
397 return -EPERM;
398 }
399
400 memcpy(&parser->global, parser->global_stack + --parser->global_stack_ptr, sizeof(struct hid_global));
401 return 0;
402
403 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
404 parser->global.usage_page = item_udata(item);
405 return 0;
406
407 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
408 parser->global.logical_minimum = item_sdata(item);
409 return 0;
410
411 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
412 if (parser->global.logical_minimum < 0) {
413 parser->global.logical_maximum = item_sdata(item);
414 } else {
415 parser->global.logical_maximum = item_udata(item);
416 }
417 return 0;
418
419 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
420 parser->global.physical_minimum = item_sdata(item);
421 return 0;
422
423 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
424 if (parser->global.physical_minimum < 0) {
425 parser->global.physical_maximum = item_sdata(item);
426 } else {
427 parser->global.physical_maximum = item_udata(item);
428 }
429 return 0;
430
431 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
432 /* Many devices provide unit exponent as a two's complement
433 * nibble due to the common misunderstanding of HID
434 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
435 * both this and the standard encoding. */
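/*
 * Example (illustrative): a 4-bit raw value of 0x0C is read as -4 via
 * hid_snto32(), i.e. units of 10^-4, rather than as the out-of-range
 * exponent 12.
 */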
436 raw_value = item_sdata(item);
437 if (!(raw_value & 0xfffffff0)) {
438 parser->global.unit_exponent = hid_snto32(raw_value, 0x4);
439 } else {
440 parser->global.unit_exponent = raw_value;
441 }
442 return 0;
443
444 case HID_GLOBAL_ITEM_TAG_UNIT:
445 parser->global.unit = item_udata(item);
446 return 0;
447
448 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
449 parser->global.report_size = item_udata(item);
450 if (parser->global.report_size > HID_REPORT_SIZE) {
451 return -EPERM;
452 }
453 return 0;
454
455 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
456 parser->global.report_count = item_udata(item);
457 if (parser->global.report_count > HID_MAX_USAGES) {
458 return -EPERM;
459 }
460 return 0;
461
462 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
463 parser->global.report_id = item_udata(item);
464 if (parser->global.report_id == 0 || parser->global.report_id >= HID_MAX_IDS) {
465 return -EPERM;
466 }
467 return 0;
468
469 default:
470 return -EPERM;
471 }
472 }
473
474 /*
475 * Process a local item.
476 */
477
478 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
479 {
480 u32 data;
481 unsigned n;
482 u32 count;
483
484 data = item_udata(item);
485
486 switch (item->tag) {
487 case HID_LOCAL_ITEM_TAG_DELIMITER:
488
489 if (data) {
490 /*
491 * We treat items before the first delimiter
492 * as global to all usage sets (branch 0).
493 * At the moment we process only these global
494 * items and the first delimiter set.
495 */
496 if (parser->local.delimiter_depth != 0) {
497 return -EPERM;
498 }
499 parser->local.delimiter_depth++;
500 parser->local.delimiter_branch++;
501 } else {
502 if (parser->local.delimiter_depth < 1) {
503 return -EPERM;
504 }
505 parser->local.delimiter_depth--;
506 }
507 return 0;
508
509 case HID_LOCAL_ITEM_TAG_USAGE:
510
511 if (parser->local.delimiter_branch > 1) {
512 return 0;
513 }
514
515 return hid_add_usage(parser, data, item->size);
516
517 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
518
519 if (parser->local.delimiter_branch > 1) {
520 return 0;
521 }
522
523 parser->local.usage_minimum = data;
524 return 0;
525
526 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
527
528 if (parser->local.delimiter_branch > 1) {
529 return 0;
530 }
531
532 count = data - parser->local.usage_minimum;
533 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
534 /*
535 * We do not warn if the name is not set, we are
536 * actually pre-scanning the device.
537 */
538 if (dev_name(&parser->device->dev)) {
539 data = HID_MAX_USAGES - parser->local.usage_index + parser->local.usage_minimum - 1;
540 }
541 if (data <= 0) {
542 return -EPERM;
543 }
544 }
545
546 for (n = parser->local.usage_minimum; n <= data; n++) {
547 if (hid_add_usage(parser, n, item->size)) {
548 return -EPERM;
549 }
550 }
551 return 0;
552
553 default:
554
555 return 0;
556 }
557 return 0;
558 }
559
560 /*
561 * Concatenate Usage Pages into Usages where relevant:
562 * As per specification, 6.2.2.8: "When the parser encounters a main item it
563 * concatenates the last declared Usage Page with a Usage to form a complete
564 * usage value."
565 */
566
567 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
568 {
569 int i;
570 unsigned int usage_page;
571 unsigned int current_page;
572
573 if (!parser->local.usage_index) {
574 return;
575 }
576
577 usage_page = parser->global.usage_page;
578
579 /*
580 * Concatenate usage page again only if last declared Usage Page
581 * has not been already used in previous usages concatenation
582 */
583 for (i = parser->local.usage_index - 1; i >= 0; i--) {
584 if (parser->local.usage_size[i] > HID_ADD_USAGE_SIZE) {
585 /* Ignore extended usages */
586 continue;
587 }
588
589 current_page = parser->local.usage[i] >> HID_COMPLETE_USAGE_SHIFT_MASK;
590 if (current_page == usage_page) {
591 break;
592 }
593
594 complete_usage(parser, i);
595 }
596 }
597
598 /*
599 * Process a main item.
600 */
601
602 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
603 {
604 u32 data;
605 int ret;
606
607 hid_concatenate_last_usage_page(parser);
608
609 data = item_udata(item);
610
611 switch (item->tag) {
612 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
613 ret = open_collection(parser, data & HID_OPEN_COLLECTION_TYPE_MASK);
614 break;
615 case HID_MAIN_ITEM_TAG_END_COLLECTION:
616 ret = close_collection(parser);
617 break;
618 case HID_MAIN_ITEM_TAG_INPUT:
619 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
620 break;
621 case HID_MAIN_ITEM_TAG_OUTPUT:
622 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
623 break;
624 case HID_MAIN_ITEM_TAG_FEATURE:
625 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
626 break;
627 default:
628 ret = 0;
629 }
630
631 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
632
633 return ret;
634 }
635
636 /*
637 * Process a reserved item.
638 */
639
640 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
641 {
642 return 0;
643 }
644
645 /*
646 * Free a report and all registered fields. The field->usage and
647 * field->value tables are allocated behind the field, so we need
648 * only to free(field) itself.
649 */
650
651 static void hid_free_report(struct hid_report *report)
652 {
653 unsigned n;
654
655 for (n = 0; n < report->maxfield; n++) {
656 kfree(report->field[n]);
657 }
658 kfree(report);
659 }
660
661 /*
662 * Close report. This function returns the device
663 * state to the point prior to hid_open_report().
664 */
665 static void hid_close_report(struct hid_device *device)
666 {
667 unsigned i, j;
668
669 for (i = 0; i < HID_REPORT_TYPES; i++) {
670 struct hid_report_enum *report_enum = device->report_enum + i;
671
672 for (j = 0; j < HID_MAX_IDS; j++) {
673 struct hid_report *report = report_enum->report_id_hash[j];
674 if (report) {
675 hid_free_report(report);
676 }
677 }
678 memset(report_enum, 0, sizeof(*report_enum));
679 INIT_LIST_HEAD(&report_enum->report_list);
680 }
681
682 kfree(device->rdesc);
683 device->rdesc = NULL;
684 device->rsize = 0;
685
686 kfree(device->collection);
687 device->collection = NULL;
688 device->collection_size = 0;
689 device->maxcollection = 0;
690 device->maxapplication = 0;
691
692 device->status &= ~HID_STAT_PARSED;
693 }
694
695 /*
696 * Free a device structure, all reports, and all fields.
697 */
698
699 static void hid_device_release(struct device *dev)
700 {
701 struct hid_device *hid = to_hid_device(dev);
702
703 hid_close_report(hid);
704 kfree(hid->dev_rdesc);
705 kfree(hid);
706 }
707
708 /*
709 * Fetch a report description item from the data stream. We support long
710 * items, though they are not used yet.
711 */
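/*
 * Short item encoding refresher (HID 1.11 spec, 6.2.2.2): the prefix byte
 * packs bSize in bits 0-1, bType in bits 2-3 and bTag in bits 4-7.
 * For example, the prefix 0x05 decodes as a Global item (type 1) with
 * tag 0 (Usage Page), followed by one data byte.
 */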
712
713 static u8 *fetch_item(u8 *start, u8 *end, struct hid_item *item)
714 {
715 u8 b;
716 if ((end - start) <= 0) {
717 return NULL;
718 }
719 b = *start++;
720 item->type = (b >> HID_FETCH_ITEM_SHIFT_MASK_TWO) & HID_FETCH_ITEM_SHIFT_BIT_MASK_THREE;
721 item->tag = (b >> HID_FETCH_ITEM_SHIFT_MASK_FOUR) & HID_FETCH_ITEM_SHIFT_BIT_MASK_FIFTEEN;
722 if (item->tag == HID_ITEM_TAG_LONG) {
723 item->format = HID_ITEM_FORMAT_LONG;
724 if ((end - start) < HID_FETCH_ITEM_SIZE_WORD) {
725 return NULL;
726 }
727 item->size = *start++;
728 item->tag = *start++;
729 if ((end - start) < item->size) {
730 return NULL;
731 }
732 item->data.longdata = start;
733 start += item->size;
734 return start;
735 }
736 item->format = HID_ITEM_FORMAT_SHORT;
737 item->size = b & HID_FETCH_ITEM_SHIFT_BIT_MASK_THREE;
738
739 switch (item->size) {
740 case HID_FETCH_ITEM_SIZE_ZERO:
741 return start;
742 case HID_FETCH_ITEM_SIZE_ONE:
743 if ((end - start) < HID_FETCH_ITEM_SIZE_ONE) {
744 return NULL;
745 }
746 item->data.u8 = *start++;
747 return start;
748 case HID_FETCH_ITEM_SIZE_TWO:
749 if ((end - start) < HID_FETCH_ITEM_SIZE_TWO) {
750 return NULL;
751 }
752 item->data.u16 = get_unaligned_le16(start);
753 start = (u8 *)((u16 *)start + 1);
754 return start;
755 case HID_FETCH_ITEM_SIZE_THREE:
756 item->size++;
757 if ((end - start) < HID_FETCH_ITEM_SIZE_FOUR) {
758 return NULL;
759 }
760 item->data.u32 = get_unaligned_le32(start);
761 start = (u8 *)((u32 *)start + 1);
762 return start;
763 }
764 return NULL;
765 }
766
767 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
768 {
769 struct hid_device *hid = parser->device;
770
771 if (usage == HID_DG_CONTACTID) {
772 hid->group = HID_GROUP_MULTITOUCH;
773 }
774 }
775
776 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
777 {
778 if (usage == HID_SCAN_FEATURE_USAGE_REG_C5 && parser->global.report_count == HID_REPORT_SIZE &&
779 parser->global.report_size == HID_GLOBAL_REPORT_SIZE) {
780 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
781 }
782
783 if (usage == HID_SCAN_FEATURE_USAGE_REG_C6 && parser->global.report_count == HID_REPORT_COUNT_SIZE_ONE &&
784 parser->global.report_size == HID_GLOBAL_REPORT_SIZE) {
785 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
786 }
787 }
788
789 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
790 {
791 struct hid_device *hid = parser->device;
792 int i;
793
794 if (((parser->global.usage_page << HID_GLOBAL_USAGE_PAGE_SHIFT_MASK) == HID_UP_SENSOR) &&
795 type == HID_COLLECTION_PHYSICAL) {
796 hid->group = HID_GROUP_SENSOR_HUB;
797 }
798
799 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
800 hid->group == HID_GROUP_MULTITOUCH) {
801 hid->group = HID_GROUP_GENERIC;
802 }
803
804 if ((parser->global.usage_page << HID_GLOBAL_USAGE_PAGE_SHIFT_MASK) == HID_UP_GENDESK) {
805 for (i = 0; i < parser->local.usage_index; i++) {
806 if (parser->local.usage[i] == HID_GD_POINTER) {
807 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
808 }
809 }
810 }
811
812 if ((parser->global.usage_page << HID_GLOBAL_USAGE_PAGE_SHIFT_MASK) >= HID_UP_MSVENDOR) {
813 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
814 }
815
816 if ((parser->global.usage_page << HID_GLOBAL_USAGE_PAGE_SHIFT_MASK) == HID_UP_GOOGLEVENDOR) {
817 for (i = 0; i < parser->local.usage_index; i++) {
818 if (parser->local.usage[i] == (HID_UP_GOOGLEVENDOR | HID_UP_VENDOR_MASK)) {
819 parser->device->group = HID_GROUP_VIVALDI;
820 }
821 }
822 }
823 }
824
825 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
826 {
827 u32 data;
828 int i;
829
830 hid_concatenate_last_usage_page(parser);
831
832 data = item_udata(item);
833
834 switch (item->tag) {
835 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
836 hid_scan_collection(parser, data & HID_COLLECTION_TYPE_MASK);
837 break;
838 case HID_MAIN_ITEM_TAG_END_COLLECTION:
839 break;
840 case HID_MAIN_ITEM_TAG_INPUT:
841 /* ignore constant inputs, they will be ignored by hid-input */
842 if (data & HID_MAIN_ITEM_CONSTANT) {
843 break;
844 }
845 for (i = 0; i < parser->local.usage_index; i++) {
846 hid_scan_input_usage(parser, parser->local.usage[i]);
847 }
848 break;
849 case HID_MAIN_ITEM_TAG_OUTPUT:
850 break;
851 case HID_MAIN_ITEM_TAG_FEATURE:
852 for (i = 0; i < parser->local.usage_index; i++) {
853 hid_scan_feature_usage(parser, parser->local.usage[i]);
854 }
855 break;
856 }
857
858 /* Reset the local parser environment */
859 memset(&parser->local, 0, sizeof(parser->local));
860
861 return 0;
862 }
863
864 /*
865 * Scan a report descriptor before the device is added to the bus.
866 * Sets device groups and other properties that determine what driver
867 * to load.
868 */
869 static int hid_scan_report(struct hid_device *hid)
870 {
871 struct hid_parser *parser;
872 struct hid_item item;
873 u8 *start = hid->dev_rdesc;
874 u8 *end = start + hid->dev_rsize;
875 static int (*dispatch_type[])(struct hid_parser * parser, struct hid_item * item) = {
876 hid_scan_main, hid_parser_global, hid_parser_local, hid_parser_reserved};
877
878 parser = vzalloc(sizeof(struct hid_parser));
879 if (!parser) {
880 return -ENOMEM;
881 }
882
883 parser->device = hid;
884 hid->group = HID_GROUP_GENERIC;
885
886 /*
887 * The parsing is simpler than the one in hid_open_report() as we should
888 * be robust against hid errors. Those errors will be raised by
889 * hid_open_report() anyway.
890 */
891 while ((start = fetch_item(start, end, &item)) != NULL) {
892 dispatch_type[item.type](parser, &item);
893 }
894
895 /*
896 * Handle special flags set during scanning.
897 */
898 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && (hid->group == HID_GROUP_MULTITOUCH)) {
899 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
900 }
901
902 /*
903 * Vendor specific handlings
904 */
905 switch (hid->vendor) {
906 case USB_VENDOR_ID_WACOM:
907 hid->group = HID_GROUP_WACOM;
908 break;
909 case USB_VENDOR_ID_SYNAPTICS:
910 if (hid->group == HID_GROUP_GENERIC) {
911 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) &&
912 (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) {
913 /*
914 * hid-rmi should take care of them,
915 * not hid-generic
916 */
917 hid->group = HID_GROUP_RMI;
918 }
919 }
920 break;
921 }
922
923 kfree(parser->collection_stack);
924 vfree(parser);
925 return 0;
926 }
927
928 /**
929 * hid_parse_report - parse device report
930 *
931 * @hid: hid device
932 * @start: report start
933 * @size: report size
934 *
935 * Allocate the device report as read by the bus driver. This function should
936 * only be called from parse() in ll drivers.
937 */
938 int hid_parse_report(struct hid_device *hid, u8 *start, unsigned size)
939 {
940 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
941 if (!hid->dev_rdesc) {
942 return -ENOMEM;
943 }
944 hid->dev_rsize = size;
945 return 0;
946 }
947 EXPORT_SYMBOL_GPL(hid_parse_report);
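/*
 * Typical use (sketch): a transport driver's ->parse() callback fetches the
 * raw descriptor from the device and hands it over, e.g.
 *
 *	ret = hid_parse_report(hid, rdesc, rdesc_size);
 *
 * where 'rdesc' and 'rdesc_size' are whatever the bus provided; parsing
 * proper happens later in hid_open_report().
 */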
948
949 static const char *const hid_report_names[] = {
950 "HID_INPUT_REPORT",
951 "HID_OUTPUT_REPORT",
952 "HID_FEATURE_REPORT",
953 };
954 /**
955 * hid_validate_values - validate existing device report's value indexes
956 *
957 * @hid: hid device
958 * @type: which report type to examine
959 * @id: which report ID to examine (0 for first)
960 * @field_index: which report field to examine
961 * @report_counts: expected number of values
962 *
963 * Validate the number of values in a given field of a given report, after
964 * parsing.
965 */
966 struct hid_report *hid_validate_values(struct hid_device *hid, unsigned int type, unsigned int id,
967 unsigned int field_index, unsigned int report_counts)
968 {
969 struct hid_report *report;
970
971 if (type > HID_FEATURE_REPORT) {
972 return NULL;
973 }
974
975 if (id >= HID_MAX_IDS) {
976 return NULL;
977 }
978
979 /*
980 * Explicitly not using hid_get_report() here since it depends on
981 * ->numbered being checked, which may not always be the case when
982 * drivers go to access report values.
983 */
984 if (id == 0) {
985 /*
986 * Validating on id 0 means we should examine the first
987 * report in the list.
988 */
989 report = list_entry(hid->report_enum[type].report_list.next, struct hid_report, list);
990 } else {
991 report = hid->report_enum[type].report_id_hash[id];
992 }
993 if (!report) {
994 return NULL;
995 }
996 if (report->maxfield <= field_index) {
997 return NULL;
998 }
999 if (report->field[field_index]->report_count < report_counts) {
1000 return NULL;
1001 }
1002 return report;
1003 }
1004 EXPORT_SYMBOL_GPL(hid_validate_values);
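/*
 * Example use (sketch, indexes are illustrative): a force feedback driver
 * checking that the first output report carries at least 7 values would do
 *
 *	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
 *	if (!report)
 *		return -ENODEV;
 */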
1005
1006 static int hid_calculate_multiplier(struct hid_device *hid, struct hid_field *multiplier)
1007 {
1008 int m;
1009 s32 v = *multiplier->value;
1010 s32 lmin = multiplier->logical_minimum;
1011 s32 lmax = multiplier->logical_maximum;
1012 s32 pmin = multiplier->physical_minimum;
1013 s32 pmax = multiplier->physical_maximum;
1014
1015 /*
1016 * "Because OS implementations will generally divide the control's
1017 * reported count by the Effective Resolution Multiplier, designers
1018 * should take care not to establish a potential Effective
1019 * Resolution Multiplier of zero."
1020 * HID Usage Table, v1.12, Section 4.3.1, p31
1021 */
1022 if (lmax - lmin == 0) {
1023 return 1;
1024 }
1025 /*
1026 * Handling the unit exponent is left as an exercise to whoever
1027 * finds a device where that exponent is not 0.
1028 */
1029 m = ((v - lmin) / (lmax - lmin) * (pmax - pmin) + pmin);
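/*
 * Worked example (assumed values): a logical range 0..1, a physical
 * range 1..8 and a reported value of 1 yield
 * m = (1 - 0) / (1 - 0) * (8 - 1) + 1 = 8.
 */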
1030
1031 /* There are no devices with an effective multiplier > 255 */
1032 if (unlikely(m == 0 || m > HID_MULTIPLIER_MAX || m < -HID_MULTIPLIER_MAX)) {
1033 m = 1;
1034 }
1035
1036 return m;
1037 }
1038
1039 static void hid_apply_multiplier_to_field(struct hid_device *hid, struct hid_field *field,
1040 struct hid_collection *multiplier_collection, int effective_multiplier)
1041 {
1042 struct hid_collection *collection;
1043 struct hid_usage *usage;
1044 int i;
1045
1046 /*
1047 * If multiplier_collection is NULL, the multiplier applies
1048 * to all fields in the report.
1049 * Otherwise, it is the Logical Collection the multiplier applies to
1050 * but our field may be in a subcollection of that collection.
1051 */
1052 for (i = 0; i < field->maxusage; i++) {
1053 usage = &field->usage[i];
1054
1055 collection = &hid->collection[usage->collection_index];
1056 while (collection->parent_idx != -1 && collection != multiplier_collection) {
1057 collection = &hid->collection[collection->parent_idx];
1058 }
1059
1060 if (collection->parent_idx != -1 || multiplier_collection == NULL) {
1061 usage->resolution_multiplier = effective_multiplier;
1062 }
1063 }
1064 }
1065
1066 static void hid_apply_multiplier(struct hid_device *hid, struct hid_field *multiplier)
1067 {
1068 struct hid_report_enum *rep_enum;
1069 struct hid_report *rep;
1070 struct hid_field *field;
1071 struct hid_collection *multiplier_collection;
1072 int effective_multiplier;
1073 int i;
1074
1075 /*
1076 * "The Resolution Multiplier control must be contained in the same
1077 * Logical Collection as the control(s) to which it is to be applied.
1078 * If no Resolution Multiplier is defined, then the Resolution
1079 * Multiplier defaults to 1. If more than one control exists in a
1080 * Logical Collection, the Resolution Multiplier is associated with
1081 * all controls in the collection. If no Logical Collection is
1082 * defined, the Resolution Multiplier is associated with all
1083 * controls in the report."
1084 * HID Usage Table, v1.12, Section 4.3.1, p30
1085 *
1086 * Thus, search from the current collection upwards until we find a
1087 * logical collection. Then search all fields for that same parent
1088 * collection. Those are the fields the multiplier applies to.
1089 *
1090 * If we have more than one multiplier, it will overwrite the
1091 * applicable fields later.
1092 */
1093 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1094 while (multiplier_collection->parent_idx != -1 && multiplier_collection->type != HID_COLLECTION_LOGICAL) {
1095 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1096 }
1097
1098 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1099
1100 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1101 list_for_each_entry(rep, &rep_enum->report_list, list)
1102 {
1103 for (i = 0; i < rep->maxfield; i++) {
1104 field = rep->field[i];
1105 hid_apply_multiplier_to_field(hid, field, multiplier_collection, effective_multiplier);
1106 }
1107 }
1108 }
1109
1110 /*
1111 * hid_setup_resolution_multiplier - set up all resolution multipliers
1112 *
1113 * @hid: hid device
1114 *
1115 * Search for all Resolution Multiplier Feature Reports and apply their
1116 * value to all matching Input items. This only updates the internal struct
1117 * fields.
1118 *
1119 * The Resolution Multiplier is applied by the hardware. If the multiplier
1120 * is anything other than 1, the hardware will send pre-multiplied events
1121 * so that the same physical interaction generates an accumulated
1122 * accumulated_value = value * multiplier
1123 * This may be achieved by sending
1124 * - "value * multiplier" for each event, or
1125 * - "value" but "multiplier" times as frequently, or
1126 * - a combination of the above
1127 * The only guarantee is that the same physical interaction always generates
1128 * an accumulated 'value * multiplier'.
1129 *
1130 * This function must be called before any event processing and after
1131 * any SetRequest to the Resolution Multiplier.
1132 */
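/*
 * Example (illustrative): with an effective multiplier of 8, one wheel
 * detent may arrive as a single event of value 8, as eight events of
 * value 1, or anything in between; the accumulated total is the same.
 */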
1133 void hid_setup_resolution_multiplier(struct hid_device *hid)
1134 {
1135 struct hid_report_enum *rep_enum;
1136 struct hid_report *rep;
1137 struct hid_usage *usage;
1138 int i, j;
1139
1140 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1141 list_for_each_entry(rep, &rep_enum->report_list, list)
1142 {
1143 for (i = 0; i < rep->maxfield; i++) {
1144 /* Ignore if report count is out of bounds. */
1145 if (rep->field[i]->report_count < 1) {
1146 continue;
1147 }
1148
1149 for (j = 0; j < rep->field[i]->maxusage; j++) {
1150 usage = &rep->field[i]->usage[j];
1151 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) {
1152 hid_apply_multiplier(hid, rep->field[i]);
1153 }
1154 }
1155 }
1156 }
1157 }
1158 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1159
1160 /**
1161 * hid_open_report - open a driver-specific device report
1162 *
1163 * @device: hid device
1164 *
1165 * Parse a report description into a hid_device structure. Reports are
1166 * enumerated, fields are attached to these reports.
1167 * 0 returned on success, otherwise nonzero error value.
1168 *
1169 * This function (or the equivalent hid_parse() macro) should only be
1170 * called from probe() in drivers, before starting the device.
1171 */
1172 int hid_open_report(struct hid_device *device)
1173 {
1174 struct hid_parser *parser;
1175 struct hid_item item;
1176 unsigned int size;
1177 u8 *start;
1178 u8 *buf;
1179 u8 *end;
1180 u8 *next;
1181 int ret;
1182 static int (*dispatch_type[])(struct hid_parser * parser, struct hid_item * item) = {
1183 hid_parser_main, hid_parser_global, hid_parser_local, hid_parser_reserved};
1184
1185 if (WARN_ON(device->status & HID_STAT_PARSED)) {
1186 return -EBUSY;
1187 }
1188
1189 start = device->dev_rdesc;
1190 if (WARN_ON(!start)) {
1191 return -ENODEV;
1192 }
1193 size = device->dev_rsize;
1194
1195 buf = kmemdup(start, size, GFP_KERNEL);
1196 if (buf == NULL) {
1197 return -ENOMEM;
1198 }
1199
1200 if (device->driver->report_fixup) {
1201 start = device->driver->report_fixup(device, buf, &size);
1202 } else {
1203 start = buf;
1204 }
1205
1206 start = kmemdup(start, size, GFP_KERNEL);
1207 kfree(buf);
1208 if (start == NULL) {
1209 return -ENOMEM;
1210 }
1211
1212 device->rdesc = start;
1213 device->rsize = size;
1214
1215 parser = vzalloc(sizeof(struct hid_parser));
1216 if (!parser) {
1217 ret = -ENOMEM;
1218 goto alloc_err;
1219 }
1220
1221 parser->device = device;
1222
1223 end = start + size;
1224
1225 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, sizeof(struct hid_collection), GFP_KERNEL);
1226 if (!device->collection) {
1227 ret = -ENOMEM;
1228 goto err;
1229 }
1230 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1231
1232 ret = -EINVAL;
1233 while ((next = fetch_item(start, end, &item)) != NULL) {
1234 start = next;
1235
1236 if (item.format != HID_ITEM_FORMAT_SHORT) {
1237 goto err;
1238 }
1239
1240 if (dispatch_type[item.type](parser, &item)) {
1241 goto err;
1242 }
1243
1244 if (start == end) {
1245 if (parser->collection_stack_ptr) {
1246 goto err;
1247 }
1248 if (parser->local.delimiter_depth) {
1249 goto err;
1250 }
1251
1252 /*
1253 * fetch initial values in case the device's
1254 * default multiplier isn't the recommended 1
1255 */
1256 hid_setup_resolution_multiplier(device);
1257
1258 kfree(parser->collection_stack);
1259 vfree(parser);
1260 device->status |= HID_STAT_PARSED;
1261
1262 return 0;
1263 }
1264 }
1265
1266 err:
1267 kfree(parser->collection_stack);
1268 alloc_err:
1269 vfree(parser);
1270 hid_close_report(device);
1271 return ret;
1272 }
1273 EXPORT_SYMBOL_GPL(hid_open_report);
1274
1275 /*
1276 * Convert a signed n-bit integer to signed 32-bit integer. Common
1277 * cases are handled by the compiler; the remaining widths have to be
1278 * done by hand.
1279 */
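/*
 * Example: for a 12-bit field, snto32(0xFFE, 12) sign-extends the value
 * to -2, while snto32(0x7FE, 12) stays 2046.
 */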
1280
1281 static s32 snto32(u32 value, unsigned n)
1282 {
1283 if (!value || !n) {
1284 return 0;
1285 }
1286
1287 switch (n) {
1288 case HID_BYTE_BIT_WIDTH:
1289 return ((s8)value);
1290 case HID_WORD_BIT_WIDTH:
1291 return ((s16)value);
1292 case HID_DWORD_BIT_WIDTH:
1293 return ((s32)value);
1294 default:
1295 break;
1296 }
1297 return value & (1 << (n - 1)) ? value | (~0U << n) : value;
1298 }
1299
1300 s32 hid_snto32(u32 value, unsigned n)
1301 {
1302 return snto32(value, n);
1303 }
1304 EXPORT_SYMBOL_GPL(hid_snto32);
1305
1306 /*
1307 * Convert a signed 32-bit integer to a signed n-bit integer.
1308 */
1309
1310 static u32 s32ton(s32 value, unsigned n)
1311 {
1312 s32 a = value >> (n - 1);
1313 if (a && a != -1) {
1314 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
1315 }
1316 return value & ((1 << n) - 1);
1317 }
1318
1319 /*
1320 * Extract/implement a data field from/to a little endian report (bit array).
1321 *
1322 * Code sort-of follows HID spec:
1323 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1324 *
1325 * While the USB HID spec allows unlimited length bit fields in "report
1326 * descriptors", most devices never use more than 16 bits.
1327 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1328 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1329 */
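/*
 * Worked example (assumed report bytes): extracting 4 bits at bit offset
 * 12 from the bytes { 0x12, 0x34 } starts at report[1], shifts right by 4
 * and returns 0x3.
 */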
1330
1331 static u32 hid_core_extract(u8 *report, unsigned offset, int n)
1332 {
1333 unsigned int idx = offset / 8;
1334 unsigned int bit_nr = 0;
1335 unsigned int bit_shift = offset % 8;
1336 int bits_to_copy = 8 - bit_shift;
1337 u32 value = 0;
1338 u32 mask = n < HID_DWORD_BIT_WIDTH ? (1U << n) - 1 : ~0U;
1339
1340 while (n > 0) {
1341 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1342 n -= bits_to_copy;
1343 bit_nr += bits_to_copy;
1344 bits_to_copy = HID_BYTE_BIT_WIDTH;
1345 bit_shift = 0;
1346 idx++;
1347 }
1348
1349 return value & mask;
1350 }
1351
1352 u32 hid_field_extract(const struct hid_device *hid, u8 *report, unsigned offset, unsigned n)
1353 {
1354 if (n > HID_DWORD_BIT_WIDTH) {
1355 n = HID_DWORD_BIT_WIDTH;
1356 }
1357
1358 return hid_core_extract(report, offset, n);
1359 }
1360 EXPORT_SYMBOL_GPL(hid_field_extract);
1361
1362 /*
1363 * "implement" : set bits in a little endian bit stream.
1364 * Same concepts as "extract" (see comments above).
1365 * The data mangled in the bit stream remains in little endian
1366 * order the whole time. It makes more sense to talk about
1367 * endianness of register values by considering a register
1368 * a "cached" copy of the little endian bit stream.
1369 */
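/*
 * Worked example (assumed values): hid_core_implement(report, 12, 4, 0x5)
 * clears the upper nibble of report[1] and writes 0x5 into it, leaving
 * every other bit of the buffer untouched.
 */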
1370
1371 static void hid_core_implement(u8 *report, unsigned offset, int n, u32 value)
1372 {
1373 unsigned int idx = offset / HID_BYTE_BIT_WIDTH;
1374 unsigned int bit_shift = offset % HID_BYTE_BIT_WIDTH;
1375 int bits_to_set = HID_BYTE_BIT_WIDTH - bit_shift;
1376
1377 while (n - bits_to_set >= 0) {
1378 report[idx] &= ~(HID_BYTE_BIT_MASK << bit_shift);
1379 report[idx] |= value << bit_shift;
1380 value >>= bits_to_set;
1381 n -= bits_to_set;
1382 bits_to_set = HID_BYTE_BIT_WIDTH;
1383 bit_shift = 0;
1384 idx++;
1385 }
1386
1387 /* write any remaining bits (less than a full byte) */
1388 if (n) {
1389 u8 bit_mask = ((1U << n) - 1);
1390 report[idx] &= ~(bit_mask << bit_shift);
1391 report[idx] |= value << bit_shift;
1392 }
1393 }
1394
1395 static void implement(const struct hid_device *hid, u8 *report, unsigned offset, unsigned n, u32 value)
1396 {
1397 if (unlikely(n > HID_DWORD_BIT_WIDTH)) {
1398 n = HID_DWORD_BIT_WIDTH;
1399 } else if (n < HID_DWORD_BIT_WIDTH) {
1400 u32 m = (1U << n) - 1;
1401
1402 if (unlikely(value > m)) {
1403 WARN_ON(1);
1404 value &= m;
1405 }
1406 }
1407
1408 hid_core_implement(report, offset, n, value);
1409 }
1410
1411 /*
1412 * Search an array for a value.
1413 */
1414
1415 static int search(s32 *array, s32 value, unsigned n)
1416 {
1417 while (n--) {
1418 if (*array++ == value) {
1419 return 0;
1420 }
1421 }
1422 return -EPERM;
1423 }
1424
1425 /**
1426 * hid_match_report - check if driver's raw_event should be called
1427 *
1428 * @hid: hid device
1429 * @report: hid report to match against
1430 *
1431 * compare hid->driver->report_table->report_type to report->type
1432 */
1433 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1434 {
1435 const struct hid_report_id *id = hid->driver->report_table;
1436
1437 if (!id) { /* NULL means all */
1438 return 1;
1439 }
1440
1441 for (; id->report_type != HID_TERMINATOR; id++) {
1442 if (id->report_type == HID_ANY_ID || id->report_type == report->type) {
1443 return 1;
1444 }
1445 }
1446 return 0;
1447 }
1448
1449 /**
1450 * hid_match_usage - check if driver's event should be called
1451 *
1452 * @hid: hid device
1453 * @usage: usage to match against
1454 *
1455 * compare hid->driver->usage_table->usage_{type,code} to
1456 * usage->usage_{type,code}
1457 */
1458 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1459 {
1460 const struct hid_usage_id *id = hid->driver->usage_table;
1461
1462 if (!id) { /* NULL means all */
1463 return 1;
1464 }
1465
1466 for (; id->usage_type != HID_ANY_ID - 1; id++) {
1467 if ((id->usage_hid == HID_ANY_ID || id->usage_hid == usage->hid) &&
1468 (id->usage_type == HID_ANY_ID || id->usage_type == usage->type) &&
1469 (id->usage_code == HID_ANY_ID || id->usage_code == usage->code)) {
1470 return 1;
1471 }
1472 }
1473 return 0;
1474 }
1475
1476 static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, s32 value,
1477 int interrupt)
1478 {
1479 struct hid_driver *hdrv = hid->driver;
1480 int ret;
1481
1482 if (!list_empty(&hid->debug_list)) {
1483 hid_dump_input(hid, usage, value);
1484 }
1485
1486 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1487 ret = hdrv->event(hid, field, usage, value);
1488 if (ret != 0) {
1489 if (ret < 0) {
1490 return;
1491 }
1492 }
1493 }
1494
1495 if (hid->claimed & HID_CLAIMED_INPUT) {
1496 hidinput_hid_event(hid, field, usage, value);
1497 }
1498 if ((hid->claimed & HID_CLAIMED_HIDDEV) && interrupt && hid->hiddev_hid_event) {
1499 hid->hiddev_hid_event(hid, field, usage, value);
1500 }
1501 }
1502
1503 /*
1504 * Analyse a received field, and fetch the data from it. The field
1505 * content is stored for next report processing (we do differential
1506 * reporting to the layer).
1507 */
1508
1509 static void hid_input_field(struct hid_device *hid, struct hid_field *field, u8 *data, int interrupt)
1510 {
1511 unsigned n;
1512 unsigned count = field->report_count;
1513 unsigned offset = field->report_offset;
1514 unsigned size = field->report_size;
1515 s32 min = field->logical_minimum;
1516 s32 max = field->logical_maximum;
1517 s32 *value;
1518
1519 value = kmalloc_array(count, sizeof(s32), GFP_ATOMIC);
1520 if (!value) {
1521 return;
1522 }
1523 for (n = 0; n < count; n++) {
1524 value[n] = min < 0 ? snto32(hid_field_extract(hid, data, offset + n * size, size), size)
1525 : hid_field_extract(hid, data, offset + n * size, size);
1526
1527 /* Ignore report if ErrorRollOver */
1528 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && value[n] >= min && value[n] <= max &&
1529 value[n] - min < field->maxusage && field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1530 goto exit;
1531 }
1532 }
1533
1534 for (n = 0; n < count; n++) {
1535 if (HID_MAIN_ITEM_VARIABLE & field->flags) {
1536 hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
1537 continue;
1538 }
1539 if (field->value[n] >= min && field->value[n] <= max && field->value[n] - min < field->maxusage &&
1540 field->usage[field->value[n] - min].hid && search(value, field->value[n], count)) {
1541 hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
1542 }
1543 if (value[n] >= min && value[n] <= max && value[n] - min < field->maxusage &&
1544 field->usage[value[n] - min].hid && search(field->value, value[n], count)) {
1545 hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
1546 }
1547 }
1548 memcpy(field->value, value, count * sizeof(s32));
1549 exit:
1550 kfree(value);
1551 }
1552
1553 /*
1554 * Output the field into the report.
1555 */
1556
1557 static void hid_output_field(const struct hid_device *hid, struct hid_field *field, u8 *data)
1558 {
1559 unsigned count = field->report_count;
1560 unsigned offset = field->report_offset;
1561 unsigned size = field->report_size;
1562 unsigned n;
1563
1564 for (n = 0; n < count; n++) {
1565 if (field->logical_minimum < 0) { /* signed values */
1566 implement(hid, data, offset + n * size, size, s32ton(field->value[n], size));
1567 } else { /* unsigned values */
1568 implement(hid, data, offset + n * size, size, field->value[n]);
1569 }
1570 }
1571 }
1572
1573 /*
1574 * Compute the size of a report.
1575 */
1576 static size_t hid_compute_report_size(struct hid_report *report)
1577 {
1578 if (report->size) {
1579 return ((report->size - 1) >> HID_REPORT_SIZE_MASK) + 1;
1580 }
1581
1582 return 0;
1583 }
1584
1585 /*
1586 * Create a report. 'data' has to be allocated using
1587 * hid_alloc_report_buf() so that it has proper size.
1588 */
1589
1590 void hid_output_report(struct hid_report *report, u8 *data)
1591 {
1592 unsigned n;
1593
1594 if (report->id > 0) {
1595 *data++ = report->id;
1596 }
1597
1598 memset(data, 0, hid_compute_report_size(report));
1599 for (n = 0; n < report->maxfield; n++) {
1600 hid_output_field(report->device, report->field[n], data);
1601 }
1602 }
1603 EXPORT_SYMBOL_GPL(hid_output_report);
1604
1605 /*
1606 * Allocator for buffer that is going to be passed to hid_output_report()
1607 */
1608 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1609 {
1610 /*
1611 * 7 extra bytes are necessary to achieve proper functionality
1612 * of implement() working on 8 byte chunks
1613 */
1614
1615 u32 len = hid_report_len(report) + HID_BYTE_CHUNK;
1616
1617 return kmalloc(len, flags);
1618 }
1619 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1620
1621 /*
1622 * Set a field value. The report this field belongs to has to be
1623 * created and transferred to the device, to set this value in the
1624 * device.
1625 */
1626
1627 int hid_set_field(struct hid_field *field, unsigned offset, s32 value)
1628 {
1629 unsigned size;
1630
1631 if (!field) {
1632 return -EPERM;
1633 }
1634
1635 size = field->report_size;
1636
1637 hid_dump_input(field->report->device, field->usage + offset, value);
1638
1639 if (offset >= field->report_count) {
1640 return -EPERM;
1641 }
1642 if (field->logical_minimum < 0) {
1643 if (value != snto32(s32ton(value, size), size)) {
1644 return -EPERM;
1645 }
1646 }
1647 field->value[offset] = value;
1648 return 0;
1649 }
1650 EXPORT_SYMBOL_GPL(hid_set_field);
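/*
 * Typical driver-side sequence (sketch; the report and field indexes are
 * illustrative, not from any particular device):
 *
 *	hid_set_field(report->field[0], 0, value);
 *	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
 */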
1651
1652 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, const u8 *data)
1653 {
1654 struct hid_report *report;
1655 unsigned int n = 0; /* Normally report number is 0 */
1656
1657 /* Device uses numbered reports, data[0] is report number */
1658 if (report_enum->numbered) {
1659 n = *data;
1660 }
1661
1662 report = report_enum->report_id_hash[n];
1663
1664 return report;
1665 }
1666
1667 /*
1668 * Implement a generic .request() callback, using .raw_request()
1669 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1670 */
1671 int __hid_request(struct hid_device *hid, struct hid_report *report, int reqtype)
1672 {
1673 char *buf;
1674 int ret;
1675 u32 len;
1676
1677 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1678 if (!buf) {
1679 return -ENOMEM;
1680 }
1681
1682 len = hid_report_len(report);
1683
1684 if (reqtype == HID_REQ_SET_REPORT) {
1685 hid_output_report(report, buf);
1686 }
1687
1688 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, report->type, reqtype);
1689 if (ret < 0) {
1690 goto out;
1691 }
1692
1693 if (reqtype == HID_REQ_GET_REPORT) {
1694 hid_input_report(hid, report->type, buf, ret, 0);
1695 }
1696
1697 ret = 0;
1698
1699 out:
1700 kfree(buf);
1701 return ret;
1702 }
1703 EXPORT_SYMBOL_GPL(__hid_request);
1704
1705 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
1706 {
1707 struct hid_report_enum *report_enum = hid->report_enum + type;
1708 struct hid_report *report;
1709 struct hid_driver *hdrv;
1710 unsigned int a;
1711 u32 rsize, csize = size;
1712 u8 *cdata = data;
1713 int ret = 0;
1714
1715 report = hid_get_report(report_enum, data);
1716 if (!report) {
1717 goto out;
1718 }
1719
1720 if (report_enum->numbered) {
1721 cdata++;
1722 csize--;
1723 }
1724
1725 rsize = hid_compute_report_size(report);
1726 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) {
1727 rsize = HID_MAX_BUFFER_SIZE - 1;
1728 } else if (rsize > HID_MAX_BUFFER_SIZE) {
1729 rsize = HID_MAX_BUFFER_SIZE;
1730 }
1731
1732 if (csize < rsize) {
1733 memset(cdata + csize, 0, rsize - csize);
1734 }
1735
1736 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) {
1737 hid->hiddev_report_event(hid, report);
1738 }
1739 if (hid->claimed & HID_CLAIMED_HIDRAW) {
1740 ret = hidraw_report_event(hid, data, size);
1741 if (ret) {
1742 goto out;
1743 }
1744 }
1745
1746 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
1747 for (a = 0; a < report->maxfield; a++) {
1748 hid_input_field(hid, report->field[a], cdata, interrupt);
1749 }
1750 hdrv = hid->driver;
1751 if (hdrv && hdrv->report) {
1752 hdrv->report(hid, report);
1753 }
1754 }
1755
1756 if (hid->claimed & HID_CLAIMED_INPUT) {
1757 hidinput_report_event(hid, report);
1758 }
1759 out:
1760 return ret;
1761 }
1762 EXPORT_SYMBOL_GPL(hid_report_raw_event);
1763
1764 /**
1765 * hid_input_report - report data from lower layer (usb, bt...)
1766 *
1767 * @hid: hid device
1768 * @type: HID report type (HID_*_REPORT)
1769 * @data: report contents
1770 * @size: size of data parameter
1771 * @interrupt: distinguish between interrupt and control transfers
1772 *
1773 * This is data entry for lower layers.
1774 */
1775 int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
1776 {
1777 struct hid_report_enum *report_enum;
1778 struct hid_driver *hdrv;
1779 struct hid_report *report;
1780 int ret = 0;
1781 if (!hid) {
1782 return -ENODEV;
1783 }
1784 if (down_trylock(&hid->driver_input_lock)) {
1785 return -EBUSY;
1786 }
1787 if (!hid->driver) {
1788 ret = -ENODEV;
1789 goto unlock;
1790 }
1791 report_enum = hid->report_enum + type;
1792 hdrv = hid->driver;
1793 if (!size) {
1794 ret = -1;
1795 goto unlock;
1796 }
1797 /* Avoid unnecessary overhead if debugfs is disabled */
1798 if (!list_empty(&hid->debug_list)) {
1799 hid_dump_report(hid, type, data, size);
1800 }
1801 report = hid_get_report(report_enum, data);
1802 if (!report) {
1803 ret = -1;
1804 goto unlock;
1805 }
1806 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
1807 ret = hdrv->raw_event(hid, report, data, size);
1808 if (ret < 0) {
1809 goto unlock;
1810 }
1811 }
1812 ret = hid_report_raw_event(hid, type, data, size, interrupt);
1813 unlock:
1814 up(&hid->driver_input_lock);
1815 return ret;
1816 }
1817 EXPORT_SYMBOL_GPL(hid_input_report);
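/*
 * Transport-driver sketch (illustrative): from its data-ready path, e.g. a
 * URB completion handler, a low level driver forwards received bytes with
 *
 *	hid_input_report(hid, HID_INPUT_REPORT, data, len, 1);
 *
 * passing 1 for interrupt transfers and 0 for control transfers.
 */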
1818
1819 bool hid_match_one_id(const struct hid_device *hdev, const struct hid_device_id *id)
1820 {
1821 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
1822 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
1823 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
1824 (id->product == HID_ANY_ID || id->product == hdev->product);
1825 }
1826
1827 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, const struct hid_device_id *id)
1828 {
1829 for (; id->bus; id++) {
1830 if (hid_match_one_id(hdev, id)) {
1831 return id;
1832 }
1833 }
1834
1835 return NULL;
1836 }
1837
1838 static const struct hid_device_id hid_hiddev_list[] = {{HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS)},
1839 {HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1)},
1840 {}};
1841
1842 static bool hid_hiddev(struct hid_device *hdev)
1843 {
1844 return !!hid_match_id(hdev, hid_hiddev_list);
1845 }
1846
1847 static ssize_t read_report_descriptor(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf,
1848 loff_t off, size_t count)
1849 {
1850 struct device *dev = kobj_to_dev(kobj);
1851 struct hid_device *hdev = to_hid_device(dev);
1852
1853 if (off >= hdev->rsize) {
1854 return 0;
1855 }
1856
1857 if (off + count > hdev->rsize) {
1858 count = hdev->rsize - off;
1859 }
1860
1861 memcpy(buf, hdev->rdesc + off, count);
1862
1863 return count;
1864 }
1865
1866 static ssize_t show_country(struct device *dev, struct device_attribute *attr, char *buf)
1867 {
1868 struct hid_device *hdev = to_hid_device(dev);
1869
1870 return sprintf(buf, "%02x\n", hdev->country & 0xff);
1871 }
1872
1873 static struct bin_attribute dev_bin_attr_report_desc = {
1874 .attr = {.name = "report_descriptor", .mode = 0444},
1875 .read = read_report_descriptor,
1876 .size = HID_MAX_DESCRIPTOR_SIZE,
1877 };
1878
1879 static const struct device_attribute dev_attr_country = {
1880 .attr = {.name = "country", .mode = 0444},
1881 .show = show_country,
1882 };
1883
1884 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
1885 {
1886 static const char *types[] = {
1887 "Device", "Pointer", "Mouse", "Device", "Joystick", "Gamepad", "Keyboard", "Keypad", "Multi-Axis Controller"};
1888 const char *type, *bus;
1889 char buf[64] = "";
1890 unsigned int i;
1891 int len;
1892 int ret;
1893
1894 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) {
1895 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
1896 }
1897 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) {
1898 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
1899 }
1900 if (hdev->bus != BUS_USB) {
1901 connect_mask &= ~HID_CONNECT_HIDDEV;
1902 }
1903 if (hid_hiddev(hdev)) {
1904 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
1905 }
1906
1907 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, connect_mask & HID_CONNECT_HIDINPUT_FORCE)) {
1908 hdev->claimed |= HID_CLAIMED_INPUT;
1909 }
1910
1911 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
1912 !hdev->hiddev_connect(hdev, connect_mask & HID_CONNECT_HIDDEV_FORCE)) {
1913 hdev->claimed |= HID_CLAIMED_HIDDEV;
1914 }
1915 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) {
1916 hdev->claimed |= HID_CLAIMED_HIDRAW;
1917 }
1918
1919 if (connect_mask & HID_CONNECT_DRIVER) {
1920 hdev->claimed |= HID_CLAIMED_DRIVER;
1921 }
1922
1923 /* Drivers with the ->raw_event callback set are not required to connect
1924 * to any other listener. */
1925 if (!hdev->claimed && !hdev->driver->raw_event) {
1926 return -ENODEV;
1927 }
1928
1929 if ((hdev->claimed & HID_CLAIMED_INPUT) && (connect_mask & HID_CONNECT_FF) && hdev->ff_init) {
1930 hdev->ff_init(hdev);
1931 }
1932
1933 len = 0;
1934 if (hdev->claimed & HID_CLAIMED_INPUT) {
1935 len += sprintf(buf + len, "input");
1936 }
1937 if (hdev->claimed & HID_CLAIMED_HIDDEV) {
1938 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", ((struct hiddev *)hdev->hiddev)->minor);
1939 }
1940 if (hdev->claimed & HID_CLAIMED_HIDRAW) {
1941 len += sprintf(buf + len, "%shidraw%d", len ? "," : "", ((struct hidraw *)hdev->hidraw)->minor);
1942 }
1943
1944 type = "Device";
1945 for (i = 0; i < hdev->maxcollection; i++) {
1946 struct hid_collection *col = &hdev->collection[i];
1947 if (col->type == HID_COLLECTION_APPLICATION && (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
1948 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
1949 type = types[col->usage & 0xffff];
1950 break;
1951 }
1952 }
1953
1954 switch (hdev->bus) {
1955 case BUS_USB:
1956 bus = "USB";
1957 break;
1958 case BUS_BLUETOOTH:
1959 bus = "BLUETOOTH";
1960 break;
1961 case BUS_I2C:
1962 bus = "I2C";
1963 break;
1964 case BUS_VIRTUAL:
1965 bus = "VIRTUAL";
1966 break;
1967 default:
1968 bus = "<UNKNOWN>";
1969 }
1970
1971 ret = device_create_file(&hdev->dev, &dev_attr_country);
if (ret) {
hid_warn(hdev, "can't create sysfs country code attribute err: %d\n", ret);
}

hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", buf, bus, hdev->version >> 8, hdev->version & 0xff, type,
hdev->name, hdev->phys);

1972 return 0;
1973 }
1974 EXPORT_SYMBOL_GPL(hid_connect);
1975
1976 void hid_disconnect(struct hid_device *hdev)
1977 {
1978 device_remove_file(&hdev->dev, &dev_attr_country);
1979 if (hdev->claimed & HID_CLAIMED_INPUT) {
1980 hidinput_disconnect(hdev);
1981 }
1982 if (hdev->claimed & HID_CLAIMED_HIDDEV) {
1983 hdev->hiddev_disconnect(hdev);
1984 }
1985 if (hdev->claimed & HID_CLAIMED_HIDRAW) {
1986 hidraw_disconnect(hdev);
1987 }
1988 hdev->claimed = 0;
1989 }
1990 EXPORT_SYMBOL_GPL(hid_disconnect);
1991
1992 /**
1993 * hid_hw_start - start underlying HW
1994 * @hdev: hid device
1995 * @connect_mask: which outputs to connect, see HID_CONNECT_*
1996 *
1997 * Call this in probe function *after* hid_parse. This will set up HW
1998 * buffers and start the device (if not deferred to device open).
1999 * hid_hw_stop must be called if this was successful.
2000 */
2001 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2002 {
2003 int error;
2004
2005 error = hdev->ll_driver->start(hdev);
2006 if (error) {
2007 return error;
2008 }
2009
2010 if (connect_mask) {
2011 error = hid_connect(hdev, connect_mask);
2012 if (error) {
2013 hdev->ll_driver->stop(hdev);
2014 return error;
2015 }
2016 }
2017
2018 return 0;
2019 }
2020 EXPORT_SYMBOL_GPL(hid_hw_start);
2021
2022 /**
2023 * hid_hw_stop - stop underlying HW
2024 * @hdev: hid device
2025 *
2026 * This is usually called from the remove function, or from probe when
2027 * something failed and hid_hw_start was already called.
2028 */
2029 void hid_hw_stop(struct hid_device *hdev)
2030 {
2031 hid_disconnect(hdev);
2032 hdev->ll_driver->stop(hdev);
2033 }
2034 EXPORT_SYMBOL_GPL(hid_hw_stop);
2035
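/*
 * Usage sketch (illustrative only): the canonical probe/remove pair for a
 * HID driver built on hid_hw_start()/hid_hw_stop(), with hid_parse() run
 * first as required. The "sample" identifiers are hypothetical placeholders:
 *
 *	static int sample_probe(struct hid_device *hdev, const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 *
 *	static void sample_remove(struct hid_device *hdev)
 *	{
 *		hid_hw_stop(hdev);
 *	}
 */
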
2036 /**
2037 * hid_hw_open - signal underlying HW to start delivering events
2038 * @hdev: hid device
2039 *
2040 * Tell underlying HW to start delivering events from the device.
2041 * This function should be called sometime after a successful call
2042 * to hid_hw_start().
2043 */
2044 int hid_hw_open(struct hid_device *hdev)
2045 {
2046 int ret;
2047
2048 ret = mutex_lock_killable(&hdev->ll_open_lock);
2049 if (ret) {
2050 return ret;
2051 }
2052
2053 if (!hdev->ll_open_count++) {
2054 ret = hdev->ll_driver->open(hdev);
2055 if (ret) {
2056 hdev->ll_open_count--;
2057 }
2058 }
2059
2060 mutex_unlock(&hdev->ll_open_lock);
2061 return ret;
2062 }
2063 EXPORT_SYMBOL_GPL(hid_hw_open);
2064
2065 /**
2066 * hid_hw_close - signal underlying HW to stop delivering events
2067 *
2068 * @hdev: hid device
2069 *
2070 * This function indicates that we are not interested in the events
2071 * from this device anymore. Delivery of events may or may not stop,
2072 * depending on the number of users still outstanding.
2073 */
2074 void hid_hw_close(struct hid_device *hdev)
2075 {
2076 mutex_lock(&hdev->ll_open_lock);
2077 if (!--hdev->ll_open_count) {
2078 hdev->ll_driver->close(hdev);
2079 }
2080 mutex_unlock(&hdev->ll_open_lock);
2081 }
2082 EXPORT_SYMBOL_GPL(hid_hw_close);
2083
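/*
 * Usage sketch (illustrative only): hid_hw_open()/hid_hw_close() are
 * reference counted under ll_open_lock, so callers simply bracket the period
 * during which they need events delivered. A common pattern is an input
 * open/close pair ("sample" names are hypothetical; drvdata is assumed to
 * point at the hid_device):
 *
 *	static int sample_input_open(struct input_dev *input)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(input);
 *
 *		return hid_hw_open(hdev);
 *	}
 *
 *	static void sample_input_close(struct input_dev *input)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(input);
 *
 *		hid_hw_close(hdev);
 *	}
 */
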
2084 struct hid_dynid {
2085 struct list_head list;
2086 struct hid_device_id id;
2087 };
2088
2089 /**
2090 * new_id_store - add a new HID device ID to this driver and re-probe devices
2091 * @drv: target device driver
2092 * @buf: buffer for scanning device ID data
2093 * @count: input size
2094 *
2095 * Adds a new dynamic hid device ID to this driver,
2096 * and causes the driver to probe for all devices again.
2097 */
2098 static ssize_t new_id_store(struct device_driver *drv, const char *buf, size_t count)
2099 {
2100 struct hid_driver *hdrv = to_hid_driver(drv);
2101 struct hid_dynid *dynid;
2102 u32 bus, vendor, product;
2103 unsigned long driver_data = 0;
2104 int ret;
2105
2106 ret = sscanf(buf, "%x %x %x %lx", &bus, &vendor, &product, &driver_data);
2107 if (ret < 3) {
2108 return -EINVAL;
2109 }
2110
2111 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2112 if (!dynid) {
2113 return -ENOMEM;
2114 }
2115
2116 dynid->id.bus = bus;
2117 dynid->id.group = HID_GROUP_ANY;
2118 dynid->id.vendor = vendor;
2119 dynid->id.product = product;
2120 dynid->id.driver_data = driver_data;
2121
2122 spin_lock(&hdrv->dyn_lock);
2123 list_add_tail(&dynid->list, &hdrv->dyn_list);
2124 spin_unlock(&hdrv->dyn_lock);
2125
2126 ret = driver_attach(&hdrv->driver);
2127
2128 return ret ?: count;
2129 }
2130 static DRIVER_ATTR_WO(new_id);
2131
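/*
 * Usage sketch (illustrative only): the new_id attribute is written from user
 * space as "bus vendor product [driver_data]", all in hexadecimal, matching
 * the sscanf() format above. For example, with hypothetical placeholder IDs
 * (0x0003 is BUS_USB; the optional fourth field becomes id.driver_data):
 *
 *	# echo "0003 dead beef" > /sys/bus/hid/drivers/<driver>/new_id
 */
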
2132 static struct attribute *hid_drv_attrs[] = {
2133 &driver_attr_new_id.attr,
2134 NULL,
2135 };
2136 ATTRIBUTE_GROUPS(hid_drv);
2137
2138 static void hid_free_dynids(struct hid_driver *hdrv)
2139 {
2140 struct hid_dynid *dynid, *n;
2141
2142 spin_lock(&hdrv->dyn_lock);
2143 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list)
2144 {
2145 list_del(&dynid->list);
2146 kfree(dynid);
2147 }
2148 spin_unlock(&hdrv->dyn_lock);
2149 }
2150
2151 const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv)
2152 {
2153 struct hid_dynid *dynid;
2154
2155 spin_lock(&hdrv->dyn_lock);
2156 list_for_each_entry(dynid, &hdrv->dyn_list, list)
2157 {
2158 if (hid_match_one_id(hdev, &dynid->id)) {
2159 spin_unlock(&hdrv->dyn_lock);
2160 return &dynid->id;
2161 }
2162 }
2163 spin_unlock(&hdrv->dyn_lock);
2164
2165 return hid_match_id(hdev, hdrv->id_table);
2166 }
2167 EXPORT_SYMBOL_GPL(hid_match_device);
2168
2169 static int hid_bus_match(struct device *dev, struct device_driver *drv)
2170 {
2171 struct hid_driver *hdrv = to_hid_driver(drv);
2172 struct hid_device *hdev = to_hid_device(dev);
2173
2174 return hid_match_device(hdev, hdrv) != NULL;
2175 }
2176
2177 /**
2178 * hid_compare_device_paths - check if both devices share the same path
2179 * @hdev_a: hid device
2180 * @hdev_b: hid device
2181 * @separator: char to use as separator
2182 *
2183 * Check if two devices share the same path up to the last occurrence of
2184 * the separator char. Both paths must exist (i.e., zero-length paths
2185 * don't match).
2186 */
2187 bool hid_compare_device_paths(struct hid_device *hdev_a, struct hid_device *hdev_b, char separator)
2188 {
2189 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2190 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2191 if (n1 != n2 || n1 <= 0 || n2 <= 0) {
2192 return false;
2193 }
2194 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2195 }
2196 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2197
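/*
 * Usage sketch (illustrative only): drivers that expose several HID
 * interfaces on one physical device can use hid_compare_device_paths() to
 * find sibling interfaces. Assuming typical phys strings of the form
 * "usb-0000:00:14.0-2/input0" and "usb-0000:00:14.0-2/input1", comparing up
 * to the last '/' reports that both belong to the same physical device:
 *
 *	if (hid_compare_device_paths(hdev_a, hdev_b, '/'))
 *		(same parent device, different interface)
 */
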
2198 static int hid_device_probe(struct device *dev)
2199 {
2200 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2201 struct hid_device *hdev = to_hid_device(dev);
2202 const struct hid_device_id *id;
2203 int ret = 0;
2204
2205 if (down_interruptible(&hdev->driver_input_lock)) {
2206 ret = -EINTR;
2207 goto end;
2208 }
2209 hdev->io_started = false;
2210
2211 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2212
2213 if (!hdev->driver) {
2214 id = hid_match_device(hdev, hdrv);
2215 if (id == NULL) {
2216 ret = -ENODEV;
2217 goto unlock;
2218 }
2219
2220 if (hdrv->match) {
2221 if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
2222 ret = -ENODEV;
2223 goto unlock;
2224 }
2225 } else {
2226 /*
2227 * hid-generic implements .match(), so if
2228 * hid_ignore_special_drivers is set, we can safely
2229 * return.
2230 */
2231 if (hid_ignore_special_drivers) {
2232 ret = -ENODEV;
2233 goto unlock;
2234 }
2235 }
2236
2237 /* reset the quirks that have been previously set */
2238 hdev->quirks = hid_lookup_quirk(hdev);
2239 hdev->driver = hdrv;
2240 if (hdrv->probe) {
2241 ret = hdrv->probe(hdev, id);
2242 } else { /* default probe */
2243 ret = hid_open_report(hdev);
2244 if (!ret) {
2245 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2246 }
2247 }
2248 if (ret) {
2249 hid_close_report(hdev);
2250 hdev->driver = NULL;
2251 }
2252 }
2253 unlock:
2254 if (!hdev->io_started) {
2255 up(&hdev->driver_input_lock);
2256 }
2257 end:
2258 return ret;
2259 }
2260
2261 static int hid_device_remove(struct device *dev)
2262 {
2263 struct hid_device *hdev = to_hid_device(dev);
2264 struct hid_driver *hdrv;
2265
2266 down(&hdev->driver_input_lock);
2267 hdev->io_started = false;
2268
2269 hdrv = hdev->driver;
2270 if (hdrv) {
2271 if (hdrv->remove) {
2272 hdrv->remove(hdev);
2273 } else { /* default remove */
2274 hid_hw_stop(hdev);
2275 }
2276 hid_close_report(hdev);
2277 hdev->driver = NULL;
2278 }
2279
2280 if (!hdev->io_started) {
2281 up(&hdev->driver_input_lock);
2282 }
2283
2284 return 0;
2285 }
2286
2287 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, char *buf)
2288 {
2289 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2290
2291 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n", hdev->bus, hdev->group, hdev->vendor, hdev->product);
2292 }
2293 static DEVICE_ATTR_RO(modalias);
2294
2295 static struct attribute *hid_dev_attrs[] = {
2296 &dev_attr_modalias.attr,
2297 NULL,
2298 };
2299 static struct bin_attribute *hid_dev_bin_attrs[] = {&dev_bin_attr_report_desc, NULL};
2300 static const struct attribute_group hid_dev_group = {
2301 .attrs = hid_dev_attrs,
2302 .bin_attrs = hid_dev_bin_attrs,
2303 };
2304 __ATTRIBUTE_GROUPS(hid_dev);
2305
2306 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
2307 {
2308 struct hid_device *hdev = to_hid_device(dev);
2309
2310 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", hdev->bus, hdev->vendor, hdev->product)) {
2311 return -ENOMEM;
2312 }
2313
2314 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) {
2315 return -ENOMEM;
2316 }
2317
2318 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) {
2319 return -ENOMEM;
2320 }
2321
2322 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) {
2323 return -ENOMEM;
2324 }
2325
2326 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", hdev->bus, hdev->group, hdev->vendor, hdev->product)) {
2327 return -ENOMEM;
2328 }
2329
2330 return 0;
2331 }
2332
2333 struct bus_type hid_bus_type = {
2334 .name = "hid",
2335 .dev_groups = hid_dev_groups,
2336 .drv_groups = hid_drv_groups,
2337 .match = hid_bus_match,
2338 .probe = hid_device_probe,
2339 .remove = hid_device_remove,
2340 .uevent = hid_uevent,
2341 };
2342 EXPORT_SYMBOL(hid_bus_type);
2343
2344 int hid_add_device(struct hid_device *hdev)
2345 {
2346 static atomic_t id = ATOMIC_INIT(0);
2347 int ret;
2348
2349 if (WARN_ON(hdev->status & HID_STAT_ADDED)) {
2350 return -EBUSY;
2351 }
2352
2353 hdev->quirks = hid_lookup_quirk(hdev);
2354
2355 /* we need to kill them here, otherwise they will stay allocated,
2356 * waiting for a driver that will never bind to them */
2357 if (hid_ignore(hdev)) {
2358 return -ENODEV;
2359 }
2360
2361 /*
2362 * Check for the mandatory transport channel.
2363 */
2364 if (!hdev->ll_driver->raw_request) {
2365 return -EINVAL;
2366 }
2367
2368 /*
2369 * Read the device report descriptor once and use as template
2370 * for the driver-specific modifications.
2371 */
2372 ret = hdev->ll_driver->parse(hdev);
2373 if (ret) {
2374 return ret;
2375 }
2376 if (!hdev->dev_rdesc) {
2377 return -ENODEV;
2378 }
2379
2380 /*
2381 * Scan generic devices for group information
2382 */
2383 if (hid_ignore_special_drivers) {
2384 hdev->group = HID_GROUP_GENERIC;
2385 } else if (!hdev->group && !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2386 ret = hid_scan_report(hdev);
2387 }
2388
2389 /* XXX hack, any other cleaner solution after the driver core
2390 * is converted to allow more than 20 bytes as the device name? */
2391 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, hdev->vendor, hdev->product, atomic_inc_return(&id));
2392
2393 hid_debug_register(hdev, dev_name(&hdev->dev));
2394 ret = device_add(&hdev->dev);
2395 if (!ret) {
2396 hdev->status |= HID_STAT_ADDED;
2397 } else {
2398 hid_debug_unregister(hdev);
2399 }
2400
2401 return ret;
2402 }
2403 EXPORT_SYMBOL_GPL(hid_add_device);
2404
2405 /**
2406 * hid_allocate_device - allocate new hid device descriptor
2407 *
2408 * Allocate and initialize hid device, so that hid_destroy_device might be
2409 * used to free it.
2410 *
2411 * Returns a new hid_device pointer on success, otherwise an ERR_PTR-encoded
2412 * error value.
2413 */
2414 struct hid_device *hid_allocate_device(void)
2415 {
2416 struct hid_device *hdev;
2417 int ret = -ENOMEM;
2418
2419 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2420 if (hdev == NULL) {
2421 return ERR_PTR(ret);
2422 }
2423
2424 device_initialize(&hdev->dev);
2425 hdev->dev.release = hid_device_release;
2426 hdev->dev.bus = &hid_bus_type;
2427 device_enable_async_suspend(&hdev->dev);
2428
2429 hid_close_report(hdev);
2430
2431 init_waitqueue_head(&hdev->debug_wait);
2432 INIT_LIST_HEAD(&hdev->debug_list);
2433 spin_lock_init(&hdev->debug_list_lock);
2434 sema_init(&hdev->driver_input_lock, 1);
2435 mutex_init(&hdev->ll_open_lock);
2436
2437 return hdev;
2438 }
2439 EXPORT_SYMBOL_GPL(hid_allocate_device);
2440
2441 static void hid_remove_device(struct hid_device *hdev)
2442 {
2443 if (hdev->status & HID_STAT_ADDED) {
2444 device_del(&hdev->dev);
2445 hid_debug_unregister(hdev);
2446 hdev->status &= ~HID_STAT_ADDED;
2447 }
2448 kfree(hdev->dev_rdesc);
2449 hdev->dev_rdesc = NULL;
2450 hdev->dev_rsize = 0;
2451 }
2452
2453 /**
2454 * hid_destroy_device - free previously allocated device
2455 *
2456 * @hdev: hid device
2457 *
2458 * If you allocate hid_device through hid_allocate_device, you should always
2459 * free it by this function.
2460 */
2461 void hid_destroy_device(struct hid_device *hdev)
2462 {
2463 hid_remove_device(hdev);
2464 put_device(&hdev->dev);
2465 }
2466 EXPORT_SYMBOL_GPL(hid_destroy_device);
2467
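/*
 * Usage sketch (illustrative only): the allocate/add/destroy lifecycle as
 * seen from a transport ("ll") driver. "my_ll_driver" and the numeric IDs
 * below are hypothetical placeholders; per the checks in hid_add_device(),
 * a real transport must at least provide ->raw_request() and a ->parse()
 * callback that populates dev_rdesc:
 *
 *	struct hid_device *hdev;
 *	int ret;
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *
 *	hdev->ll_driver = &my_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *	hdev->vendor = 0x1234;
 *	hdev->product = 0x5678;
 *
 *	ret = hid_add_device(hdev);
 *	if (ret) {
 *		hid_destroy_device(hdev);
 *		return ret;
 *	}
 *
 *	(later, on teardown:)
 *	hid_destroy_device(hdev);
 */
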
2468 static int hid_bus_reprobe_drivers(struct device *dev, void *data)
2469 {
2470 struct hid_driver *hdrv = data;
2471 struct hid_device *hdev = to_hid_device(dev);
2472
2473 if (hdev->driver == hdrv && !hdrv->match(hdev, hid_ignore_special_drivers) &&
2474 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) {
2475 return device_reprobe(dev);
2476 }
2477
2478 return 0;
2479 }
2480
2481 static int hid_bus_driver_added(struct device_driver *drv, void *data)
2482 {
2483 struct hid_driver *hdrv = to_hid_driver(drv);
2484
2485 if (hdrv->match) {
2486 bus_for_each_dev(&hid_bus_type, NULL, hdrv, hid_bus_reprobe_drivers);
2487 }
2488
2489 return 0;
2490 }
2491
2492 static int bus_removed_driver(struct device_driver *drv, void *data)
2493 {
2494 return bus_rescan_devices(&hid_bus_type);
2495 }
2496
2497 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, const char *mod_name)
2498 {
2499 int ret;
2500 hdrv->driver.name = hdrv->name;
2501 hdrv->driver.bus = &hid_bus_type;
2502 hdrv->driver.owner = owner;
2503 hdrv->driver.mod_name = mod_name;
2504 INIT_LIST_HEAD(&hdrv->dyn_list);
2505 spin_lock_init(&hdrv->dyn_lock);
2506 ret = driver_register(&hdrv->driver);
2507 if (ret == 0) {
2508 bus_for_each_drv(&hid_bus_type, NULL, NULL, hid_bus_driver_added);
2509 }
2510 return ret;
2511 }
2512 EXPORT_SYMBOL_GPL(__hid_register_driver);
2513
2514 void hid_unregister_driver(struct hid_driver *hdrv)
2515 {
2516 driver_unregister(&hdrv->driver);
2517 hid_free_dynids(hdrv);
2518
2519 bus_for_each_drv(&hid_bus_type, NULL, hdrv, bus_removed_driver);
2520 }
2521 EXPORT_SYMBOL_GPL(hid_unregister_driver);
2522
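/*
 * Usage sketch (illustrative only): a minimal HID driver module built on
 * __hid_register_driver() via the hid_register_driver()/module_hid_driver()
 * helpers from <linux/hid.h>. All "sample" identifiers and the 0x1234/0x5678
 * IDs are hypothetical placeholders:
 *
 *	static const struct hid_device_id sample_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, sample_devices);
 *
 *	static struct hid_driver sample_driver = {
 *		.name = "sample",
 *		.id_table = sample_devices,
 *	};
 *	module_hid_driver(sample_driver);
 */
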
2523 int hid_check_keys_pressed(struct hid_device *hid)
2524 {
2525 struct hid_input *hidinput;
2526 int i;
2527
2528 if (!(hid->claimed & HID_CLAIMED_INPUT)) {
2529 return 0;
2530 }
2531
2532 list_for_each_entry(hidinput, &hid->inputs, list)
2533 {
2534 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) {
2535 if (hidinput->input->key[i]) {
2536 return 1;
2537 }
2538 }
2539 }
2540
2541 return 0;
2542 }
2543
2544 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
2545
2546 static int __init hid_init(void)
2547 {
2548 int ret;
2549
2550 if (hid_debug) {
2551 pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
2552 "debugfs is now used for inspecting the device (report descriptor, reports)\n");
2553 }
2554
2555 ret = bus_register(&hid_bus_type);
2556 if (ret) {
2557 pr_err("can't register hid bus\n");
2558 goto err;
2559 }
2560
2561 ret = hidraw_init();
2562 if (ret) {
2563 goto err_bus;
2564 }
2565
2566 hid_debug_init();
2567
2568 return 0;
2569 err_bus:
2570 bus_unregister(&hid_bus_type);
2571 err:
2572 return ret;
2573 }
2574
2575 static void __exit hid_exit(void)
2576 {
2577 hid_debug_exit();
2578 hidraw_exit();
2579 bus_unregister(&hid_bus_type);
2580 hid_quirks_exit(HID_BUS_ANY);
2581 }
2582
2583 module_init(hid_init);
2584 module_exit(hid_exit);
2585
2586 MODULE_AUTHOR("Andreas Gal");
2587 MODULE_AUTHOR("Vojtech Pavlik");
2588 MODULE_AUTHOR("Jiri Kosina");
2589 MODULE_LICENSE("GPL");
2590