1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  HID support for Linux
4  *
5  *  Copyright (c) 1999 Andreas Gal
6  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8  *  Copyright (c) 2006-2012 Jiri Kosina
9  */
10 
11 /*
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <asm/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30 
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35 
36 #include "hid-ids.h"
37 
38 /*
39  * Version Information
40  */
41 
42 #define DRIVER_DESC "HID core driver"
43 
44 int hid_debug = 0;
45 module_param_named(debug, hid_debug, int, 0600);
46 MODULE_PARM_DESC(debug, "toggle HID debugging messages");
47 EXPORT_SYMBOL_GPL(hid_debug);
48 
49 static int hid_ignore_special_drivers = 0;
50 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
51 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
52 
53 /*
54  * Register a new report for a device.
55  */
56 
57 struct hid_report *hid_register_report(struct hid_device *device,
58 				       unsigned int type, unsigned int id,
59 				       unsigned int application)
60 {
61 	struct hid_report_enum *report_enum = device->report_enum + type;
62 	struct hid_report *report;
63 
64 	if (id >= HID_MAX_IDS)
65 		return NULL;
66 	if (report_enum->report_id_hash[id])
67 		return report_enum->report_id_hash[id];
68 
69 	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
70 	if (!report)
71 		return NULL;
72 
73 	if (id != 0)
74 		report_enum->numbered = 1;
75 
76 	report->id = id;
77 	report->type = type;
78 	report->size = 0;
79 	report->device = device;
80 	report->application = application;
81 	report_enum->report_id_hash[id] = report;
82 
83 	list_add_tail(&report->list, &report_enum->report_list);
84 
85 	return report;
86 }
87 EXPORT_SYMBOL_GPL(hid_register_report);
88 
89 /*
90  * Register a new field for this report.
91  */
92 
93 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
94 {
95 	struct hid_field *field;
96 
97 	if (report->maxfield == HID_MAX_FIELDS) {
98 		hid_err(report->device, "too many fields in report\n");
99 		return NULL;
100 	}
101 
102 	field = kzalloc((sizeof(struct hid_field) +
103 			 usages * sizeof(struct hid_usage) +
104 			 usages * sizeof(unsigned)), GFP_KERNEL);
105 	if (!field)
106 		return NULL;
107 
108 	field->index = report->maxfield++;
109 	report->field[field->index] = field;
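	/*
	 * The usage and value arrays live in the same allocation as the
	 * field itself (see the kzalloc above), laid out right behind it.
	 */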
110 	field->usage = (struct hid_usage *)(field + 1);
111 	field->value = (s32 *)(field->usage + usages);
112 	field->report = report;
113 
114 	return field;
115 }
116 
117 /*
118  * Open a collection. The type/usage is pushed on the stack.
119  */
120 
121 static int open_collection(struct hid_parser *parser, unsigned type)
122 {
123 	struct hid_collection *collection;
124 	unsigned usage;
125 	int collection_index;
126 
127 	usage = parser->local.usage[0];
128 
129 	if (parser->collection_stack_ptr == parser->collection_stack_size) {
130 		unsigned int *collection_stack;
131 		unsigned int new_size = parser->collection_stack_size +
132 					HID_COLLECTION_STACK_SIZE;
133 
134 		collection_stack = krealloc(parser->collection_stack,
135 					    new_size * sizeof(unsigned int),
136 					    GFP_KERNEL);
137 		if (!collection_stack)
138 			return -ENOMEM;
139 
140 		parser->collection_stack = collection_stack;
141 		parser->collection_stack_size = new_size;
142 	}
143 
144 	if (parser->device->maxcollection == parser->device->collection_size) {
145 		collection = kmalloc(
146 				array3_size(sizeof(struct hid_collection),
147 					    parser->device->collection_size,
148 					    2),
149 				GFP_KERNEL);
150 		if (collection == NULL) {
151 			hid_err(parser->device, "failed to reallocate collection array\n");
152 			return -ENOMEM;
153 		}
154 		memcpy(collection, parser->device->collection,
155 			sizeof(struct hid_collection) *
156 			parser->device->collection_size);
157 		memset(collection + parser->device->collection_size, 0,
158 			sizeof(struct hid_collection) *
159 			parser->device->collection_size);
160 		kfree(parser->device->collection);
161 		parser->device->collection = collection;
162 		parser->device->collection_size *= 2;
163 	}
164 
165 	parser->collection_stack[parser->collection_stack_ptr++] =
166 		parser->device->maxcollection;
167 
168 	collection_index = parser->device->maxcollection++;
169 	collection = parser->device->collection + collection_index;
170 	collection->type = type;
171 	collection->usage = usage;
172 	collection->level = parser->collection_stack_ptr - 1;
173 	collection->parent_idx = (collection->level == 0) ? -1 :
174 		parser->collection_stack[collection->level - 1];
175 
176 	if (type == HID_COLLECTION_APPLICATION)
177 		parser->device->maxapplication++;
178 
179 	return 0;
180 }
181 
182 /*
183  * Close a collection.
184  */
185 
186 static int close_collection(struct hid_parser *parser)
187 {
188 	if (!parser->collection_stack_ptr) {
189 		hid_err(parser->device, "collection stack underflow\n");
190 		return -EINVAL;
191 	}
192 	parser->collection_stack_ptr--;
193 	return 0;
194 }
195 
196 /*
197  * Climb up the stack, search for the specified collection type
198  * and return the usage.
199  */
200 
201 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
202 {
203 	struct hid_collection *collection = parser->device->collection;
204 	int n;
205 
206 	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
207 		unsigned index = parser->collection_stack[n];
208 		if (collection[index].type == type)
209 			return collection[index].usage;
210 	}
211 	return 0; /* we know nothing about this usage type */
212 }
213 
214 /*
215  * Concatenate usage which defines 16 bits or less with the
216  * currently defined usage page to form a 32 bit usage
217  */
218 
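/*
 * For example, with Usage Page 0x01 (Generic Desktop) and a 1-byte
 * Usage 0x30 (X axis), the combined 32-bit usage becomes 0x00010030.
 */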
219 static void complete_usage(struct hid_parser *parser, unsigned int index)
220 {
221 	parser->local.usage[index] &= 0xFFFF;
222 	parser->local.usage[index] |=
223 		(parser->global.usage_page & 0xFFFF) << 16;
224 }
225 
226 /*
227  * Add a usage to the temporary parser table.
228  */
229 
230 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
231 {
232 	if (parser->local.usage_index >= HID_MAX_USAGES) {
233 		hid_err(parser->device, "usage index exceeded\n");
234 		return -1;
235 	}
236 	parser->local.usage[parser->local.usage_index] = usage;
237 
238 	/*
239 	 * If Usage item only includes usage id, concatenate it with
240 	 * currently defined usage page
241 	 */
242 	if (size <= 2)
243 		complete_usage(parser, parser->local.usage_index);
244 
245 	parser->local.usage_size[parser->local.usage_index] = size;
246 	parser->local.collection_index[parser->local.usage_index] =
247 		parser->collection_stack_ptr ?
248 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
249 	parser->local.usage_index++;
250 	return 0;
251 }
252 
253 /*
254  * Register a new field for this report.
255  */
256 
257 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
258 {
259 	struct hid_report *report;
260 	struct hid_field *field;
261 	unsigned int usages;
262 	unsigned int offset;
263 	unsigned int i;
264 	unsigned int application;
265 
266 	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
267 
268 	report = hid_register_report(parser->device, report_type,
269 				     parser->global.report_id, application);
270 	if (!report) {
271 		hid_err(parser->device, "hid_register_report failed\n");
272 		return -1;
273 	}
274 
275 	/* Handle both signed and unsigned cases properly */
276 	if ((parser->global.logical_minimum < 0 &&
277 		parser->global.logical_maximum <
278 		parser->global.logical_minimum) ||
279 		(parser->global.logical_minimum >= 0 &&
280 		(__u32)parser->global.logical_maximum <
281 		(__u32)parser->global.logical_minimum)) {
282 		dbg_hid("logical range invalid 0x%x 0x%x\n",
283 			parser->global.logical_minimum,
284 			parser->global.logical_maximum);
285 		return -1;
286 	}
287 
288 	offset = report->size;
289 	report->size += parser->global.report_size * parser->global.report_count;
290 
291 	/* Total size check: Allow for possible report index byte */
292 	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
293 		hid_err(parser->device, "report is too long\n");
294 		return -1;
295 	}
296 
297 	if (!parser->local.usage_index) /* Ignore padding fields */
298 		return 0;
299 
300 	usages = max_t(unsigned, parser->local.usage_index,
301 				 parser->global.report_count);
302 
303 	field = hid_register_field(report, usages);
304 	if (!field)
305 		return 0;
306 
307 	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
308 	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
309 	field->application = application;
310 
311 	for (i = 0; i < usages; i++) {
312 		unsigned j = i;
313 		/* Duplicate the last usage we parsed if we have excess values */
314 		if (i >= parser->local.usage_index)
315 			j = parser->local.usage_index - 1;
316 		field->usage[i].hid = parser->local.usage[j];
317 		field->usage[i].collection_index =
318 			parser->local.collection_index[j];
319 		field->usage[i].usage_index = i;
320 		field->usage[i].resolution_multiplier = 1;
321 	}
322 
323 	field->maxusage = usages;
324 	field->flags = flags;
325 	field->report_offset = offset;
326 	field->report_type = report_type;
327 	field->report_size = parser->global.report_size;
328 	field->report_count = parser->global.report_count;
329 	field->logical_minimum = parser->global.logical_minimum;
330 	field->logical_maximum = parser->global.logical_maximum;
331 	field->physical_minimum = parser->global.physical_minimum;
332 	field->physical_maximum = parser->global.physical_maximum;
333 	field->unit_exponent = parser->global.unit_exponent;
334 	field->unit = parser->global.unit;
335 
336 	return 0;
337 }
338 
339 /*
340  * Read data value from item.
341  */
342 
343 static u32 item_udata(struct hid_item *item)
344 {
345 	switch (item->size) {
346 	case 1: return item->data.u8;
347 	case 2: return item->data.u16;
348 	case 4: return item->data.u32;
349 	}
350 	return 0;
351 }
352 
353 static s32 item_sdata(struct hid_item *item)
354 {
355 	switch (item->size) {
356 	case 1: return item->data.s8;
357 	case 2: return item->data.s16;
358 	case 4: return item->data.s32;
359 	}
360 	return 0;
361 }
362 
363 /*
364  * Process a global item.
365  */
366 
367 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
368 {
369 	__s32 raw_value;
370 	switch (item->tag) {
371 	case HID_GLOBAL_ITEM_TAG_PUSH:
372 
373 		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
374 			hid_err(parser->device, "global environment stack overflow\n");
375 			return -1;
376 		}
377 
378 		memcpy(parser->global_stack + parser->global_stack_ptr++,
379 			&parser->global, sizeof(struct hid_global));
380 		return 0;
381 
382 	case HID_GLOBAL_ITEM_TAG_POP:
383 
384 		if (!parser->global_stack_ptr) {
385 			hid_err(parser->device, "global environment stack underflow\n");
386 			return -1;
387 		}
388 
389 		memcpy(&parser->global, parser->global_stack +
390 			--parser->global_stack_ptr, sizeof(struct hid_global));
391 		return 0;
392 
393 	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
394 		parser->global.usage_page = item_udata(item);
395 		return 0;
396 
397 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
398 		parser->global.logical_minimum = item_sdata(item);
399 		return 0;
400 
401 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
402 		if (parser->global.logical_minimum < 0)
403 			parser->global.logical_maximum = item_sdata(item);
404 		else
405 			parser->global.logical_maximum = item_udata(item);
406 		return 0;
407 
408 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
409 		parser->global.physical_minimum = item_sdata(item);
410 		return 0;
411 
412 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
413 		if (parser->global.physical_minimum < 0)
414 			parser->global.physical_maximum = item_sdata(item);
415 		else
416 			parser->global.physical_maximum = item_udata(item);
417 		return 0;
418 
419 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
420 		/* Many devices provide unit exponent as a two's complement
421 		 * nibble due to the common misunderstanding of HID
422 		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
423 		 * both this and the standard encoding. */
424 		raw_value = item_sdata(item);
425 		if (!(raw_value & 0xfffffff0))
426 			parser->global.unit_exponent = hid_snto32(raw_value, 4);
427 		else
428 			parser->global.unit_exponent = raw_value;
429 		return 0;
430 
431 	case HID_GLOBAL_ITEM_TAG_UNIT:
432 		parser->global.unit = item_udata(item);
433 		return 0;
434 
435 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
436 		parser->global.report_size = item_udata(item);
437 		if (parser->global.report_size > 256) {
438 			hid_err(parser->device, "invalid report_size %d\n",
439 					parser->global.report_size);
440 			return -1;
441 		}
442 		return 0;
443 
444 	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
445 		parser->global.report_count = item_udata(item);
446 		if (parser->global.report_count > HID_MAX_USAGES) {
447 			hid_err(parser->device, "invalid report_count %d\n",
448 					parser->global.report_count);
449 			return -1;
450 		}
451 		return 0;
452 
453 	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
454 		parser->global.report_id = item_udata(item);
455 		if (parser->global.report_id == 0 ||
456 		    parser->global.report_id >= HID_MAX_IDS) {
457 			hid_err(parser->device, "report_id %u is invalid\n",
458 				parser->global.report_id);
459 			return -1;
460 		}
461 		return 0;
462 
463 	default:
464 		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
465 		return -1;
466 	}
467 }
468 
469 /*
470  * Process a local item.
471  */
472 
473 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
474 {
475 	__u32 data;
476 	unsigned n;
477 	__u32 count;
478 
479 	data = item_udata(item);
480 
481 	switch (item->tag) {
482 	case HID_LOCAL_ITEM_TAG_DELIMITER:
483 
484 		if (data) {
485 			/*
486 			 * We treat items before the first delimiter
487 			 * as global to all usage sets (branch 0).
488 			 * At the moment we process only these global
489 			 * items and the first delimiter set.
490 			 */
491 			if (parser->local.delimiter_depth != 0) {
492 				hid_err(parser->device, "nested delimiters\n");
493 				return -1;
494 			}
495 			parser->local.delimiter_depth++;
496 			parser->local.delimiter_branch++;
497 		} else {
498 			if (parser->local.delimiter_depth < 1) {
499 				hid_err(parser->device, "bogus close delimiter\n");
500 				return -1;
501 			}
502 			parser->local.delimiter_depth--;
503 		}
504 		return 0;
505 
506 	case HID_LOCAL_ITEM_TAG_USAGE:
507 
508 		if (parser->local.delimiter_branch > 1) {
509 			dbg_hid("alternative usage ignored\n");
510 			return 0;
511 		}
512 
513 		return hid_add_usage(parser, data, item->size);
514 
515 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
516 
517 		if (parser->local.delimiter_branch > 1) {
518 			dbg_hid("alternative usage ignored\n");
519 			return 0;
520 		}
521 
522 		parser->local.usage_minimum = data;
523 		return 0;
524 
525 	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
526 
527 		if (parser->local.delimiter_branch > 1) {
528 			dbg_hid("alternative usage ignored\n");
529 			return 0;
530 		}
531 
532 		count = data - parser->local.usage_minimum;
533 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
534 			/*
535 			 * We do not warn if the name is not set, we are
536 			 * actually pre-scanning the device.
537 			 */
538 			if (dev_name(&parser->device->dev))
539 				hid_warn(parser->device,
540 					 "ignoring exceeding usage max\n");
541 			data = HID_MAX_USAGES - parser->local.usage_index +
542 				parser->local.usage_minimum - 1;
543 			if (data <= 0) {
544 				hid_err(parser->device,
545 					"no more usage index available\n");
546 				return -1;
547 			}
548 		}
549 
550 		for (n = parser->local.usage_minimum; n <= data; n++)
551 			if (hid_add_usage(parser, n, item->size)) {
552 				dbg_hid("hid_add_usage failed\n");
553 				return -1;
554 			}
555 		return 0;
556 
557 	default:
558 
559 		dbg_hid("unknown local item tag 0x%x\n", item->tag);
560 		return 0;
561 	}
562 	return 0;
563 }
564 
565 /*
566  * Concatenate Usage Pages into Usages where relevant:
567  * As per specification, 6.2.2.8: "When the parser encounters a main item it
568  * concatenates the last declared Usage Page with a Usage to form a complete
569  * usage value."
570  */
571 
572 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
573 {
574 	int i;
575 	unsigned int usage_page;
576 	unsigned int current_page;
577 
578 	if (!parser->local.usage_index)
579 		return;
580 
581 	usage_page = parser->global.usage_page;
582 
583 	/*
584 	 * Concatenate usage page again only if last declared Usage Page
585 	 * has not been already used in previous usages concatenation
586 	 */
587 	for (i = parser->local.usage_index - 1; i >= 0; i--) {
588 		if (parser->local.usage_size[i] > 2)
589 			/* Ignore extended usages */
590 			continue;
591 
592 		current_page = parser->local.usage[i] >> 16;
593 		if (current_page == usage_page)
594 			break;
595 
596 		complete_usage(parser, i);
597 	}
598 }
599 
600 /*
601  * Process a main item.
602  */
603 
604 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
605 {
606 	__u32 data;
607 	int ret;
608 
609 	hid_concatenate_last_usage_page(parser);
610 
611 	data = item_udata(item);
612 
613 	switch (item->tag) {
614 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
615 		ret = open_collection(parser, data & 0xff);
616 		break;
617 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
618 		ret = close_collection(parser);
619 		break;
620 	case HID_MAIN_ITEM_TAG_INPUT:
621 		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
622 		break;
623 	case HID_MAIN_ITEM_TAG_OUTPUT:
624 		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
625 		break;
626 	case HID_MAIN_ITEM_TAG_FEATURE:
627 		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
628 		break;
629 	default:
630 		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
631 		ret = 0;
632 	}
633 
634 	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
635 
636 	return ret;
637 }
638 
639 /*
640  * Process a reserved item.
641  */
642 
643 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
644 {
645 	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
646 	return 0;
647 }
648 
649 /*
650  * Free a report and all registered fields. The field->usage and
651  * field->value tables are allocated behind the field, so we need
652  * only to free(field) itself.
653  */
654 
655 static void hid_free_report(struct hid_report *report)
656 {
657 	unsigned n;
658 
659 	for (n = 0; n < report->maxfield; n++)
660 		kfree(report->field[n]);
661 	kfree(report);
662 }
663 
664 /*
665  * Close report. This function returns the device
666  * state to the point prior to hid_open_report().
667  */
668 static void hid_close_report(struct hid_device *device)
669 {
670 	unsigned i, j;
671 
672 	for (i = 0; i < HID_REPORT_TYPES; i++) {
673 		struct hid_report_enum *report_enum = device->report_enum + i;
674 
675 		for (j = 0; j < HID_MAX_IDS; j++) {
676 			struct hid_report *report = report_enum->report_id_hash[j];
677 			if (report)
678 				hid_free_report(report);
679 		}
680 		memset(report_enum, 0, sizeof(*report_enum));
681 		INIT_LIST_HEAD(&report_enum->report_list);
682 	}
683 
684 	kfree(device->rdesc);
685 	device->rdesc = NULL;
686 	device->rsize = 0;
687 
688 	kfree(device->collection);
689 	device->collection = NULL;
690 	device->collection_size = 0;
691 	device->maxcollection = 0;
692 	device->maxapplication = 0;
693 
694 	device->status &= ~HID_STAT_PARSED;
695 }
696 
697 /*
698  * Free a device structure, all reports, and all fields.
699  */
700 
701 static void hid_device_release(struct device *dev)
702 {
703 	struct hid_device *hid = to_hid_device(dev);
704 
705 	hid_close_report(hid);
706 	kfree(hid->dev_rdesc);
707 	kfree(hid);
708 }
709 
710 /*
711  * Fetch a report description item from the data stream. We support long
712  * items, though they are not used yet.
713  */
714 
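/*
 * Short items pack their header into one byte: bits 0-1 encode the data
 * size (0, 1, 2 or 4 bytes), bits 2-3 the type and bits 4-7 the tag,
 * which is what the shifts and masks below decode.
 */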
715 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
716 {
717 	u8 b;
718 
719 	if ((end - start) <= 0)
720 		return NULL;
721 
722 	b = *start++;
723 
724 	item->type = (b >> 2) & 3;
725 	item->tag  = (b >> 4) & 15;
726 
727 	if (item->tag == HID_ITEM_TAG_LONG) {
728 
729 		item->format = HID_ITEM_FORMAT_LONG;
730 
731 		if ((end - start) < 2)
732 			return NULL;
733 
734 		item->size = *start++;
735 		item->tag  = *start++;
736 
737 		if ((end - start) < item->size)
738 			return NULL;
739 
740 		item->data.longdata = start;
741 		start += item->size;
742 		return start;
743 	}
744 
745 	item->format = HID_ITEM_FORMAT_SHORT;
746 	item->size = b & 3;
747 
748 	switch (item->size) {
749 	case 0:
750 		return start;
751 
752 	case 1:
753 		if ((end - start) < 1)
754 			return NULL;
755 		item->data.u8 = *start++;
756 		return start;
757 
758 	case 2:
759 		if ((end - start) < 2)
760 			return NULL;
761 		item->data.u16 = get_unaligned_le16(start);
762 		start = (__u8 *)((__le16 *)start + 1);
763 		return start;
764 
765 	case 3:
766 		item->size++;
767 		if ((end - start) < 4)
768 			return NULL;
769 		item->data.u32 = get_unaligned_le32(start);
770 		start = (__u8 *)((__le32 *)start + 1);
771 		return start;
772 	}
773 
774 	return NULL;
775 }
776 
777 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
778 {
779 	struct hid_device *hid = parser->device;
780 
781 	if (usage == HID_DG_CONTACTID)
782 		hid->group = HID_GROUP_MULTITOUCH;
783 }
784 
785 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
786 {
787 	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
788 	    parser->global.report_size == 8)
789 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
790 
791 	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
792 	    parser->global.report_size == 8)
793 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
794 }
795 
796 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
797 {
798 	struct hid_device *hid = parser->device;
799 	int i;
800 
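	/*
	 * HID_UP_* constants carry the usage page in their upper 16 bits,
	 * hence the << 16 when comparing against the parser's usage_page.
	 */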
801 	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
802 	    type == HID_COLLECTION_PHYSICAL)
803 		hid->group = HID_GROUP_SENSOR_HUB;
804 
805 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
806 	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
807 	    hid->group == HID_GROUP_MULTITOUCH)
808 		hid->group = HID_GROUP_GENERIC;
809 
810 	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
811 		for (i = 0; i < parser->local.usage_index; i++)
812 			if (parser->local.usage[i] == HID_GD_POINTER)
813 				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
814 
815 	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
816 		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
817 
818 	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
819 		for (i = 0; i < parser->local.usage_index; i++)
820 			if (parser->local.usage[i] ==
821 					(HID_UP_GOOGLEVENDOR | 0x0001))
822 				parser->device->group =
823 					HID_GROUP_VIVALDI;
824 }
825 
826 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
827 {
828 	__u32 data;
829 	int i;
830 
831 	hid_concatenate_last_usage_page(parser);
832 
833 	data = item_udata(item);
834 
835 	switch (item->tag) {
836 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
837 		hid_scan_collection(parser, data & 0xff);
838 		break;
839 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
840 		break;
841 	case HID_MAIN_ITEM_TAG_INPUT:
842 		/* ignore constant inputs, they will be ignored by hid-input */
843 		if (data & HID_MAIN_ITEM_CONSTANT)
844 			break;
845 		for (i = 0; i < parser->local.usage_index; i++)
846 			hid_scan_input_usage(parser, parser->local.usage[i]);
847 		break;
848 	case HID_MAIN_ITEM_TAG_OUTPUT:
849 		break;
850 	case HID_MAIN_ITEM_TAG_FEATURE:
851 		for (i = 0; i < parser->local.usage_index; i++)
852 			hid_scan_feature_usage(parser, parser->local.usage[i]);
853 		break;
854 	}
855 
856 	/* Reset the local parser environment */
857 	memset(&parser->local, 0, sizeof(parser->local));
858 
859 	return 0;
860 }
861 
862 /*
863  * Scan a report descriptor before the device is added to the bus.
864  * Sets device groups and other properties that determine what driver
865  * to load.
866  */
867 static int hid_scan_report(struct hid_device *hid)
868 {
869 	struct hid_parser *parser;
870 	struct hid_item item;
871 	__u8 *start = hid->dev_rdesc;
872 	__u8 *end = start + hid->dev_rsize;
873 	static int (*dispatch_type[])(struct hid_parser *parser,
874 				      struct hid_item *item) = {
875 		hid_scan_main,
876 		hid_parser_global,
877 		hid_parser_local,
878 		hid_parser_reserved
879 	};
880 
881 	parser = vzalloc(sizeof(struct hid_parser));
882 	if (!parser)
883 		return -ENOMEM;
884 
885 	parser->device = hid;
886 	hid->group = HID_GROUP_GENERIC;
887 
888 	/*
889 	 * The parsing here is simpler than in hid_open_report() because we
890 	 * only need to be robust against hid errors. Those errors will be
891 	 * raised by hid_open_report() anyway.
892 	 */
893 	while ((start = fetch_item(start, end, &item)) != NULL)
894 		dispatch_type[item.type](parser, &item);
895 
896 	/*
897 	 * Handle special flags set during scanning.
898 	 */
899 	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
900 	    (hid->group == HID_GROUP_MULTITOUCH))
901 		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
902 
903 	/*
904 	 * Vendor specific handlings
905 	 */
906 	switch (hid->vendor) {
907 	case USB_VENDOR_ID_WACOM:
908 		hid->group = HID_GROUP_WACOM;
909 		break;
910 	case USB_VENDOR_ID_SYNAPTICS:
911 		if (hid->group == HID_GROUP_GENERIC)
912 			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
913 			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
914 				/*
915 				 * hid-rmi should take care of them,
916 				 * not hid-generic
917 				 */
918 				hid->group = HID_GROUP_RMI;
919 		break;
920 	}
921 
922 	kfree(parser->collection_stack);
923 	vfree(parser);
924 	return 0;
925 }
926 
927 /**
928  * hid_parse_report - parse device report
929  *
930  * @hid: hid device
931  * @start: report start
932  * @size: report size
933  *
934  * Allocate the device report as read by the bus driver. This function should
935  * only be called from parse() in ll drivers.
936  */
937 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
938 {
939 	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
940 	if (!hid->dev_rdesc)
941 		return -ENOMEM;
942 	hid->dev_rsize = size;
943 	return 0;
944 }
945 EXPORT_SYMBOL_GPL(hid_parse_report);
946 
947 static const char * const hid_report_names[] = {
948 	"HID_INPUT_REPORT",
949 	"HID_OUTPUT_REPORT",
950 	"HID_FEATURE_REPORT",
951 };
952 /**
953  * hid_validate_values - validate existing device report's value indexes
954  *
955  * @hid: hid device
956  * @type: which report type to examine
957  * @id: which report ID to examine (0 for first)
958  * @field_index: which report field to examine
959  * @report_counts: expected number of values
960  *
961  * Validate the number of values in a given field of a given report, after
962  * parsing.
963  */
964 struct hid_report *hid_validate_values(struct hid_device *hid,
965 				       unsigned int type, unsigned int id,
966 				       unsigned int field_index,
967 				       unsigned int report_counts)
968 {
969 	struct hid_report *report;
970 
971 	if (type > HID_FEATURE_REPORT) {
972 		hid_err(hid, "invalid HID report type %u\n", type);
973 		return NULL;
974 	}
975 
976 	if (id >= HID_MAX_IDS) {
977 		hid_err(hid, "invalid HID report id %u\n", id);
978 		return NULL;
979 	}
980 
981 	/*
982 	 * Explicitly not using hid_get_report() here since it depends on
983 	 * ->numbered being checked, which may not always be the case when
984 	 * drivers go to access report values.
985 	 */
986 	if (id == 0) {
987 		/*
988 		 * Validating on id 0 means we should examine the first
989 		 * report in the list.
990 		 */
991 		report = list_first_entry_or_null(
992 				&hid->report_enum[type].report_list,
993 				struct hid_report, list);
994 	} else {
995 		report = hid->report_enum[type].report_id_hash[id];
996 	}
997 	if (!report) {
998 		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
999 		return NULL;
1000 	}
1001 	if (report->maxfield <= field_index) {
1002 		hid_err(hid, "not enough fields in %s %u\n",
1003 			hid_report_names[type], id);
1004 		return NULL;
1005 	}
1006 	if (report->field[field_index]->report_count < report_counts) {
1007 		hid_err(hid, "not enough values in %s %u field %u\n",
1008 			hid_report_names[type], id, field_index);
1009 		return NULL;
1010 	}
1011 	return report;
1012 }
1013 EXPORT_SYMBOL_GPL(hid_validate_values);
1014 
1015 static int hid_calculate_multiplier(struct hid_device *hid,
1016 				     struct hid_field *multiplier)
1017 {
1018 	int m;
1019 	__s32 v = *multiplier->value;
1020 	__s32 lmin = multiplier->logical_minimum;
1021 	__s32 lmax = multiplier->logical_maximum;
1022 	__s32 pmin = multiplier->physical_minimum;
1023 	__s32 pmax = multiplier->physical_maximum;
1024 
1025 	/*
1026 	 * "Because OS implementations will generally divide the control's
1027 	 * reported count by the Effective Resolution Multiplier, designers
1028 	 * should take care not to establish a potential Effective
1029 	 * Resolution Multiplier of zero."
1030 	 * HID Usage Table, v1.12, Section 4.3.1, p31
1031 	 */
1032 	if (lmax - lmin == 0)
1033 		return 1;
1034 	/*
1035 	 * Handling the unit exponent is left as an exercise to whoever
1036 	 * finds a device where that exponent is not 0.
1037 	 */
1038 	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
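	/*
	 * Illustrative example: a multiplier field with logical range [0, 1]
	 * and physical range [1, 16] that reports value 1 yields m == 16.
	 */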
1039 	if (unlikely(multiplier->unit_exponent != 0)) {
1040 		hid_warn(hid,
1041 			 "unsupported Resolution Multiplier unit exponent %d\n",
1042 			 multiplier->unit_exponent);
1043 	}
1044 
1045 	/* There are no devices with an effective multiplier > 255 */
1046 	if (unlikely(m == 0 || m > 255 || m < -255)) {
1047 		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1048 		m = 1;
1049 	}
1050 
1051 	return m;
1052 }
1053 
1054 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1055 					  struct hid_field *field,
1056 					  struct hid_collection *multiplier_collection,
1057 					  int effective_multiplier)
1058 {
1059 	struct hid_collection *collection;
1060 	struct hid_usage *usage;
1061 	int i;
1062 
1063 	/*
1064 	 * If multiplier_collection is NULL, the multiplier applies
1065 	 * to all fields in the report.
1066 	 * Otherwise, it is the Logical Collection the multiplier applies to
1067 	 * but our field may be in a subcollection of that collection.
1068 	 */
1069 	for (i = 0; i < field->maxusage; i++) {
1070 		usage = &field->usage[i];
1071 
1072 		collection = &hid->collection[usage->collection_index];
1073 		while (collection->parent_idx != -1 &&
1074 		       collection != multiplier_collection)
1075 			collection = &hid->collection[collection->parent_idx];
1076 
1077 		if (collection->parent_idx != -1 ||
1078 		    multiplier_collection == NULL)
1079 			usage->resolution_multiplier = effective_multiplier;
1080 
1081 	}
1082 }
1083 
1084 static void hid_apply_multiplier(struct hid_device *hid,
1085 				 struct hid_field *multiplier)
1086 {
1087 	struct hid_report_enum *rep_enum;
1088 	struct hid_report *rep;
1089 	struct hid_field *field;
1090 	struct hid_collection *multiplier_collection;
1091 	int effective_multiplier;
1092 	int i;
1093 
1094 	/*
1095 	 * "The Resolution Multiplier control must be contained in the same
1096 	 * Logical Collection as the control(s) to which it is to be applied.
1097 	 * If no Resolution Multiplier is defined, then the Resolution
1098 	 * Multiplier defaults to 1.  If more than one control exists in a
1099 	 * Logical Collection, the Resolution Multiplier is associated with
1100 	 * all controls in the collection. If no Logical Collection is
1101 	 * defined, the Resolution Multiplier is associated with all
1102 	 * controls in the report."
1103 	 * HID Usage Table, v1.12, Section 4.3.1, p30
1104 	 *
1105 	 * Thus, search from the current collection upwards until we find a
1106 	 * logical collection. Then search all fields for that same parent
1107 	 * collection. Those are the fields the multiplier applies to.
1108 	 *
1109 	 * If we have more than one multiplier, it will overwrite the
1110 	 * applicable fields later.
1111 	 */
1112 	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1113 	while (multiplier_collection->parent_idx != -1 &&
1114 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1115 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1116 
1117 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1118 
1119 	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1120 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1121 		for (i = 0; i < rep->maxfield; i++) {
1122 			field = rep->field[i];
1123 			hid_apply_multiplier_to_field(hid, field,
1124 						      multiplier_collection,
1125 						      effective_multiplier);
1126 		}
1127 	}
1128 }
1129 
1130 /*
1131  * hid_setup_resolution_multiplier - set up all resolution multipliers
1132  *
1133  * @device: hid device
1134  *
1135  * Search for all Resolution Multiplier Feature Reports and apply their
1136  * value to all matching Input items. This only updates the internal struct
1137  * fields.
1138  *
1139  * The Resolution Multiplier is applied by the hardware. If the multiplier
1140  * is anything other than 1, the hardware will send pre-multiplied events
1141  * so that the same physical interaction generates an accumulated
1142  *	accumulated_value = value * multiplier
1143  * This may be achieved by sending
1144  * - "value * multiplier" for each event, or
1145  * - "value" but "multiplier" times as frequently, or
1146  * - a combination of the above
1147  * The only guarantee is that the same physical interaction always generates
1148  * an accumulated 'value * multiplier'.
1149  *
1150  * This function must be called before any event processing and after
1151  * any SetRequest to the Resolution Multiplier.
1152  */
1153 void hid_setup_resolution_multiplier(struct hid_device *hid)
1154 {
1155 	struct hid_report_enum *rep_enum;
1156 	struct hid_report *rep;
1157 	struct hid_usage *usage;
1158 	int i, j;
1159 
1160 	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1161 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1162 		for (i = 0; i < rep->maxfield; i++) {
1163 			/* Ignore if report count is out of bounds. */
1164 			if (rep->field[i]->report_count < 1)
1165 				continue;
1166 
1167 			for (j = 0; j < rep->field[i]->maxusage; j++) {
1168 				usage = &rep->field[i]->usage[j];
1169 				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1170 					hid_apply_multiplier(hid,
1171 							     rep->field[i]);
1172 			}
1173 		}
1174 	}
1175 }
1176 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1177 
1178 /**
1179  * hid_open_report - open a driver-specific device report
1180  *
1181  * @device: hid device
1182  *
1183  * Parse a report description into a hid_device structure. Reports are
1184  * enumerated, fields are attached to these reports.
1185  * 0 returned on success, otherwise nonzero error value.
1186  *
1187  * This function (or the equivalent hid_parse() macro) should only be
1188  * called from probe() in drivers, before starting the device.
1189  */
1190 int hid_open_report(struct hid_device *device)
1191 {
1192 	struct hid_parser *parser;
1193 	struct hid_item item;
1194 	unsigned int size;
1195 	__u8 *start;
1196 	__u8 *buf;
1197 	__u8 *end;
1198 	__u8 *next;
1199 	int ret;
1200 	static int (*dispatch_type[])(struct hid_parser *parser,
1201 				      struct hid_item *item) = {
1202 		hid_parser_main,
1203 		hid_parser_global,
1204 		hid_parser_local,
1205 		hid_parser_reserved
1206 	};
1207 
1208 	if (WARN_ON(device->status & HID_STAT_PARSED))
1209 		return -EBUSY;
1210 
1211 	start = device->dev_rdesc;
1212 	if (WARN_ON(!start))
1213 		return -ENODEV;
1214 	size = device->dev_rsize;
1215 
1216 	buf = kmemdup(start, size, GFP_KERNEL);
1217 	if (buf == NULL)
1218 		return -ENOMEM;
1219 
1220 	if (device->driver->report_fixup)
1221 		start = device->driver->report_fixup(device, buf, &size);
1222 	else
1223 		start = buf;
1224 
1225 	start = kmemdup(start, size, GFP_KERNEL);
1226 	kfree(buf);
1227 	if (start == NULL)
1228 		return -ENOMEM;
1229 
1230 	device->rdesc = start;
1231 	device->rsize = size;
1232 
1233 	parser = vzalloc(sizeof(struct hid_parser));
1234 	if (!parser) {
1235 		ret = -ENOMEM;
1236 		goto alloc_err;
1237 	}
1238 
1239 	parser->device = device;
1240 
1241 	end = start + size;
1242 
1243 	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1244 				     sizeof(struct hid_collection), GFP_KERNEL);
1245 	if (!device->collection) {
1246 		ret = -ENOMEM;
1247 		goto err;
1248 	}
1249 	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1250 
1251 	ret = -EINVAL;
1252 	while ((next = fetch_item(start, end, &item)) != NULL) {
1253 		start = next;
1254 
1255 		if (item.format != HID_ITEM_FORMAT_SHORT) {
1256 			hid_err(device, "unexpected long global item\n");
1257 			goto err;
1258 		}
1259 
1260 		if (dispatch_type[item.type](parser, &item)) {
1261 			hid_err(device, "item %u %u %u %u parsing failed\n",
1262 				item.format, (unsigned)item.size,
1263 				(unsigned)item.type, (unsigned)item.tag);
1264 			goto err;
1265 		}
1266 
1267 		if (start == end) {
1268 			if (parser->collection_stack_ptr) {
1269 				hid_err(device, "unbalanced collection at end of report description\n");
1270 				goto err;
1271 			}
1272 			if (parser->local.delimiter_depth) {
1273 				hid_err(device, "unbalanced delimiter at end of report description\n");
1274 				goto err;
1275 			}
1276 
1277 			/*
1278 			 * fetch initial values in case the device's
1279 			 * default multiplier isn't the recommended 1
1280 			 */
1281 			hid_setup_resolution_multiplier(device);
1282 
1283 			kfree(parser->collection_stack);
1284 			vfree(parser);
1285 			device->status |= HID_STAT_PARSED;
1286 
1287 			return 0;
1288 		}
1289 	}
1290 
1291 	hid_err(device, "item fetching failed at offset %u/%u\n",
1292 		size - (unsigned int)(end - start), size);
1293 err:
1294 	kfree(parser->collection_stack);
1295 alloc_err:
1296 	vfree(parser);
1297 	hid_close_report(device);
1298 	return ret;
1299 }
1300 EXPORT_SYMBOL_GPL(hid_open_report);
1301 
1302 /*
1303  * Convert a signed n-bit integer to signed 32-bit integer. Common
1304  * cases are handled by the compiler, the awkward sizes have to be
1305  * done by hand.
1306  */
1307 
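/* e.g. snto32(0x1f, 5) == -1 and snto32(0x0f, 5) == 15 */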
1308 static s32 snto32(__u32 value, unsigned n)
1309 {
1310 	if (!value || !n)
1311 		return 0;
1312 
1313 	if (n > 32)
1314 		n = 32;
1315 
1316 	switch (n) {
1317 	case 8:  return ((__s8)value);
1318 	case 16: return ((__s16)value);
1319 	case 32: return ((__s32)value);
1320 	}
1321 	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
1322 }
1323 
1324 s32 hid_snto32(__u32 value, unsigned n)
1325 {
1326 	return snto32(value, n);
1327 }
1328 EXPORT_SYMBOL_GPL(hid_snto32);
1329 
1330 /*
1331  * Convert a signed 32-bit integer to a signed n-bit integer.
1332  */
1333 
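/* Out-of-range values are clamped: s32ton(300, 8) == 0x7f, s32ton(-300, 8) == 0x80 */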
1334 static u32 s32ton(__s32 value, unsigned n)
1335 {
1336 	s32 a = value >> (n - 1);
1337 	if (a && a != -1)
1338 		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
1339 	return value & ((1 << n) - 1);
1340 }
1341 
1342 /*
1343  * Extract/implement a data field from/to a little endian report (bit array).
1344  *
1345  * Code sort-of follows HID spec:
1346  *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1347  *
1348  * While the USB HID spec allows unlimited length bit fields in "report
1349  * descriptors", most devices never use more than 16 bits.
1350  * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1351  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1352  */
1353 
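/*
 * Example: __extract(report, 6, 4) takes the top two bits of report[0]
 * as the low bits of the result and the bottom two bits of report[1]
 * as the high bits, then masks the result to 4 bits.
 */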
1354 static u32 __extract(u8 *report, unsigned offset, int n)
1355 {
1356 	unsigned int idx = offset / 8;
1357 	unsigned int bit_nr = 0;
1358 	unsigned int bit_shift = offset % 8;
1359 	int bits_to_copy = 8 - bit_shift;
1360 	u32 value = 0;
1361 	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1362 
1363 	while (n > 0) {
1364 		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1365 		n -= bits_to_copy;
1366 		bit_nr += bits_to_copy;
1367 		bits_to_copy = 8;
1368 		bit_shift = 0;
1369 		idx++;
1370 	}
1371 
1372 	return value & mask;
1373 }
1374 
1375 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1376 			unsigned offset, unsigned n)
1377 {
1378 	if (n > 32) {
1379 		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1380 			      __func__, n, current->comm);
1381 		n = 32;
1382 	}
1383 
1384 	return __extract(report, offset, n);
1385 }
1386 EXPORT_SYMBOL_GPL(hid_field_extract);
1387 
1388 /*
1389  * "implement" : set bits in a little endian bit stream.
1390  * Same concepts as "extract" (see comments above).
1391  * The data mangled in the bit stream remains in little endian
1392  * order the whole time. It makes more sense to talk about
1393  * endianness of register values by considering a register
1394  * a "cached" copy of the little endian bit stream.
1395  */
1396 
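/*
 * Example: __implement(report, 4, 8, 0xab) puts the low nibble of 0xab
 * into the high nibble of report[0] and the high nibble into the low
 * nibble of report[1], leaving all other bits untouched.
 */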
1397 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1398 {
1399 	unsigned int idx = offset / 8;
1400 	unsigned int bit_shift = offset % 8;
1401 	int bits_to_set = 8 - bit_shift;
1402 
1403 	while (n - bits_to_set >= 0) {
1404 		report[idx] &= ~(0xff << bit_shift);
1405 		report[idx] |= value << bit_shift;
1406 		value >>= bits_to_set;
1407 		n -= bits_to_set;
1408 		bits_to_set = 8;
1409 		bit_shift = 0;
1410 		idx++;
1411 	}
1412 
1413 	/* remaining partial byte: fewer than 8 bits left, if any */
1414 	if (n) {
1415 		u8 bit_mask = ((1U << n) - 1);
1416 		report[idx] &= ~(bit_mask << bit_shift);
1417 		report[idx] |= value << bit_shift;
1418 	}
1419 }
1420 
1421 static void implement(const struct hid_device *hid, u8 *report,
1422 		      unsigned offset, unsigned n, u32 value)
1423 {
1424 	if (unlikely(n > 32)) {
1425 		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1426 			 __func__, n, current->comm);
1427 		n = 32;
1428 	} else if (n < 32) {
1429 		u32 m = (1U << n) - 1;
1430 
1431 		if (unlikely(value > m)) {
1432 			hid_warn(hid,
1433 				 "%s() called with too large value %d (n: %d)! (%s)\n",
1434 				 __func__, value, n, current->comm);
1435 			WARN_ON(1);
1436 			value &= m;
1437 		}
1438 	}
1439 
1440 	__implement(report, offset, n, value);
1441 }
1442 
1443 /*
1444  * Search an array for a value; returns 0 if found, -1 otherwise.
1445  */
1446 
1447 static int search(__s32 *array, __s32 value, unsigned n)
1448 {
1449 	while (n--) {
1450 		if (*array++ == value)
1451 			return 0;
1452 	}
1453 	return -1;
1454 }
1455 
1456 /**
1457  * hid_match_report - check if driver's raw_event should be called
1458  *
1459  * @hid: hid device
1460  * @report: hid report to match against
1461  *
1462  * compare hid->driver->report_table->report_type to report->type
1463  */
1464 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1465 {
1466 	const struct hid_report_id *id = hid->driver->report_table;
1467 
1468 	if (!id) /* NULL means all */
1469 		return 1;
1470 
1471 	for (; id->report_type != HID_TERMINATOR; id++)
1472 		if (id->report_type == HID_ANY_ID ||
1473 				id->report_type == report->type)
1474 			return 1;
1475 	return 0;
1476 }
1477 
1478 /**
1479  * hid_match_usage - check if driver's event should be called
1480  *
1481  * @hid: hid device
1482  * @usage: usage to match against
1483  *
1484  * compare hid->driver->usage_table->usage_{type,code} to
1485  * usage->usage_{type,code}
1486  */
1487 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1488 {
1489 	const struct hid_usage_id *id = hid->driver->usage_table;
1490 
1491 	if (!id) /* NULL means all */
1492 		return 1;
1493 
1494 	for (; id->usage_type != HID_ANY_ID - 1; id++)
1495 		if ((id->usage_hid == HID_ANY_ID ||
1496 				id->usage_hid == usage->hid) &&
1497 				(id->usage_type == HID_ANY_ID ||
1498 				id->usage_type == usage->type) &&
1499 				(id->usage_code == HID_ANY_ID ||
1500 				 id->usage_code == usage->code))
1501 			return 1;
1502 	return 0;
1503 }
1504 
1505 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1506 		struct hid_usage *usage, __s32 value, int interrupt)
1507 {
1508 	struct hid_driver *hdrv = hid->driver;
1509 	int ret;
1510 
1511 	if (!list_empty(&hid->debug_list))
1512 		hid_dump_input(hid, usage, value);
1513 
1514 	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1515 		ret = hdrv->event(hid, field, usage, value);
1516 		if (ret != 0) {
1517 			if (ret < 0)
1518 				hid_err(hid, "%s's event failed with %d\n",
1519 						hdrv->name, ret);
1520 			return;
1521 		}
1522 	}
1523 
1524 	if (hid->claimed & HID_CLAIMED_INPUT)
1525 		hidinput_hid_event(hid, field, usage, value);
1526 	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1527 		hid->hiddev_hid_event(hid, field, usage, value);
1528 }
1529 
1530 /*
1531  * Analyse a received field, and fetch the data from it. The field
1532  * content is stored for next report processing (we do differential
1533  * reporting to the layer).
1534  */
1535 
1536 static void hid_input_field(struct hid_device *hid, struct hid_field *field,
1537 			    __u8 *data, int interrupt)
1538 {
1539 	unsigned n;
1540 	unsigned count = field->report_count;
1541 	unsigned offset = field->report_offset;
1542 	unsigned size = field->report_size;
1543 	__s32 min = field->logical_minimum;
1544 	__s32 max = field->logical_maximum;
1545 	__s32 *value;
1546 
1547 	value = kmalloc_array(count, sizeof(__s32), GFP_ATOMIC);
1548 	if (!value)
1549 		return;
1550 
1551 	for (n = 0; n < count; n++) {
1552 
1553 		value[n] = min < 0 ?
1554 			snto32(hid_field_extract(hid, data, offset + n * size,
1555 			       size), size) :
1556 			hid_field_extract(hid, data, offset + n * size, size);
1557 
1558 		/* Ignore report if ErrorRollOver */
1559 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1560 		    value[n] >= min && value[n] <= max &&
1561 		    value[n] - min < field->maxusage &&
1562 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
1563 			goto exit;
1564 	}
1565 
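	/*
	 * Variable fields report each value directly.  Array fields get
	 * differential reporting: usages present only in the previous values
	 * generate a release event (0), usages present only in the new
	 * values generate a press event (1).
	 */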
1566 	for (n = 0; n < count; n++) {
1567 
1568 		if (HID_MAIN_ITEM_VARIABLE & field->flags) {
1569 			hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
1570 			continue;
1571 		}
1572 
1573 		if (field->value[n] >= min && field->value[n] <= max
1574 			&& field->value[n] - min < field->maxusage
1575 			&& field->usage[field->value[n] - min].hid
1576 			&& search(value, field->value[n], count))
1577 				hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
1578 
1579 		if (value[n] >= min && value[n] <= max
1580 			&& value[n] - min < field->maxusage
1581 			&& field->usage[value[n] - min].hid
1582 			&& search(field->value, value[n], count))
1583 				hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
1584 	}
1585 
1586 	memcpy(field->value, value, count * sizeof(__s32));
1587 exit:
1588 	kfree(value);
1589 }
1590 
1591 /*
1592  * Output the field into the report.
1593  */
1594 
1595 static void hid_output_field(const struct hid_device *hid,
1596 			     struct hid_field *field, __u8 *data)
1597 {
1598 	unsigned count = field->report_count;
1599 	unsigned offset = field->report_offset;
1600 	unsigned size = field->report_size;
1601 	unsigned n;
1602 
1603 	for (n = 0; n < count; n++) {
1604 		if (field->logical_minimum < 0)	/* signed values */
1605 			implement(hid, data, offset + n * size, size,
1606 				  s32ton(field->value[n], size));
1607 		else				/* unsigned values */
1608 			implement(hid, data, offset + n * size, size,
1609 				  field->value[n]);
1610 	}
1611 }
1612 
1613 /*
1614  * Compute the size of a report.
1615  */
1616 static size_t hid_compute_report_size(struct hid_report *report)
1617 {
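	/* report->size is in bits; round up to whole bytes */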
1618 	if (report->size)
1619 		return ((report->size - 1) >> 3) + 1;
1620 
1621 	return 0;
1622 }
1623 
1624 /*
1625  * Create a report. 'data' has to be allocated using
1626  * hid_alloc_report_buf() so that it has proper size.
1627  */
1628 
1629 void hid_output_report(struct hid_report *report, __u8 *data)
1630 {
1631 	unsigned n;
1632 
1633 	if (report->id > 0)
1634 		*data++ = report->id;
1635 
1636 	memset(data, 0, hid_compute_report_size(report));
1637 	for (n = 0; n < report->maxfield; n++)
1638 		hid_output_field(report->device, report->field[n], data);
1639 }
1640 EXPORT_SYMBOL_GPL(hid_output_report);
1641 
1642 /*
1643  * Allocator for buffer that is going to be passed to hid_output_report()
1644  */
1645 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1646 {
1647 	/*
1648 	 * 7 extra bytes are necessary to achieve proper functionality
1649 	 * of implement() working on 8 byte chunks
1650 	 */
1651 
1652 	u32 len = hid_report_len(report) + 7;
1653 
1654 	return kmalloc(len, flags);
1655 }
1656 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1657 
1658 /*
1659  * Set a field value. The report this field belongs to has to be
1660  * created and transferred to the device, to set this value in the
1661  * device.
1662  */
1663 
1664 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1665 {
1666 	unsigned size;
1667 
1668 	if (!field)
1669 		return -1;
1670 
1671 	size = field->report_size;
1672 
1673 	hid_dump_input(field->report->device, field->usage + offset, value);
1674 
1675 	if (offset >= field->report_count) {
1676 		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1677 				offset, field->report_count);
1678 		return -1;
1679 	}
1680 	if (field->logical_minimum < 0) {
1681 		if (value != snto32(s32ton(value, size), size)) {
1682 			hid_err(field->report->device, "value %d is out of range\n", value);
1683 			return -1;
1684 		}
1685 	}
1686 	field->value[offset] = value;
1687 	return 0;
1688 }
1689 EXPORT_SYMBOL_GPL(hid_set_field);
1690 
1691 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1692 		const u8 *data)
1693 {
1694 	struct hid_report *report;
1695 	unsigned int n = 0;	/* Normally report number is 0 */
1696 
1697 	/* Device uses numbered reports, data[0] is report number */
1698 	if (report_enum->numbered)
1699 		n = *data;
1700 
1701 	report = report_enum->report_id_hash[n];
1702 	if (report == NULL)
1703 		dbg_hid("undefined report_id %u received\n", n);
1704 
1705 	return report;
1706 }
1707 
1708 /*
1709  * Implement a generic .request() callback, using .raw_request()
1710  * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1711  */
1712 int __hid_request(struct hid_device *hid, struct hid_report *report,
1713 		int reqtype)
1714 {
1715 	char *buf;
1716 	int ret;
1717 	u32 len;
1718 
1719 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1720 	if (!buf)
1721 		return -ENOMEM;
1722 
1723 	len = hid_report_len(report);
1724 
1725 	if (reqtype == HID_REQ_SET_REPORT)
1726 		hid_output_report(report, buf);
1727 
1728 	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1729 					  report->type, reqtype);
1730 	if (ret < 0) {
1731 		dbg_hid("unable to complete request: %d\n", ret);
1732 		goto out;
1733 	}
1734 
1735 	if (reqtype == HID_REQ_GET_REPORT)
1736 		hid_input_report(hid, report->type, buf, ret, 0);
1737 
1738 	ret = 0;
1739 
1740 out:
1741 	kfree(buf);
1742 	return ret;
1743 }
1744 EXPORT_SYMBOL_GPL(__hid_request);
1745 
1746 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1747 		int interrupt)
1748 {
1749 	struct hid_report_enum *report_enum = hid->report_enum + type;
1750 	struct hid_report *report;
1751 	struct hid_driver *hdrv;
1752 	unsigned int a;
1753 	u32 rsize, csize = size;
1754 	u8 *cdata = data;
1755 	int ret = 0;
1756 
1757 	report = hid_get_report(report_enum, data);
1758 	if (!report)
1759 		goto out;
1760 
1761 	if (report_enum->numbered) {
1762 		cdata++;
1763 		csize--;
1764 	}
1765 
1766 	rsize = hid_compute_report_size(report);
1767 
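	/*
	 * Clamp the expected size to the data buffer; numbered reports lose
	 * one byte of it to the report ID prefix.
	 */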
1768 	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
1769 		rsize = HID_MAX_BUFFER_SIZE - 1;
1770 	else if (rsize > HID_MAX_BUFFER_SIZE)
1771 		rsize = HID_MAX_BUFFER_SIZE;
1772 
1773 	if (csize < rsize) {
1774 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
1775 				csize, rsize);
1776 		memset(cdata + csize, 0, rsize - csize);
1777 	}
1778 
1779 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
1780 		hid->hiddev_report_event(hid, report);
1781 	if (hid->claimed & HID_CLAIMED_HIDRAW) {
1782 		ret = hidraw_report_event(hid, data, size);
1783 		if (ret)
1784 			goto out;
1785 	}
1786 
1787 	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
1788 		for (a = 0; a < report->maxfield; a++)
1789 			hid_input_field(hid, report->field[a], cdata, interrupt);
1790 		hdrv = hid->driver;
1791 		if (hdrv && hdrv->report)
1792 			hdrv->report(hid, report);
1793 	}
1794 
1795 	if (hid->claimed & HID_CLAIMED_INPUT)
1796 		hidinput_report_event(hid, report);
1797 out:
1798 	return ret;
1799 }
1800 EXPORT_SYMBOL_GPL(hid_report_raw_event);
1801 
1802 /**
1803  * hid_input_report - report data from lower layer (usb, bt...)
1804  *
1805  * @hid: hid device
1806  * @type: HID report type (HID_*_REPORT)
1807  * @data: report contents
1808  * @size: size of data parameter
1809  * @interrupt: distinguish between interrupt and control transfers
1810  *
1811  * This is the data entry point for lower layers.
1812  */
1813 int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
1814 {
1815 	struct hid_report_enum *report_enum;
1816 	struct hid_driver *hdrv;
1817 	struct hid_report *report;
1818 	int ret = 0;
1819 
1820 	if (!hid)
1821 		return -ENODEV;
1822 
1823 	if (down_trylock(&hid->driver_input_lock))
1824 		return -EBUSY;
1825 
1826 	if (!hid->driver) {
1827 		ret = -ENODEV;
1828 		goto unlock;
1829 	}
1830 	report_enum = hid->report_enum + type;
1831 	hdrv = hid->driver;
1832 
1833 	if (!size) {
1834 		dbg_hid("empty report\n");
1835 		ret = -1;
1836 		goto unlock;
1837 	}
1838 
1839 	/* Avoid unnecessary overhead if debugfs is disabled */
1840 	if (!list_empty(&hid->debug_list))
1841 		hid_dump_report(hid, type, data, size);
1842 
1843 	report = hid_get_report(report_enum, data);
1844 
1845 	if (!report) {
1846 		ret = -1;
1847 		goto unlock;
1848 	}
1849 
1850 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
1851 		ret = hdrv->raw_event(hid, report, data, size);
1852 		if (ret < 0)
1853 			goto unlock;
1854 	}
1855 
1856 	ret = hid_report_raw_event(hid, type, data, size, interrupt);
1857 
1858 unlock:
1859 	up(&hid->driver_input_lock);
1860 	return ret;
1861 }
1862 EXPORT_SYMBOL_GPL(hid_input_report);
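
/*
 * Editorial sketch: how a transport (ll) driver typically feeds received
 * interrupt data into the core.  "xfer_buf"/"xfer_len" stand in for the
 * transport's receive buffer and are hypothetical.
 */
static void example_transport_irq(struct hid_device *hid, u8 *xfer_buf,
				  u32 xfer_len)
{
	if (hid_input_report(hid, HID_INPUT_REPORT, xfer_buf, xfer_len, 1))
		hid_dbg(hid, "input report was not processed\n");
}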
1863 
1864 bool hid_match_one_id(const struct hid_device *hdev,
1865 		      const struct hid_device_id *id)
1866 {
1867 	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
1868 		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
1869 		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
1870 		(id->product == HID_ANY_ID || id->product == hdev->product);
1871 }
1872 
1873 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
1874 		const struct hid_device_id *id)
1875 {
1876 	for (; id->bus; id++)
1877 		if (hid_match_one_id(hdev, id))
1878 			return id;
1879 
1880 	return NULL;
1881 }
1882 
1883 static const struct hid_device_id hid_hiddev_list[] = {
1884 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
1885 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
1886 	{ }
1887 };
1888 
1889 static bool hid_hiddev(struct hid_device *hdev)
1890 {
1891 	return !!hid_match_id(hdev, hid_hiddev_list);
1892 }
1893 
1894 
1895 static ssize_t
1896 read_report_descriptor(struct file *filp, struct kobject *kobj,
1897 		struct bin_attribute *attr,
1898 		char *buf, loff_t off, size_t count)
1899 {
1900 	struct device *dev = kobj_to_dev(kobj);
1901 	struct hid_device *hdev = to_hid_device(dev);
1902 
1903 	if (off >= hdev->rsize)
1904 		return 0;
1905 
1906 	if (off + count > hdev->rsize)
1907 		count = hdev->rsize - off;
1908 
1909 	memcpy(buf, hdev->rdesc + off, count);
1910 
1911 	return count;
1912 }
1913 
1914 static ssize_t
1915 show_country(struct device *dev, struct device_attribute *attr,
1916 		char *buf)
1917 {
1918 	struct hid_device *hdev = to_hid_device(dev);
1919 
1920 	return sprintf(buf, "%02x\n", hdev->country & 0xff);
1921 }
1922 
1923 static struct bin_attribute dev_bin_attr_report_desc = {
1924 	.attr = { .name = "report_descriptor", .mode = 0444 },
1925 	.read = read_report_descriptor,
1926 	.size = HID_MAX_DESCRIPTOR_SIZE,
1927 };
1928 
1929 static const struct device_attribute dev_attr_country = {
1930 	.attr = { .name = "country", .mode = 0444 },
1931 	.show = show_country,
1932 };
1933 
1934 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
1935 {
1936 	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
1937 		"Joystick", "Gamepad", "Keyboard", "Keypad",
1938 		"Multi-Axis Controller"
1939 	};
1940 	const char *type, *bus;
1941 	char buf[64] = "";
1942 	unsigned int i;
1943 	int len;
1944 	int ret;
1945 
1946 	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
1947 		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
1948 	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
1949 		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
1950 	if (hdev->bus != BUS_USB)
1951 		connect_mask &= ~HID_CONNECT_HIDDEV;
1952 	if (hid_hiddev(hdev))
1953 		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
1954 
1955 	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
1956 				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
1957 		hdev->claimed |= HID_CLAIMED_INPUT;
1958 
1959 	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
1960 			!hdev->hiddev_connect(hdev,
1961 				connect_mask & HID_CONNECT_HIDDEV_FORCE))
1962 		hdev->claimed |= HID_CLAIMED_HIDDEV;
1963 	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
1964 		hdev->claimed |= HID_CLAIMED_HIDRAW;
1965 
1966 	if (connect_mask & HID_CONNECT_DRIVER)
1967 		hdev->claimed |= HID_CLAIMED_DRIVER;
1968 
1969 	/* Drivers with the ->raw_event callback set are not required to connect
1970 	 * to any other listener. */
1971 	if (!hdev->claimed && !hdev->driver->raw_event) {
1972 		hid_err(hdev, "device has no listeners, quitting\n");
1973 		return -ENODEV;
1974 	}
1975 
1976 	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
1977 			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
1978 		hdev->ff_init(hdev);
1979 
1980 	len = 0;
1981 	if (hdev->claimed & HID_CLAIMED_INPUT)
1982 		len += sprintf(buf + len, "input");
1983 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
1984 		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
1985 				((struct hiddev *)hdev->hiddev)->minor);
1986 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
1987 		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
1988 				((struct hidraw *)hdev->hidraw)->minor);
1989 
1990 	type = "Device";
1991 	for (i = 0; i < hdev->maxcollection; i++) {
1992 		struct hid_collection *col = &hdev->collection[i];
1993 		if (col->type == HID_COLLECTION_APPLICATION &&
1994 		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
1995 		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
1996 			type = types[col->usage & 0xffff];
1997 			break;
1998 		}
1999 	}
2000 
2001 	switch (hdev->bus) {
2002 	case BUS_USB:
2003 		bus = "USB";
2004 		break;
2005 	case BUS_BLUETOOTH:
2006 		bus = "BLUETOOTH";
2007 		break;
2008 	case BUS_I2C:
2009 		bus = "I2C";
2010 		break;
2011 	case BUS_VIRTUAL:
2012 		bus = "VIRTUAL";
2013 		break;
2014 	default:
2015 		bus = "<UNKNOWN>";
2016 	}
2017 
2018 	ret = device_create_file(&hdev->dev, &dev_attr_country);
2019 	if (ret)
2020 		hid_warn(hdev,
2021 			 "can't create sysfs country code attribute err: %d\n", ret);
2022 
2023 	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2024 		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2025 		 type, hdev->name, hdev->phys);
2026 
2027 	return 0;
2028 }
2029 EXPORT_SYMBOL_GPL(hid_connect);
2030 
2031 void hid_disconnect(struct hid_device *hdev)
2032 {
2033 	device_remove_file(&hdev->dev, &dev_attr_country);
2034 	if (hdev->claimed & HID_CLAIMED_INPUT)
2035 		hidinput_disconnect(hdev);
2036 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2037 		hdev->hiddev_disconnect(hdev);
2038 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2039 		hidraw_disconnect(hdev);
2040 	hdev->claimed = 0;
2041 }
2042 EXPORT_SYMBOL_GPL(hid_disconnect);
2043 
2044 /**
2045  * hid_hw_start - start underlying HW
2046  * @hdev: hid device
2047  * @connect_mask: which outputs to connect, see HID_CONNECT_*
2048  *
2049  * Call this in the probe function *after* hid_parse. This will set up HW
2050  * buffers and start the device (if not deferred to device open).
2051  * hid_hw_stop must be called if this was successful.
2052  */
2053 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2054 {
2055 	int error;
2056 
2057 	error = hdev->ll_driver->start(hdev);
2058 	if (error)
2059 		return error;
2060 
2061 	if (connect_mask) {
2062 		error = hid_connect(hdev, connect_mask);
2063 		if (error) {
2064 			hdev->ll_driver->stop(hdev);
2065 			return error;
2066 		}
2067 	}
2068 
2069 	return 0;
2070 }
2071 EXPORT_SYMBOL_GPL(hid_hw_start);
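
/*
 * Editorial sketch of the probe sequence described above: parse the report
 * descriptor first, then start the HW.  The function name is hypothetical;
 * HID_CONNECT_DEFAULT is the usual connect mask.
 */
static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		hid_err(hdev, "hw start failed\n");

	return ret;
}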
2072 
2073 /**
2074  * hid_hw_stop - stop underlying HW
2075  * @hdev: hid device
2076  *
2077  * This is usually called from the remove function, or from probe when
2078  * something failed and hid_hw_start() had already been called.
2079  */
2080 void hid_hw_stop(struct hid_device *hdev)
2081 {
2082 	hid_disconnect(hdev);
2083 	hdev->ll_driver->stop(hdev);
2084 }
2085 EXPORT_SYMBOL_GPL(hid_hw_stop);
2086 
2087 /**
2088  * hid_hw_open - signal underlying HW to start delivering events
2089  * @hdev: hid device
2090  *
2091  * Tell underlying HW to start delivering events from the device.
2092  * This function should be called sometime after a successful call
2093  * to hid_hw_start().
2094  */
2095 int hid_hw_open(struct hid_device *hdev)
2096 {
2097 	int ret;
2098 
2099 	ret = mutex_lock_killable(&hdev->ll_open_lock);
2100 	if (ret)
2101 		return ret;
2102 
2103 	if (!hdev->ll_open_count++) {
2104 		ret = hdev->ll_driver->open(hdev);
2105 		if (ret)
2106 			hdev->ll_open_count--;
2107 	}
2108 
2109 	mutex_unlock(&hdev->ll_open_lock);
2110 	return ret;
2111 }
2112 EXPORT_SYMBOL_GPL(hid_hw_open);
2113 
2114 /**
2115  * hid_hw_close - signal underlying HW to stop delivering events
2116  *
2117  * @hdev: hid device
2118  *
2119  * This function indicates that we are not interested in the events
2120  * from this device anymore. Delivery of events may or may not stop,
2121  * depending on the number of users still outstanding.
2122  */
2123 void hid_hw_close(struct hid_device *hdev)
2124 {
2125 	mutex_lock(&hdev->ll_open_lock);
2126 	if (!--hdev->ll_open_count)
2127 		hdev->ll_driver->close(hdev);
2128 	mutex_unlock(&hdev->ll_open_lock);
2129 }
2130 EXPORT_SYMBOL_GPL(hid_hw_close);
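
/*
 * Editorial sketch: a driver that needs to receive events while still in
 * probe() brackets that window with hid_hw_open()/hid_hw_close().  The
 * helper name is hypothetical.
 */
static int example_init_with_events(struct hid_device *hdev)
{
	int ret;

	ret = hid_hw_open(hdev);
	if (ret)
		return ret;

	/* ... wait for and handle the initial reports here ... */

	hid_hw_close(hdev);
	return 0;
}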
2131 
2132 struct hid_dynid {
2133 	struct list_head list;
2134 	struct hid_device_id id;
2135 };
2136 
2137 /**
2138  * new_id_store - add a new HID device ID to this driver and re-probe devices
2139  * @drv: target device driver
2140  * @buf: buffer for scanning device ID data
2141  * @count: input size
2142  *
2143  * Adds a new dynamic hid device ID to this driver,
2144  * and causes the driver to probe for all devices again.
2145  */
2146 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2147 		size_t count)
2148 {
2149 	struct hid_driver *hdrv = to_hid_driver(drv);
2150 	struct hid_dynid *dynid;
2151 	__u32 bus, vendor, product;
2152 	unsigned long driver_data = 0;
2153 	int ret;
2154 
2155 	ret = sscanf(buf, "%x %x %x %lx",
2156 			&bus, &vendor, &product, &driver_data);
2157 	if (ret < 3)
2158 		return -EINVAL;
2159 
2160 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2161 	if (!dynid)
2162 		return -ENOMEM;
2163 
2164 	dynid->id.bus = bus;
2165 	dynid->id.group = HID_GROUP_ANY;
2166 	dynid->id.vendor = vendor;
2167 	dynid->id.product = product;
2168 	dynid->id.driver_data = driver_data;
2169 
2170 	spin_lock(&hdrv->dyn_lock);
2171 	list_add_tail(&dynid->list, &hdrv->dyn_list);
2172 	spin_unlock(&hdrv->dyn_lock);
2173 
2174 	ret = driver_attach(&hdrv->driver);
2175 
2176 	return ret ? : count;
2177 }
2178 static DRIVER_ATTR_WO(new_id);
2179 
2180 static struct attribute *hid_drv_attrs[] = {
2181 	&driver_attr_new_id.attr,
2182 	NULL,
2183 };
2184 ATTRIBUTE_GROUPS(hid_drv);
2185 
2186 static void hid_free_dynids(struct hid_driver *hdrv)
2187 {
2188 	struct hid_dynid *dynid, *n;
2189 
2190 	spin_lock(&hdrv->dyn_lock);
2191 	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2192 		list_del(&dynid->list);
2193 		kfree(dynid);
2194 	}
2195 	spin_unlock(&hdrv->dyn_lock);
2196 }
2197 
2198 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2199 					     struct hid_driver *hdrv)
2200 {
2201 	struct hid_dynid *dynid;
2202 
2203 	spin_lock(&hdrv->dyn_lock);
2204 	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2205 		if (hid_match_one_id(hdev, &dynid->id)) {
2206 			spin_unlock(&hdrv->dyn_lock);
2207 			return &dynid->id;
2208 		}
2209 	}
2210 	spin_unlock(&hdrv->dyn_lock);
2211 
2212 	return hid_match_id(hdev, hdrv->id_table);
2213 }
2214 EXPORT_SYMBOL_GPL(hid_match_device);
2215 
2216 static int hid_bus_match(struct device *dev, struct device_driver *drv)
2217 {
2218 	struct hid_driver *hdrv = to_hid_driver(drv);
2219 	struct hid_device *hdev = to_hid_device(dev);
2220 
2221 	return hid_match_device(hdev, hdrv) != NULL;
2222 }
2223 
2224 /**
2225  * hid_compare_device_paths - check if both devices share the same path
2226  * @hdev_a: hid device
2227  * @hdev_b: hid device
2228  * @separator: char to use as separator
2229  *
2230  * Check if two devices share the same path up to the last occurrence of
2231  * the separator char. Both paths must exist (i.e., zero-length paths
2232  * don't match).
2233  */
2234 bool hid_compare_device_paths(struct hid_device *hdev_a,
2235 			      struct hid_device *hdev_b, char separator)
2236 {
2237 	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2238 	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2239 
2240 	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2241 		return false;
2242 
2243 	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2244 }
2245 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
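
/*
 * Editorial sketch: pairing two HID interfaces that belong to the same
 * physical device by comparing their "phys" paths up to the last
 * separator ('/' is assumed here for USB-style phys strings).
 */
static bool example_same_physical_device(struct hid_device *a,
					 struct hid_device *b)
{
	return hid_compare_device_paths(a, b, '/');
}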
2246 
2247 static int hid_device_probe(struct device *dev)
2248 {
2249 	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2250 	struct hid_device *hdev = to_hid_device(dev);
2251 	const struct hid_device_id *id;
2252 	int ret = 0;
2253 
2254 	if (down_interruptible(&hdev->driver_input_lock)) {
2255 		ret = -EINTR;
2256 		goto end;
2257 	}
2258 	hdev->io_started = false;
2259 
2260 	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2261 
2262 	if (!hdev->driver) {
2263 		id = hid_match_device(hdev, hdrv);
2264 		if (id == NULL) {
2265 			ret = -ENODEV;
2266 			goto unlock;
2267 		}
2268 
2269 		if (hdrv->match) {
2270 			if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
2271 				ret = -ENODEV;
2272 				goto unlock;
2273 			}
2274 		} else {
2275 			/*
2276 			 * hid-generic implements .match(), so if
2277 			 * hid_ignore_special_drivers is set, we can safely
2278 			 * return.
2279 			 */
2280 			if (hid_ignore_special_drivers) {
2281 				ret = -ENODEV;
2282 				goto unlock;
2283 			}
2284 		}
2285 
2286 		/* reset any quirks that have been set previously */
2287 		hdev->quirks = hid_lookup_quirk(hdev);
2288 		hdev->driver = hdrv;
2289 		if (hdrv->probe) {
2290 			ret = hdrv->probe(hdev, id);
2291 		} else { /* default probe */
2292 			ret = hid_open_report(hdev);
2293 			if (!ret)
2294 				ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2295 		}
2296 		if (ret) {
2297 			hid_close_report(hdev);
2298 			hdev->driver = NULL;
2299 		}
2300 	}
2301 unlock:
2302 	if (!hdev->io_started)
2303 		up(&hdev->driver_input_lock);
2304 end:
2305 	return ret;
2306 }
2307 
2308 static int hid_device_remove(struct device *dev)
2309 {
2310 	struct hid_device *hdev = to_hid_device(dev);
2311 	struct hid_driver *hdrv;
2312 
2313 	down(&hdev->driver_input_lock);
2314 	hdev->io_started = false;
2315 
2316 	hdrv = hdev->driver;
2317 	if (hdrv) {
2318 		if (hdrv->remove)
2319 			hdrv->remove(hdev);
2320 		else /* default remove */
2321 			hid_hw_stop(hdev);
2322 		hid_close_report(hdev);
2323 		hdev->driver = NULL;
2324 	}
2325 
2326 	if (!hdev->io_started)
2327 		up(&hdev->driver_input_lock);
2328 
2329 	return 0;
2330 }
2331 
2332 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2333 			     char *buf)
2334 {
2335 	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2336 
2337 	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2338 			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2339 }
2340 static DEVICE_ATTR_RO(modalias);
2341 
2342 static struct attribute *hid_dev_attrs[] = {
2343 	&dev_attr_modalias.attr,
2344 	NULL,
2345 };
2346 static struct bin_attribute *hid_dev_bin_attrs[] = {
2347 	&dev_bin_attr_report_desc,
2348 	NULL
2349 };
2350 static const struct attribute_group hid_dev_group = {
2351 	.attrs = hid_dev_attrs,
2352 	.bin_attrs = hid_dev_bin_attrs,
2353 };
2354 __ATTRIBUTE_GROUPS(hid_dev);
2355 
2356 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
2357 {
2358 	struct hid_device *hdev = to_hid_device(dev);
2359 
2360 	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2361 			hdev->bus, hdev->vendor, hdev->product))
2362 		return -ENOMEM;
2363 
2364 	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2365 		return -ENOMEM;
2366 
2367 	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2368 		return -ENOMEM;
2369 
2370 	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2371 		return -ENOMEM;
2372 
2373 	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2374 			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2375 		return -ENOMEM;
2376 
2377 	return 0;
2378 }
2379 
2380 struct bus_type hid_bus_type = {
2381 	.name		= "hid",
2382 	.dev_groups	= hid_dev_groups,
2383 	.drv_groups	= hid_drv_groups,
2384 	.match		= hid_bus_match,
2385 	.probe		= hid_device_probe,
2386 	.remove		= hid_device_remove,
2387 	.uevent		= hid_uevent,
2388 };
2389 EXPORT_SYMBOL(hid_bus_type);
2390 
2391 int hid_add_device(struct hid_device *hdev)
2392 {
2393 	static atomic_t id = ATOMIC_INIT(0);
2394 	int ret;
2395 
2396 	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2397 		return -EBUSY;
2398 
2399 	hdev->quirks = hid_lookup_quirk(hdev);
2400 
2401 	/* we need to kill them here, otherwise they will stay allocated
2402 	 * waiting for a matching driver to come along */
2403 	if (hid_ignore(hdev))
2404 		return -ENODEV;
2405 
2406 	/*
2407 	 * Check for the mandatory transport channel.
2408 	 */
2409 	 if (!hdev->ll_driver->raw_request) {
2410 		hid_err(hdev, "transport driver missing .raw_request()\n");
2411 		return -EINVAL;
2412 	 }
2413 
2414 	/*
2415 	 * Read the device report descriptor once and use as template
2416 	 * for the driver-specific modifications.
2417 	 */
2418 	ret = hdev->ll_driver->parse(hdev);
2419 	if (ret)
2420 		return ret;
2421 	if (!hdev->dev_rdesc)
2422 		return -ENODEV;
2423 
2424 	/*
2425 	 * Scan generic devices for group information
2426 	 */
2427 	if (hid_ignore_special_drivers) {
2428 		hdev->group = HID_GROUP_GENERIC;
2429 	} else if (!hdev->group &&
2430 		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2431 		ret = hid_scan_report(hdev);
2432 		if (ret)
2433 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2434 	}
2435 
2436 	/* XXX hack: is there any cleaner solution once the driver core
2437 	 * is converted to allow more than 20 bytes as the device name? */
2438 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2439 		     hdev->vendor, hdev->product, atomic_inc_return(&id));
2440 
2441 	hid_debug_register(hdev, dev_name(&hdev->dev));
2442 	ret = device_add(&hdev->dev);
2443 	if (!ret)
2444 		hdev->status |= HID_STAT_ADDED;
2445 	else
2446 		hid_debug_unregister(hdev);
2447 
2448 	return ret;
2449 }
2450 EXPORT_SYMBOL_GPL(hid_add_device);
2451 
2452 /**
2453  * hid_allocate_device - allocate new hid device descriptor
2454  *
2455  * Allocate and initialize a hid device, so that hid_destroy_device might be
2456  * used to free it.
2457  *
2458  * A new hid_device pointer is returned on success, otherwise an ERR_PTR
2459  * encoded error value.
2460  */
2461 struct hid_device *hid_allocate_device(void)
2462 {
2463 	struct hid_device *hdev;
2464 	int ret = -ENOMEM;
2465 
2466 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2467 	if (hdev == NULL)
2468 		return ERR_PTR(ret);
2469 
2470 	device_initialize(&hdev->dev);
2471 	hdev->dev.release = hid_device_release;
2472 	hdev->dev.bus = &hid_bus_type;
2473 	device_enable_async_suspend(&hdev->dev);
2474 
2475 	hid_close_report(hdev);
2476 
2477 	init_waitqueue_head(&hdev->debug_wait);
2478 	INIT_LIST_HEAD(&hdev->debug_list);
2479 	spin_lock_init(&hdev->debug_list_lock);
2480 	sema_init(&hdev->driver_input_lock, 1);
2481 	mutex_init(&hdev->ll_open_lock);
2482 
2483 	return hdev;
2484 }
2485 EXPORT_SYMBOL_GPL(hid_allocate_device);
2486 
2487 static void hid_remove_device(struct hid_device *hdev)
2488 {
2489 	if (hdev->status & HID_STAT_ADDED) {
2490 		device_del(&hdev->dev);
2491 		hid_debug_unregister(hdev);
2492 		hdev->status &= ~HID_STAT_ADDED;
2493 	}
2494 	kfree(hdev->dev_rdesc);
2495 	hdev->dev_rdesc = NULL;
2496 	hdev->dev_rsize = 0;
2497 }
2498 
2499 /**
2500  * hid_destroy_device - free previously allocated device
2501  *
2502  * @hdev: hid device
2503  *
2504  * If you allocate a hid_device through hid_allocate_device(), you should
2505  * only ever free it with this function.
2506  */
2507 void hid_destroy_device(struct hid_device *hdev)
2508 {
2509 	hid_remove_device(hdev);
2510 	put_device(&hdev->dev);
2511 }
2512 EXPORT_SYMBOL_GPL(hid_destroy_device);
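
/*
 * Editorial sketch: the allocate/add/destroy life cycle a transport driver
 * follows.  The ll_driver instance, bus type, ids and device name are
 * hypothetical.
 */
static struct hid_device *example_transport_register(struct device *parent,
						     struct hid_ll_driver *ll)
{
	struct hid_device *hid;
	int ret;

	hid = hid_allocate_device();
	if (IS_ERR(hid))
		return hid;

	hid->ll_driver = ll;
	hid->dev.parent = parent;
	hid->bus = BUS_VIRTUAL;
	hid->vendor = 0x1234;			/* hypothetical ids */
	hid->product = 0x5678;
	strscpy(hid->name, "Example HID", sizeof(hid->name));

	ret = hid_add_device(hid);
	if (ret) {
		hid_destroy_device(hid);
		return ERR_PTR(ret);
	}

	return hid;
}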
2513 
2514 
2515 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2516 {
2517 	struct hid_driver *hdrv = data;
2518 	struct hid_device *hdev = to_hid_device(dev);
2519 
2520 	if (hdev->driver == hdrv &&
2521 	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
2522 	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2523 		return device_reprobe(dev);
2524 
2525 	return 0;
2526 }
2527 
2528 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
2529 {
2530 	struct hid_driver *hdrv = to_hid_driver(drv);
2531 
2532 	if (hdrv->match) {
2533 		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
2534 				 __hid_bus_reprobe_drivers);
2535 	}
2536 
2537 	return 0;
2538 }
2539 
2540 static int __bus_removed_driver(struct device_driver *drv, void *data)
2541 {
2542 	return bus_rescan_devices(&hid_bus_type);
2543 }
2544 
2545 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
2546 		const char *mod_name)
2547 {
2548 	int ret;
2549 
2550 	hdrv->driver.name = hdrv->name;
2551 	hdrv->driver.bus = &hid_bus_type;
2552 	hdrv->driver.owner = owner;
2553 	hdrv->driver.mod_name = mod_name;
2554 
2555 	INIT_LIST_HEAD(&hdrv->dyn_list);
2556 	spin_lock_init(&hdrv->dyn_lock);
2557 
2558 	ret = driver_register(&hdrv->driver);
2559 
2560 	if (ret == 0)
2561 		bus_for_each_drv(&hid_bus_type, NULL, NULL,
2562 				 __hid_bus_driver_added);
2563 
2564 	return ret;
2565 }
2566 EXPORT_SYMBOL_GPL(__hid_register_driver);
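
/*
 * Editorial sketch: the usual way a separate HID driver module registers
 * itself - a device id table plus module_hid_driver(), which expands to
 * hid_register_driver()/hid_unregister_driver() at module init/exit.
 * Vendor/product ids and the driver name are hypothetical.
 */
static const struct hid_device_id example_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(hid, example_devices);

static struct hid_driver example_driver = {
	.name		= "example-hid",
	.id_table	= example_devices,
};
module_hid_driver(example_driver);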
2567 
2568 void hid_unregister_driver(struct hid_driver *hdrv)
2569 {
2570 	driver_unregister(&hdrv->driver);
2571 	hid_free_dynids(hdrv);
2572 
2573 	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
2574 }
2575 EXPORT_SYMBOL_GPL(hid_unregister_driver);
2576 
2577 int hid_check_keys_pressed(struct hid_device *hid)
2578 {
2579 	struct hid_input *hidinput;
2580 	int i;
2581 
2582 	if (!(hid->claimed & HID_CLAIMED_INPUT))
2583 		return 0;
2584 
2585 	list_for_each_entry(hidinput, &hid->inputs, list) {
2586 		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
2587 			if (hidinput->input->key[i])
2588 				return 1;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
2595 
2596 static int __init hid_init(void)
2597 {
2598 	int ret;
2599 
2600 	if (hid_debug)
2601 		pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
2602 			"debugfs is now used for inspecting the device (report descriptor, reports)\n");
2603 
2604 	ret = bus_register(&hid_bus_type);
2605 	if (ret) {
2606 		pr_err("can't register hid bus\n");
2607 		goto err;
2608 	}
2609 
2610 	ret = hidraw_init();
2611 	if (ret)
2612 		goto err_bus;
2613 
2614 	hid_debug_init();
2615 
2616 	return 0;
2617 err_bus:
2618 	bus_unregister(&hid_bus_type);
2619 err:
2620 	return ret;
2621 }
2622 
2623 static void __exit hid_exit(void)
2624 {
2625 	hid_debug_exit();
2626 	hidraw_exit();
2627 	bus_unregister(&hid_bus_type);
2628 	hid_quirks_exit(HID_BUS_ANY);
2629 }
2630 
2631 module_init(hid_init);
2632 module_exit(hid_exit);
2633 
2634 MODULE_AUTHOR("Andreas Gal");
2635 MODULE_AUTHOR("Vojtech Pavlik");
2636 MODULE_AUTHOR("Jiri Kosina");
2637 MODULE_LICENSE("GPL");
2638