// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}
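
/*
 * Matching example (illustrative only - "example_ids" does not exist in
 * this file): a port matches an id_table entry when every field is either
 * SERIO_ANY or equal to the corresponding field of serio->id. A driver
 * that binds to any i8042-style port could therefore declare:
 *
 *	static const struct serio_device_id example_ids[] = {
 *		{
 *			.type	= SERIO_8042,
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }
 *	};
 *
 * The all-zero entry terminates the table, matching the loop condition in
 * serio_match_port() above.
 */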

/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different type
	 * we need to add this one and should not look further, because we
	 * need to preserve the sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}
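
/*
 * Example of the coalescing above: queueing SERIO_RESCAN_PORT twice in a
 * row for the same port results in a single event, while queueing
 * SERIO_RESCAN_PORT followed by SERIO_RECONNECT_PORT keeps both events, in
 * order, because only back-to-back duplicates are suppressed.
 */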

/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}
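
/*
 * For reference: writes to the "drvctl" attribute accept "none",
 * "reconnect", "rescan" or the name of a registered serio driver; e.g.
 * writing "rescan" from user space forces the port to be disconnected
 * and re-probed.
 */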

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};
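
/*
 * Taken together, the groups above give each port a sysfs node roughly
 * like the following (port number assumed for illustration):
 *
 *	/sys/bus/serio/devices/serio0/description
 *	/sys/bus/serio/devices/serio0/modalias
 *	/sys/bus/serio/devices/serio0/drvctl		(write-only)
 *	/sys/bus/serio/devices/serio0/bind_mode
 *	/sys/bus/serio/devices/serio0/firmware_id
 *	/sys/bus/serio/devices/serio0/id/{type,proto,id,extra}
 */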

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}
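
/*
 * Note on the lockdep subclass above: each port's lock uses its tree depth
 * as the subclass, so holding a parent port's lock while taking a child
 * port's lock (as pass-through port arrangements may require) is not
 * reported by lockdep as recursive locking.
 */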

/*
 * Complete serio port registration.
 * The driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue with the next sibling,
		 * climbing back up the tree when there is none.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}
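
/*
 * Traversal example (hypothetical tree): for a root A with children B and
 * C, where B has a child D, the loop above visits A, B, D, C and stops
 * once it climbs back to A. A failed reconnect simply turns the current
 * node into a leaf, because serio_reconnect_port() destroys its children
 * on failure.
 */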

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all of its child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);
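
/*
 * Usage sketch (illustrative only - the names below are assumptions, not
 * code from this file): a port driver typically allocates and fills in a
 * struct serio and then registers it, which queues a SERIO_REGISTER_PORT
 * event handled asynchronously by serio_add_port():
 *
 *	struct serio *port = kzalloc(sizeof(*port), GFP_KERNEL);
 *
 *	if (port) {
 *		port->id.type = SERIO_8042;
 *		strscpy(port->name, "Example port", sizeof(port->name));
 *		strscpy(port->phys, "example/serio0", sizeof(port->phys));
 *		serio_register_port(port);
 *	}
 *
 * serio_register_port() is the <linux/serio.h> wrapper that passes
 * THIS_MODULE to __serio_register_port().
 */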

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warn("driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore original bind mode and let kseriod bind the
	 * driver to free ports
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);
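
/*
 * Minimal driver sketch (illustrative; the "example_*" handlers are
 * assumptions, not code from this file):
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio driver",
 *		.id_table	= example_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 *
 * module_serio_driver() expands to serio_register_driver() and
 * serio_unregister_driver(), which call __serio_register_driver() above
 * with THIS_MODULE and KBUILD_MODNAME.
 */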

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->fast_reconnect) {
		error = serio->drv->fast_reconnect(serio);
		if (error && error != -ENOENT)
			dev_warn(dev, "fast reconnect failed with error %d\n",
				 error);
	}
	mutex_unlock(&serio->drv_mutex);

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);
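
/*
 * Typical pattern inside a driver's connect() handler (sketch; "priv" and
 * the error handling are assumptions): the driver publishes its state with
 * serio_set_drvdata() and then opens the port, backing out on failure:
 *
 *	serio_set_drvdata(serio, priv);
 *	error = serio_open(serio, drv);
 *	if (error) {
 *		serio_set_drvdata(serio, NULL);
 *		kfree(priv);
 *		return error;
 *	}
 *
 * The matching disconnect() handler calls serio_close(serio) before
 * freeing its data.
 */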

irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);
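
/*
 * Port-driver side sketch (the "example_*" names are assumptions): a
 * hardware interrupt handler reads one byte plus any error flags (e.g.
 * SERIO_PARITY or SERIO_TIMEOUT) and feeds them to the core, which routes
 * the byte to the bound serio driver via its ->interrupt() method:
 *
 *	static irqreturn_t example_port_isr(int irq, void *dev_id)
 *	{
 *		struct serio *port = dev_id;
 *		unsigned char data = example_read_data_register();
 *		unsigned int dfl = 0;
 *
 *		return serio_interrupt(port, data, dfl);
 *	}
 */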

struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);