// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

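/* Mux packet type carried on the wire for each TTY index. */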
static u16 packet_type_for_tty_index[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = \
		USB_DEVICE_ID_MATCH_DEVICE |\
		USB_DEVICE_ID_MATCH_INT_CLASS |\
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
	{}
};

MODULE_DEVICE_TABLE(usb, id_table);

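/* Map a received mux packet type back to its TTY index, or -1 if unknown. */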
static int packet_type_to_tty_index(u16 packet_type)
{
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		if (packet_type_for_tty_index[i] == packet_type)
			return i;
	}

	return -1;
}

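/*
 * TX descriptors are allocated GFP_ATOMIC because gdm_mux_send() builds and
 * submits them with write_lock held and interrupts disabled.
 */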
static struct mux_tx *alloc_mux_tx(int len)
{
	struct mux_tx *t;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
		return NULL;
	}

	return t;
}

static void free_mux_tx(struct mux_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static struct mux_rx *alloc_mux_rx(void)
{
	struct mux_rx *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
		return NULL;
	}

	return r;
}

static void free_mux_rx(struct mux_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

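/* Take an RX descriptor from the free pool, or return NULL if it is empty. */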
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
	struct mux_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);

	if (list_empty(&rx->rx_free_list)) {
		spin_unlock_irqrestore(&rx->free_list_lock, flags);
		return NULL;
	}

	r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
	list_del(&r->free_list);

	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

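/*
 * Parse the mux packets in a completed RX buffer and hand each payload to
 * the TTY layer through r->callback.  Every packet begins with START_FLAG
 * and occupies ALIGN(MUX_HEADER_SIZE + payload_size, 4) bytes.  Parsing
 * stops early if the callback reports TO_HOST_BUFFER_REQUEST_FAIL; otherwise
 * a final RECV_PACKET_PROCESS_COMPLETE callback is issued once too little
 * data remains for another packet.
 */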
static int up_to_host(struct mux_rx *r)
{
	struct mux_dev *mux_dev = r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	int total_len;
	u32 packet_size_sum = r->offset;
	int index;
	int ret = TO_HOST_INVALID_PACKET;
	int len = r->len;

	while (1) {
		mux_header = (struct mux_pkt_header *)(r->buf +
						       packet_size_sum);
		start_flag = __le32_to_cpu(mux_header->start_flag);
		payload_size = __le32_to_cpu(mux_header->payload_size);
		packet_type = __le16_to_cpu(mux_header->packet_type);

		if (start_flag != START_FLAG) {
			pr_err("invalid START_FLAG %x\n", start_flag);
			break;
		}

		total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);

		if (len - packet_size_sum < total_len) {
			pr_err("invalid payload : %d %d %04x\n",
			       payload_size, len, packet_type);
			break;
		}

		index = packet_type_to_tty_index(packet_type);
		if (index < 0) {
			pr_err("invalid index %d\n", index);
			break;
		}

		ret = r->callback(mux_header->data,
				  payload_size,
				  index,
				  mux_dev->tty_dev,
				  RECV_PACKET_PROCESS_CONTINUE
				  );
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
			r->offset += packet_size_sum;
			break;
		}

		packet_size_sum += total_len;
		if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
			ret = r->callback(NULL,
					  0,
					  index,
					  mux_dev->tty_dev,
					  RECV_PACKET_PROCESS_COMPLETE
					  );
			break;
		}
	}

	return ret;
}

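/*
 * RX work item: drain to_host_list and push each completed buffer up to the
 * TTY layer, returning descriptors to the free pool afterwards (unless the
 * TTY buffer request failed).
 */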
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}

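/* Unlink @r from rx_submit_list if it is still queued there. */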
static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct mux_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (r == r_remove)
			list_del(&r->rx_submit_list);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

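/*
 * Bulk-IN completion handler.  On error the descriptor goes back to the free
 * pool; on success it is queued on to_host_list and the RX work is scheduled
 * to parse it in process context.
 */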
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	remove_rx_submit_list(r, rx);

	if (urb->status) {
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}

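/*
 * Queue one bulk-IN URB on endpoint 0x86.  @cb is invoked from the RX work
 * for every mux packet found in the completed buffer.
 */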
static int gdm_mux_recv(void *priv_dev,
			int (*cb)(void *data, int len, int tty_index,
				  struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

	if (ret) {
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);

		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}

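/*
 * Bulk-OUT completion: run the sender's callback, if any, then free the TX
 * descriptor.  A -ECONNRESET status (URB killed) skips the callback.
 */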
static void gdm_mux_send_complete(struct urb *urb)
{
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		free_mux_tx(t);
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_mux_tx(t);
}

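/*
 * Prefix @data with a mux header (START_FLAG, sequence number, payload size,
 * packet type), zero-pad the frame to a 4-byte boundary and submit it on
 * bulk-OUT endpoint 5.  If the device is autosuspended, autopm is cycled
 * first to trigger a resume.  The build-and-submit path runs under
 * write_lock.
 */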
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t = NULL;
	static u32 seq_num = 1;
	int total_len;
	int ret;
	unsigned long flags;

	if (mux_dev->usb_state == PM_SUSPEND) {
		ret = usb_autopm_get_interface(mux_dev->intf);
		if (!ret)
			usb_autopm_put_interface(mux_dev->intf);
	}

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	total_len = ALIGN(MUX_HEADER_SIZE + len, 4);

	t = alloc_mux_tx(total_len);
	if (!t) {
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);
		return -ENOMEM;
	}

	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type_for_tty_index[tty_index]);

	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
	memset(t->buf + MUX_HEADER_SIZE + len, 0,
	       total_len - MUX_HEADER_SIZE - len);

	t->len = total_len;
	t->callback = cb;
	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 5),
			  t->buf,
			  total_len,
			  gdm_mux_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	if (ret)
		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

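/*
 * Send a CDC ACM class request on the default control pipe (wIndex = 2, the
 * interface this driver binds to).  usb_control_msg()'s positive byte count
 * is collapsed to 0, so callers see 0 or a negative error code.
 */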
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	int ret;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),
			      request,
			      USB_RT_ACM,
			      value,
			      2,
			      buf,
			      len,
			      5000
			      );

	if (ret < 0)
		pr_err("usb_control_msg error: %d\n", ret);

	return min(ret, 0);
}

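/*
 * Tear down RX state: cancel the RX work, kill in-flight URBs (dropping the
 * submit lock around usb_kill_urb(), which may sleep), then free everything
 * on the free and to-host lists.
 */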
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

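/*
 * Initialise locks, RX lists and the RX work item, and preallocate
 * MAX_ISSUE_NUM * 2 RX descriptors onto the free list.
 */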
static int init_usb(struct mux_dev *mux_dev)
{
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	int ret = 0;
	int i;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		r = alloc_mux_rx();
		if (!r) {
			ret = -ENOMEM;
			break;
		}

		list_add(&r->free_list, &rx->rx_free_list);
	}

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

	return ret;
}

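/*
 * Bind only to interface 2 of a matched device.  Allocates the mux and TTY
 * state, wires up the send/recv/control hooks, registers the LTE TTY device,
 * takes a reference on the USB device and stores tty_dev as interface data.
 */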
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mux_dev *mux_dev;
	struct tty_dev *tty_dev;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	int ret;
	int i;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber != 2)
		return -ENODEV;

	mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
	if (!mux_dev)
		return -ENOMEM;

	tty_dev = kzalloc(sizeof(*tty_dev), GFP_KERNEL);
	if (!tty_dev) {
		ret = -ENOMEM;
		goto err_free_mux;
	}

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);
	if (ret)
		goto err_free_usb;

	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	ret = register_lte_tty_device(tty_dev, &intf->dev);
	if (ret)
		goto err_unregister_tty;

	for (i = 0; i < TTY_MAX_COUNT; i++)
		mux_dev->tty_dev = tty_dev;

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, tty_dev);

	return 0;

err_unregister_tty:
	unregister_lte_tty_device(tty_dev);
err_free_usb:
	release_usb(mux_dev);
	kfree(tty_dev);
err_free_mux:
	kfree(mux_dev);

	return ret;
}

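/*
 * Undo probe: release RX resources, unregister the TTY device, free the
 * state and drop the reference taken with usb_get_dev().
 */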
static void gdm_mux_disconnect(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);
	unregister_lte_tty_device(tty_dev);

	kfree(mux_dev);
	kfree(tty_dev);

	usb_put_dev(usbdev);
}

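/*
 * Suspend: flush pending RX work, switch to PM_SUSPEND and kill every
 * outstanding RX URB (the submit lock is dropped around usb_kill_urb(),
 * which may sleep).
 */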
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	cancel_work_sync(&mux_dev->work_rx.work);

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}

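/*
 * Resume (also used for reset_resume): switch back to PM_NORMAL and requeue
 * MAX_ISSUE_NUM bulk-IN URBs using the callback saved in mux_dev->rx_cb.
 */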
static int gdm_mux_resume(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	u8 i;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);

	return 0;
}

static struct usb_driver gdm_mux_driver = {
	.name = "gdm_mux",
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
	int ret;

	ret = register_lte_tty_driver();
	if (ret)
		return ret;

	return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
	usb_deregister(&gdm_mux_driver);
	unregister_lte_tty_driver();
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");