/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);

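/*
 * Ask the modem for its MAC address.  The request is a small HCI
 * LTE_GET_INFORMATION command written to the bulk-out endpoint; the
 * reply arrives asynchronously on the receive path and is picked up
 * by set_mac_address() below.
 */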
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

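/*
 * Allocate a TX descriptor together with its URB and bounce buffer.
 * When the requested length is an exact multiple of 512 bytes the
 * buffer is made one byte longer, presumably so that a bulk transfer
 * never ends on a full packet and no zero-length packet is needed.
 */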
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt	*rx = &udev->rx;
	struct tx_cxt	*tx = &udev->tx;
	struct usb_tx	*t, *t_next;
	struct usb_rx	*r, *r_next;
	struct usb_tx_sdu	*t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

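/*
 * Initialize the per-device TX/RX state: pre-allocate the pools of
 * SDU and RX descriptors that the hot path later takes from the free
 * lists, and set up the delayed work items that drive transmission
 * and reception.  On any allocation failure, everything allocated so
 * far is torn down again via release_usb().
 */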
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}

static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

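/*
 * RX work item.  Drain the to_host_list filled in by the URB
 * completion handler: a LTE_GET_INFORMATION_RESULT reply is first
 * offered to set_mac_address() to register the network device, and
 * everything else is handed to the registered callback.  Each drained
 * descriptor is returned to the free pool and a fresh receive is
 * queued via gdm_usb_recv().
 */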
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				r->callback(r->cb_data,
					    r->buf,
					    r->urb->actual_length,
					    KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx	*r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

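/*
 * Queue a bulk-in transfer on the data endpoint (0x83).  A descriptor
 * is taken from the RX free pool, the caller's callback is recorded,
 * and the URB is tracked on rx_submit_list so it can be killed on
 * suspend or disconnect.  The allocation flag used for the submission
 * depends on whether the caller runs in thread context (KERNEL_THREAD)
 * or from a completion handler (USB_COMPLETE).
 */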
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

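/*
 * Pack as many queued SDUs as will fit into a single multi-SDU frame.
 * Each SDU is copied behind the multi_sdu header with its length
 * rounded up to a 4-byte boundary; the loop stops after
 * MAX_PACKET_IN_MULTI_SDU packets or when the next SDU would exceed
 * MAX_SDU_SIZE.  The total frame length is returned so the caller can
 * submit one bulk transfer for the whole batch.
 */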
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

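/*
 * TX work item.  Wake the device via runtime PM, then send at most one
 * transfer per invocation: a pending HCI control packet takes priority
 * over data; otherwise the queued SDUs are aggregated into a single
 * multi-SDU buffer.  send_complete gates the worker so only one URB is
 * in flight at a time; the completion handler reschedules the work.
 */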
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

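/*
 * Queue one data packet for transmission.  The payload is wrapped in
 * an SDU header carrying the EPS bearer IDs and NIC type; for anything
 * other than ARP the Ethernet header is stripped first.  The SDU is
 * appended to sdu_list and the TX work is kicked.  TX_NO_BUFFER is
 * returned when this allocation emptied the SDU free pool, so the
 * caller can throttle further transmissions.
 */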
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dft_eps_ID, unsigned int eps_ID,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dft_eps_ID = gdm_cpu_to_dev32(&udev->gdm_ed, dft_eps_ID);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, eps_ID);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

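/*
 * Probe: allocate the phy_dev/lte_udev pair, wire up the send/receive
 * callbacks used by the common gdm_lte layer, set up the TX/RX pools,
 * enable USB autosuspend, pick the device endianness by product ID and
 * ask the modem for its MAC address.  The network device itself is
 * registered later, from the RX path, once the MAC address reply
 * arrives.
 */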
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List the products that are big endian here; everything else
	 * defaults to little endian.
	 */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

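/*
 * Suspend: mark the device suspended, kill every receive URB still on
 * rx_submit_list and flush the TX/RX work items.  Resume undoes this:
 * it re-submits receive URBs (enough to bring the number in flight
 * back to roughly MAX_RX_SUBMIT_COUNT) and kicks the TX work again.
 */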
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	cancel_work_sync(&udev->work_tx.work);
	cancel_work_sync(&udev->work_rx.work);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");