/*
 * CDC Ethernet-based driver for the networking peripherals of Huawei data card devices
 * This driver is developed based on usbnet.c and cdc_ether.c
 * Copyright (C) 2009 by Franko Fang (Huawei Technologies Co., Ltd.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful for
 * Linux networking with Huawei data card devices,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/usb/cdc.h>
#include <linux/usbdevice_fs.h>
#include <linux/timer.h>
#include <linux/version.h>
#include <linux/slab.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
#define DRIVER_VERSION "v0.5.4"
#define DRIVER_AUTHOR "Zhao PengFei<zhaopengfei@meigsmart.com>"
#define DRIVER_DESC "Meig ether driver for 4G data card device"
//////////////////////////////////////////////////////////////////////////////////////////////////////
#define RX_MAX_QUEUE_MEMORY (60 * 1518)
#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? (RX_MAX_QUEUE_MEMORY / (dev)->rx_urb_size) : 4)
#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? (RX_MAX_QUEUE_MEMORY / (dev)->hard_mtu) : 4)
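
/* Worked example (illustrative): at high speed, an rx_urb_size of 1518
 * bytes (one max-size Ethernet frame) gives RX_QLEN =
 * (60 * 1518) / 1518 = 60 queued rx URBs; on a full-speed link both
 * queues fall back to a fixed depth of 4.
 */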

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES (5 * HZ)

// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES (HZ / 8)

// between wakeups
#define UNLINK_TIMEOUT_MS 3
//////////////////////////////////////////////////////////////////////////////////////////////
// randomly generated ethernet address
static u8 node_id[ETH_ALEN];

static const char driver_name[] = "hw_cdc_net";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
//////////////////////////////////////////////////////////////////////////////////////////
#define HW_TLP_MASK_SYNC 0xF800
#define HW_TLP_MASK_LENGTH 0x07FF
#define HW_TLP_BITS_SYNC 0xF800
#pragma pack(push, 1)
struct hw_cdc_tlp {
	unsigned short pktlength;
	unsigned char payload;
};
#define HW_TLP_HDR_LENGTH sizeof(unsigned short)
#pragma pack(pop)
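
/* Illustration only (not called by the driver): judging from the masks
 * above, a TLP header is a single little-endian 16-bit word whose top
 * 5 bits must equal HW_TLP_BITS_SYNC and whose low 11 bits carry the
 * payload length. Minimal helpers under that assumption:
 */
static inline int hw_tlp_hdr_is_valid(unsigned short hdr)
{
	return (hdr & HW_TLP_MASK_SYNC) == HW_TLP_BITS_SYNC;
}

static inline unsigned short hw_tlp_hdr_payload_len(unsigned short hdr)
{
	return hdr & HW_TLP_MASK_LENGTH;
}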

typedef enum __HW_TLP_BUF_STATE {
	HW_TLP_BUF_STATE_IDLE = 0,
	HW_TLP_BUF_STATE_PARTIAL_FILL,
	HW_TLP_BUF_STATE_PARTIAL_HDR,
	HW_TLP_BUF_STATE_HDR_ONLY,
	HW_TLP_BUF_STATE_ERROR
} HW_TLP_BUF_STATE;

struct hw_cdc_tlp_tmp {
	void *buffer;
	unsigned short pktlength;
	unsigned short bytesneeded;
};
/* max ethernet pkt size 1514 */
#define HW_USB_RECEIVE_BUFFER_SIZE 1600L
/* for Thin-layer-protocol (TLP) */
#define HW_USB_MRECEIVE_BUFFER_SIZE 4096L
/* for TLP */
#define HW_USB_MRECEIVE_MAX_BUFFER_SIZE (1024 * 16)

#define HW_JUNGO_BCDDEVICE_VALUE 0x0102
#define BINTERFACESUBCLASS 0x02
#define BINTERFACESUBCLASS_HW 0x03
///////////////////////////////////////////////////////////////////////////////////////////
#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
#define EVENT_LINK_RESET 4

#define NCM_TX_DEFAULT_TIMEOUT_MS 2

static int ncm_prefer_32 = 1;

module_param(ncm_prefer_32, int, S_IRUGO);

static int ncm_prefer_crc = 0;

module_param(ncm_prefer_crc, int, S_IRUGO);

static unsigned long ncm_tx_timeout = NCM_TX_DEFAULT_TIMEOUT_MS;
module_param(ncm_tx_timeout, ulong, S_IRUGO);

static unsigned int ncm_read_buf_count = 4;
module_param(ncm_read_buf_count, uint, S_IRUGO);

static unsigned short ncm_read_size_in1k = 4;
/* the module_param type must match the variable: ushort, not short */
module_param(ncm_read_size_in1k, ushort, S_IRUGO);

static int rt_debug = 0;
// module_param(rt_debug, bool, S_IRUGO|S_IWUSR);
module_param(rt_debug, int, S_IRUGO | S_IWUSR);
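
/* Usage sketch (module name assumed to be hw_cdc_net; adjust to the
 * actual .ko name): because rt_debug is S_IWUSR, debug tracing can be
 * toggled at runtime without reloading the driver:
 *
 *	echo 1 > /sys/module/hw_cdc_net/parameters/rt_debug
 */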

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)

#else
static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpup((__le16 *)p);
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpup((__le32 *)p);
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	*((__le16 *)p) = cpu_to_le16(val);
}

static inline void put_unaligned_le32(u32 val, void *p)
{
	*((__le32 *)p) = cpu_to_le32(val);
}
#endif
bool deviceisBalong = false;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
#define LINUX_VERSION37_LATER 1
#else
#define LINUX_VERSION37_LATER 0
#endif

/*
 * On kernels newer than 2.6.36 some systems ship cdc.h but not ncm.h,
 * so the NCM definitions are carried locally instead of:
 * #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
 * #include <linux/usb/ncm.h>
 * #else
 */
#define USB_CDC_NCM_TYPE 0x1a

/* NCM Functional Descriptor */
/* renamed usb_cdc_ncm_desc -> usb_cdc_ncm_desc_hw to prevent redefinition with cdc.h (11-05) */
struct usb_cdc_ncm_desc_hw {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDescriptorSubType;
	__le16 bcdNcmVersion;
	__u8 bmNetworkCapabilities;
} __attribute__((packed));

#ifdef NCM_NCAP_ETH_FILTER
#undef NCM_NCAP_ETH_FILTER
#endif
#ifdef NCM_NCAP_NET_ADDRESS
#undef NCM_NCAP_NET_ADDRESS
#endif
#ifdef NCM_NCAP_ENCAP_COMM
#undef NCM_NCAP_ENCAP_COMM
#endif
#ifdef NCM_NCAP_MAX_DGRAM
#undef NCM_NCAP_MAX_DGRAM
#endif
#ifdef NCM_NCAP_CRC_MODE
#undef NCM_NCAP_CRC_MODE
#endif

#define NCM_NCAP_ETH_FILTER (1 << 0)
#define NCM_NCAP_NET_ADDRESS (1 << 1)
#define NCM_NCAP_ENCAP_COMM (1 << 2)
#define NCM_NCAP_MAX_DGRAM (1 << 3)
#define NCM_NCAP_CRC_MODE (1 << 4)

#ifdef USB_CDC_GET_NTB_PARAMETERS
#undef USB_CDC_GET_NTB_PARAMETERS
#endif
#ifdef USB_CDC_GET_NET_ADDRESS
#undef USB_CDC_GET_NET_ADDRESS
#endif
#ifdef USB_CDC_SET_NET_ADDRESS
#undef USB_CDC_SET_NET_ADDRESS
#endif
#ifdef USB_CDC_GET_NTB_FORMAT
#undef USB_CDC_GET_NTB_FORMAT
#endif
#ifdef USB_CDC_SET_NTB_FORMAT
#undef USB_CDC_SET_NTB_FORMAT
#endif
#ifdef USB_CDC_GET_NTB_INPUT_SIZE
#undef USB_CDC_GET_NTB_INPUT_SIZE
#endif
#ifdef USB_CDC_SET_NTB_INPUT_SIZE
#undef USB_CDC_SET_NTB_INPUT_SIZE
#endif
#ifdef USB_CDC_GET_MAX_DATAGRAM_SIZE
#undef USB_CDC_GET_MAX_DATAGRAM_SIZE
#endif
#ifdef USB_CDC_SET_MAX_DATAGRAM_SIZE
#undef USB_CDC_SET_MAX_DATAGRAM_SIZE
#endif
#ifdef USB_CDC_GET_CRC_MODE
#undef USB_CDC_GET_CRC_MODE
#endif
#ifdef USB_CDC_SET_CRC_MODE
#undef USB_CDC_SET_CRC_MODE
#endif

#define USB_CDC_GET_NTB_PARAMETERS 0x80
#define USB_CDC_GET_NET_ADDRESS 0x81
#define USB_CDC_SET_NET_ADDRESS 0x82
#define USB_CDC_GET_NTB_FORMAT 0x83
#define USB_CDC_SET_NTB_FORMAT 0x84
#define USB_CDC_GET_NTB_INPUT_SIZE 0x85
#define USB_CDC_SET_NTB_INPUT_SIZE 0x86
#define USB_CDC_GET_MAX_DATAGRAM_SIZE 0x87
#define USB_CDC_SET_MAX_DATAGRAM_SIZE 0x88
#define USB_CDC_GET_CRC_MODE 0x89
#define USB_CDC_SET_CRC_MODE 0x8a

/*
 * Class Specific structures and constants
 *
 * CDC NCM parameter structure, CDC NCM subclass 6.2.1
 *
 */
struct usb_cdc_ncm_ntb_parameter_hw {
	__le16 wLength;
	__le16 bmNtbFormatSupported;
	__le32 dwNtbInMaxSize;
	__le16 wNdpInDivisor;
	__le16 wNdpInPayloadRemainder;
	__le16 wNdpInAlignment;
	__le16 wPadding1;
	__le32 dwNtbOutMaxSize;
	__le16 wNdpOutDivisor;
	__le16 wNdpOutPayloadRemainder;
	__le16 wNdpOutAlignment;
	__le16 wPadding2;
} __attribute__((packed));
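
/* Sketch (illustrative; 'udev' and 'iface_no' are assumed to identify
 * the NCM control interface): the structure above is read with the
 * class-specific GET_NTB_PARAMETERS request. The buffer must be
 * kmalloc'd, since USB cannot DMA to the stack:
 *
 *	struct usb_cdc_ncm_ntb_parameter_hw *p =
 *		kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p)
 *		usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 *				USB_CDC_GET_NTB_PARAMETERS,
 *				USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
 *				0, iface_no, p, sizeof(*p), 5000);
 */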

/*
 * CDC NCM transfer headers, CDC NCM subclass 3.2
 */
#ifdef NCM_NTH16_SIGN
#undef NCM_NTH16_SIGN
#endif
#ifdef NCM_NTH32_SIGN
#undef NCM_NTH32_SIGN
#endif

#define NCM_NTH16_SIGN 0x484D434E /* NCMH */
#define NCM_NTH32_SIGN 0x686D636E /* ncmh */

/* renamed usb_cdc_ncm_nth16 -> usb_cdc_ncm_nth16_hw to prevent redefinition with cdc.h */
struct usb_cdc_ncm_nth16_hw {
	__le32 dwSignature;
	__le16 wHeaderLength;
	__le16 wSequence;
	__le16 wBlockLength;
	__le16 wFpIndex;
} __attribute__((packed));

/* renamed usb_cdc_ncm_nth32 -> usb_cdc_ncm_nth32_hw to prevent redefinition with cdc.h */
struct usb_cdc_ncm_nth32_hw {
	__le32 dwSignature;
	__le16 wHeaderLength;
	__le16 wSequence;
	__le32 dwBlockLength;
	__le32 dwFpIndex;
} __attribute__((packed));

/*
 * CDC NCM datagram pointers, CDC NCM subclass 3.3
 */
#ifdef NCM_NDP16_CRC_SIGN
#undef NCM_NDP16_CRC_SIGN
#endif
#ifdef NCM_NDP16_NOCRC_SIGN
#undef NCM_NDP16_NOCRC_SIGN
#endif
#ifdef NCM_NDP32_CRC_SIGN
#undef NCM_NDP32_CRC_SIGN
#endif
#ifdef NCM_NDP32_NOCRC_SIGN
#undef NCM_NDP32_NOCRC_SIGN
#endif

#define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */
#define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */
#define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */
#define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */

/* renamed usb_cdc_ncm_ndp16 -> usb_cdc_ncm_ndp16_hw to prevent redefinition with cdc.h */
struct usb_cdc_ncm_ndp16_hw {
	__le32 dwSignature;
	__le16 wLength;
	__le16 wNextFpIndex;
	__u8 data[0];
} __attribute__((packed));

/* renamed usb_cdc_ncm_ndp32 -> usb_cdc_ncm_ndp32_hw to prevent redefinition with cdc.h */
struct usb_cdc_ncm_ndp32_hw {
	__le32 dwSignature;
	__le16 wLength;
	__le16 wReserved6;
	__le32 dwNextFpIndex;
	__le32 dwReserved12;
	__u8 data[0];
} __attribute__((packed));

/*
 * Here are options for the NCM Datagram Pointer table (NDP) parser.
 * There are 2 different formats in the spec (ch. 3): NDP16 and NDP32;
 * in NDP16 the offset and size fields are 1 16-bit word wide,
 * in NDP32 they are 2 16-bit words wide. The signatures also differ.
 * To keep the parser code the same, the differences live in this
 * structure, and pointers to it are switched when the format changes.
 */

/* renamed ndp_parser_opts -> ndp_parser_opts_hw to prevent redefinition */
struct ndp_parser_opts_hw {
	u32 nth_sign;
	u32 ndp_sign;
	unsigned nth_size;
	unsigned ndp_size;
	unsigned ndplen_align;
	/* sizes in u16 units */
	unsigned dgram_item_len; /* index or length */
	unsigned block_length;
	unsigned fp_index;
	unsigned reserved1;
	unsigned reserved2;
	unsigned next_fp_index;
};

#ifdef INIT_NDP16_OPTS
#undef INIT_NDP16_OPTS
#endif
#ifdef INIT_NDP32_OPTS
#undef INIT_NDP32_OPTS
#endif

#define INIT_NDP16_OPTS \
	{ \
		.nth_sign = NCM_NTH16_SIGN, .ndp_sign = NCM_NDP16_NOCRC_SIGN, .nth_size = sizeof(struct usb_cdc_ncm_nth16_hw), \
		.ndp_size = sizeof(struct usb_cdc_ncm_ndp16_hw), .ndplen_align = 4, .dgram_item_len = 1, .block_length = 1, \
		.fp_index = 1, .reserved1 = 0, .reserved2 = 0, .next_fp_index = 1, \
	}

#define INIT_NDP32_OPTS \
	{ \
		.nth_sign = NCM_NTH32_SIGN, .ndp_sign = NCM_NDP32_NOCRC_SIGN, .nth_size = sizeof(struct usb_cdc_ncm_nth32_hw), \
		.ndp_size = sizeof(struct usb_cdc_ncm_ndp32_hw), .ndplen_align = 8, .dgram_item_len = 2, .block_length = 2, \
		.fp_index = 2, .reserved1 = 1, .reserved2 = 2, .next_fp_index = 2, \
	}

static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
	switch (size) {
	case 1:
		put_unaligned_le16((u16)val, *p);
		break;
	case 2:
		put_unaligned_le32((u32)val, *p);
		break;
	default:
		BUG();
	}

	*p += size;
}

static inline unsigned get_ncm(__le16 **p, unsigned size)
{
	unsigned tmp;

	switch (size) {
	case 1:
		tmp = get_unaligned_le16(*p);
		break;
	case 2:
		tmp = get_unaligned_le32(*p);
		break;
	default:
		BUG();
	}

	*p += size;
	return tmp;
}
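
/* Usage sketch (illustrative): with 'opts' pointing at ndp16_opts or
 * ndp32_opts, the same code reads NTH fields in either bit mode,
 * because the field widths come from the opts table:
 *
 *	__le16 *p = ...;  // cursor just past dwSignature/wHeaderLength/wSequence
 *	unsigned block_len = get_ncm(&p, opts->block_length);
 *	unsigned ndp_index = get_ncm(&p, opts->fp_index);
 */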

#ifdef NCM_CONTROL_TIMEOUT
#undef NCM_CONTROL_TIMEOUT
#endif

#define NCM_CONTROL_TIMEOUT (5 * 1000)

/* 'u' must be of unsigned type */
#define IS_POWER2(u) (((u) > 0) && !((u) & ((u)-1)))

/* 'p' must designate a variable of type * __le16 (in all get/put_ncm_leXX) */
#define get_ncm_le16(p) \
	({ \
		u16 val = get_unaligned_le16(p); \
		p += 1; \
		val; \
	})

#define get_ncm_le32(p) \
	({ \
		u32 val = get_unaligned_le32(p); \
		p += 2; \
		val; \
	})

#define put_ncm_le16(val, p) \
	({ \
		put_unaligned_le16((val), p); \
		p += 1; \
	})

#define put_ncm_le32(val, p) \
	({ \
		put_unaligned_le32((val), p); \
		p += 2; \
	})

#define NCM_NDP_MIN_ALIGNMENT 4

#ifdef NCM_NTB_MIN_IN_SIZE
#undef NCM_NTB_MIN_IN_SIZE
#endif
#define NCM_NTB_MIN_IN_SIZE 2048

#ifdef NCM_NTB_MIN_OUT_SIZE
#undef NCM_NTB_MIN_OUT_SIZE
#endif

#define NCM_NDP16_ENTRY_LEN 4

/* NTB16 must include: NTB16 header, NDP16 header, datagram pointer entry,
 * terminating (NULL) datagram entry
 */
#define NCM_NTB_MIN_OUT_SIZE \
	(sizeof(struct usb_cdc_ncm_nth16_hw) + sizeof(struct usb_cdc_ncm_ndp16_hw) + 2 * NCM_NDP16_ENTRY_LEN)
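
/* Worked out with the packed structures above: 12 bytes (NTH16) +
 * 8 bytes (NDP16 header) + 2 * 4 bytes (one datagram entry plus the
 * NULL terminator) = 28 bytes minimum per outgoing NTB.
 */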

#ifndef max
#define max(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif

#ifndef min
#define min(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(16, (int)ncm_read_size_in1k) * 1024))
#else
#define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(2, (int)ncm_read_size_in1k) * 1024))
#endif

#define RX_QLEN_NCM ncm_read_buf_count
#define TX_QLEN_NCM 4

/* These are actually defined in usbnet.c and we need to redefine these here in
 * order to calculate the size of the SKB pool
 */

static struct ndp_parser_opts_hw ndp16_opts = INIT_NDP16_OPTS;
static struct ndp_parser_opts_hw ndp32_opts = INIT_NDP32_OPTS;
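
/* Selection sketch (illustrative; 'ctx' is a struct ncm_ctx as defined
 * below): the parser switches between NDP16 and NDP32 by copying the
 * matching opts table, so the datagram walk itself never branches on
 * the bit mode:
 *
 *	ctx->popts = (ctx->bit_mode == NCM_BIT_MODE_32) ? ndp32_opts : ndp16_opts;
 */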

struct ndp_entry {
	struct list_head list;
	unsigned idx;
	unsigned len;
};

struct ntb {
	/* Maximum possible length of this NTB */
	unsigned max_len;
	/* The current offset of the NDP */
	unsigned ndp_off;
	/* The current length of the NDP */
	unsigned ndp_len;
	/* End of the datagrams section */
	unsigned dgrams_end;
	/* Entries list (datagram index/length pairs) */
	struct list_head entries;
	/* Number of datagrams in this NTB */
	unsigned ndgrams;
	/* The SKB with the actual NTB data */
	struct sk_buff *skb;
};

#define NTB_LEN(n) ((n)->ndp_off + (n)->ndp_len)
#define NTB_IS_EMPTY(n) ((n)->ndgrams == 0)

struct ncm_ctx {
	struct usb_cdc_ncm_desc_hw *ncm_desc;

	struct hw_cdc_net *ndev;
	struct usb_interface *control;
	struct usb_interface *data;

#define NTB_FORMAT_SUPPORTED_16BIT 0x0001
#define NTB_FORMAT_SUPPORTED_32BIT 0x0002
	u16 formats;
	u32 rx_max_ntb;
	u32 tx_max_ntb;
	u16 tx_divisor;
	u16 tx_remainder;
	u16 tx_align;

#define NCM_BIT_MODE_16 0
#define NCM_BIT_MODE_32 1
	u8 bit_mode;
#define NCM_CRC_MODE_NO 0
#define NCM_CRC_MODE_YES 1
	u8 crc_mode;

	struct ndp_parser_opts_hw popts;

	struct ntb curr_ntb;
	spinlock_t tx_lock;
	struct sk_buff **skb_pool;
	unsigned skb_pool_size;
	struct timer_list tx_timer;
	/* The maximum amount of jiffies that a datagram can be held (in the
	 * current-NTB) before it must be sent on the bus
	 */
	unsigned long tx_timeout_jiffies;
#ifdef CONFIG_CDC_ENCAP_COMMAND
	struct cdc_encap *cdc_encap_ctx;
#endif
};

struct hw_cdc_net {
	/* housekeeping */
	struct usb_device *udev;
	struct usb_interface *intf;
	const char *driver_name;
	const char *driver_desc;
	void *driver_priv;
	wait_queue_head_t *wait;
	struct mutex phy_mutex;
	unsigned char suspend_count;

	/* i/o info: pipes etc */
	unsigned in, out;
	struct usb_host_endpoint *status;
	unsigned maxpacket;
	struct timer_list delay;

	/* protocol/interface state */
	struct net_device *net;
	struct net_device_stats stats;
	int msg_enable;
	unsigned long data[5];
	u32 xid;
	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */
	struct mii_if_info mii;

	/* various kinds of pending driver work */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct urb *interrupt;
	struct tasklet_struct bh;

	struct work_struct kevent;
	struct delayed_work status_work;
	int qmi_sync;
	unsigned long flags;

	/* The state and buffer for the data of TLP */
	HW_TLP_BUF_STATE hw_tlp_buffer_state;
	struct hw_cdc_tlp_tmp hw_tlp_tmp_buf;
	/* indicate whether the download TLP feature is activated */
	int hw_tlp_download_is_actived;

	/* Add for ncm */
	int is_ncm;
	struct ncm_ctx *ncm_ctx;
};

static inline struct usb_driver *driver_of(struct usb_interface *intf)
{
	return to_usb_driver(intf->dev.driver);
}

/* Drivers that reuse some of the standard USB CDC infrastructure
 * (notably, using multiple interfaces according to the CDC
 * union descriptor) get some helper code.
 */
struct hw_dev_state {
	struct usb_cdc_header_desc *header;
	struct usb_cdc_union_desc *u;
	struct usb_cdc_ether_desc *ether;
	struct usb_interface *control;
	struct usb_interface *data;
};

/* we record the state for each of our queued skbs */
enum skb_state { illegal = 0, tx_start, tx_done, rx_start, rx_done, rx_cleanup };

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct hw_cdc_net *dev;
	enum skb_state state;
	size_t length;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define devdbg(hw_cdc_net, fmt, arg...) ((void)(rt_debug && printk(KERN_ERR "Meig_cdc_driver######: " fmt "\n", ##arg)))

#define deverr(hw_cdc_net, fmt, arg...) printk(KERN_ERR "%s: " fmt "\n", (hw_cdc_net)->net->name, ##arg)
#define devwarn(hw_cdc_net, fmt, arg...) printk(KERN_WARNING "%s: " fmt "\n", (hw_cdc_net)->net->name, ##arg)

#define devinfo(hw_cdc_net, fmt, arg...) printk(KERN_INFO "%s: " fmt "\n", (hw_cdc_net)->net->name, ##arg)

static void hw_cdc_status(struct hw_cdc_net *dev, struct urb *urb);
static inline int hw_get_ethernet_addr(struct hw_cdc_net *dev);
static int hw_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf);
void hw_cdc_unbind(struct hw_cdc_net *dev, struct usb_interface *intf);
static int cdc_ncm_rx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb);
static struct sk_buff *cdc_ncm_tx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb, gfp_t mem_flags);

int hw_get_endpoints(struct hw_cdc_net *dev, struct usb_interface *intf);
void hw_skb_return(struct hw_cdc_net *dev, struct sk_buff *skb);
void hw_unlink_rx_urbs(struct hw_cdc_net *dev);
void hw_defer_kevent(struct hw_cdc_net *dev, int work);
int hw_get_settings(struct net_device *net, struct ethtool_cmd *cmd);
int hw_set_settings(struct net_device *net, struct ethtool_cmd *cmd);
u32 hw_get_link(struct net_device *net);
int hw_nway_reset(struct net_device *net);
void hw_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info);
u32 hw_get_msglevel(struct net_device *net);
void hw_set_msglevel(struct net_device *net, u32 level);
void hw_disconnect(struct usb_interface *intf);
int hw_cdc_probe(struct usb_interface *udev, const struct usb_device_id *prod);
int hw_resume(struct usb_interface *intf);
int hw_suspend(struct usb_interface *intf, pm_message_t message);

static void hw_cdc_check_status_work(struct work_struct *work);

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int hw_get_endpoints(struct hw_cdc_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;
		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc)) {
					continue;
				}
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in) {
					in = e;
				} else if (intr && !status) {
					status = e;
				}
			} else {
				if (!out) {
					out = e;
				}
			}
		}
		if (in && out) {
			break;
		}
	}
	if (!alt || !in || !out) {
		return -EINVAL;
	}
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(dev->udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting);
		if (tmp < 0) {
			return tmp;
		}
	}

	dev->in = usb_rcvbulkpipe(dev->udev, in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe(dev->udev, out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(hw_get_endpoints);

static void intr_complete(struct urb *urb);

static int init_status(struct hw_cdc_net *dev, struct usb_interface *intf)
{
	char *buf = NULL;
	unsigned pipe = 0;
	unsigned maxp;
	unsigned period;

	pipe = usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket(dev->udev, pipe, 0);

	/* avoid 1 msec chatter: min 8 msec poll rate */
	period = max((int)dev->status->desc.bInterval, (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
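	/* Note (explanatory): at high speed bInterval is log2-encoded, so
	 * the floor of 7 means 2^(7-1) = 64 microframes = 8 ms between
	 * interrupt polls; the full-speed floor of 3 is in frames (ms).
	 */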

	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree(buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe, buf, maxp, intr_complete, dev, period);
			dev_dbg(&intf->dev, "status ep%din, %d bytes period %d\n", usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}
/* [zhaopf@meigsmart.com-2020-0903] add for higher version kernel support { */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
struct timespec64 current_kernel_time(void)
{
	struct timespec64 lTime;
	ktime_get_coarse_real_ts64(&lTime);
	return lTime;
}
#endif
/* [zhaopf@meigsmart.com-2020-0903] add for higher version kernel support } */

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void hw_skb_return(struct hw_cdc_net *dev, struct sk_buff *skb)
{
	int status;
	u32 sn;

	if (skb->len > 0x80) {
		sn = be32_to_cpu(*(u32 *)(skb->data + 0x26));
		devdbg(dev, "hw_skb_return,len:%d receive sn:%x, time:%ld-%ld", skb->len, sn, current_kernel_time().tv_sec,
		       current_kernel_time().tv_nsec);
	} else {
		sn = be32_to_cpu(*(u32 *)(skb->data + 0x2a));
		devdbg(dev, "hw_skb_return,len:%d receive ack sn:%x, time:%ld-%ld", skb->len, sn, current_kernel_time().tv_sec,
		       current_kernel_time().tv_nsec);
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status(dev)) {
		devdbg(dev, "< rx, len %zu, type 0x%x", skb->len + sizeof(struct ethhdr), skb->protocol);
	}
	memset(skb->cb, 0, sizeof(struct skb_data));
	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err(dev)) {
		devdbg(dev, "netif_rx status %d", status);
	}
}
EXPORT_SYMBOL_GPL(hw_skb_return);

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs(struct hw_cdc_net *dev, struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb, *skbnext;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (skb = q->next; skb != (struct sk_buff *)q; skb = skbnext) {
		struct skb_data *entry;
		struct urb *urb;
		int retval;

		entry = (struct skb_data *)skb->cb;
		urb = entry->urb;
		skbnext = skb->next;

		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb(urb);
		if (retval != -EINPROGRESS && retval != 0) {
			devdbg(dev, "unlink urb err, %d", retval);
		} else {
			count++;
		}
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void hw_unlink_rx_urbs(struct hw_cdc_net *dev)
{
	if (netif_running(dev->net)) {
		(void)unlink_urbs(dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(hw_unlink_rx_urbs);

/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

static int hw_change_mtu(struct net_device *net, int new_mtu)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	int ll_mtu = new_mtu + net->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0) {
		return -EINVAL;
	}
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0) {
		return -EDOM;
	}
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu && !dev->is_ncm) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			hw_unlink_rx_urbs(dev);
		}
	}

	devdbg(dev, "change mtu :%d, urb_size:%u", new_mtu, (u32)dev->rx_urb_size);

	return 0;
}

/*-------------------------------------------------------------------------*/

static struct net_device_stats *hw_get_stats(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	return &dev->stats;
}
// #endif
/*-------------------------------------------------------------------------*/

static void tx_defer_bh(struct hw_cdc_net *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (1 <= dev->done.qlen) {
		tasklet_schedule(&dev->bh);
	}
	spin_unlock_irqrestore(&dev->done.lock, flags);
}

static HW_TLP_BUF_STATE submit_skb(struct hw_cdc_net *dev, unsigned char *data, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_data *entry;

	unsigned long flags;

	if (len > dev->rx_urb_size) {
		devdbg(dev, "The packet length is too large\n");
		return HW_TLP_BUF_STATE_ERROR;
	}

	if ((skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC)) == NULL) {
		return HW_TLP_BUF_STATE_ERROR;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *)skb->cb;
	entry->urb = NULL;
	entry->dev = dev;
	entry->state = rx_done;
	/* record the payload length; skb->len is still 0 at this point */
	entry->length = len;

	memcpy(skb_put(skb, len), data, len);

	spin_lock_irqsave(&dev->done.lock, flags);
	__skb_queue_tail(&dev->done, skb);
	if (1 <= dev->done.qlen) {
		tasklet_schedule(&dev->bh);
	}
	spin_unlock_irqrestore(&dev->done.lock, flags);
	return HW_TLP_BUF_STATE_IDLE;
}
static void reset_tlp_tmp_buf(struct hw_cdc_net *dev)
{
	dev->hw_tlp_tmp_buf.bytesneeded = 0;
	dev->hw_tlp_tmp_buf.pktlength = 0;
}
static void rx_tlp_parse(struct hw_cdc_net *dev, struct sk_buff *skb)
{
	struct hw_cdc_tlp *tlp = NULL;
	int remain_bytes = (int)skb->len;
	unsigned short pktlen = 0;
	unsigned char *cur_ptr = skb->data;
	unsigned char *payload_ptr = NULL;
	unsigned char *buf_start = skb->data;
	unsigned char *buf_end = buf_start + skb->len;
	unsigned char *ptr = NULL;

	/* decoding the TLP packets into the ether packet */
	while (remain_bytes > 0) {
		switch (dev->hw_tlp_buffer_state) {
		case HW_TLP_BUF_STATE_IDLE: {
			if (HW_TLP_HDR_LENGTH < remain_bytes) {
				tlp = (struct hw_cdc_tlp *)cur_ptr;
				pktlen = (tlp->pktlength & HW_TLP_MASK_LENGTH);
				payload_ptr = (unsigned char *)&(tlp->payload);

				// validate the tlp packet header
				if ((tlp->pktlength & HW_TLP_MASK_SYNC) != HW_TLP_BITS_SYNC) {
					devdbg(dev, "Invalid TLP header sync bits");
					dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
					break;
				}
				/* The received buffer has the whole ether packet */
				if ((payload_ptr + pktlen) <= buf_end) {
					/* Get the ether packet from the TLP packet, and put it into the done queue */
					submit_skb(dev, payload_ptr, pktlen);
					cur_ptr = payload_ptr + pktlen;
					remain_bytes = buf_end - cur_ptr;
				} else { /* has the part of the ether packet */
					if (pktlen > dev->rx_urb_size) {
						devdbg(dev, "The pktlen is invalid");
						dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
						break;
					}
					dev->hw_tlp_tmp_buf.bytesneeded = (payload_ptr + pktlen) - buf_end;
					dev->hw_tlp_tmp_buf.pktlength = buf_end - payload_ptr;
					memcpy(dev->hw_tlp_tmp_buf.buffer, payload_ptr, dev->hw_tlp_tmp_buf.pktlength);
					dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
					remain_bytes = 0;
				}
			} else if (HW_TLP_HDR_LENGTH == remain_bytes) {
				memcpy(dev->hw_tlp_tmp_buf.buffer, cur_ptr, remain_bytes);
				dev->hw_tlp_tmp_buf.bytesneeded = 0;
				dev->hw_tlp_tmp_buf.pktlength = remain_bytes;
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_HDR_ONLY;
				remain_bytes = 0;
			} else if (remain_bytes > 0) {
				memcpy(dev->hw_tlp_tmp_buf.buffer, cur_ptr, remain_bytes);
				dev->hw_tlp_tmp_buf.bytesneeded = HW_TLP_HDR_LENGTH - remain_bytes;
				dev->hw_tlp_tmp_buf.pktlength = remain_bytes;
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_HDR;
				remain_bytes = 0;
			} else {
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
			}
			break;
		}
		case HW_TLP_BUF_STATE_HDR_ONLY: {
			/* read the saved header out of the temp buffer instead of
			 * dereferencing 'tlp', which may still be NULL here
			 */
			unsigned short hdr = *((unsigned short *)dev->hw_tlp_tmp_buf.buffer);
			pktlen = (hdr & HW_TLP_MASK_LENGTH);
			payload_ptr = cur_ptr;
			reset_tlp_tmp_buf(dev);
			/* validate the tlp packet header */
			if ((hdr & HW_TLP_MASK_SYNC) != HW_TLP_BITS_SYNC) {
				devdbg(dev, "Invalid TLP header sync bits");
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
				break;
			}
			if ((payload_ptr + pktlen) <= buf_end) {
				submit_skb(dev, payload_ptr, pktlen);
				cur_ptr = payload_ptr + pktlen;
				remain_bytes = buf_end - cur_ptr;
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
			} else {
				if (pktlen > dev->rx_urb_size) {
					dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
					break;
				}
				dev->hw_tlp_tmp_buf.bytesneeded = (payload_ptr + pktlen) - buf_end;
				dev->hw_tlp_tmp_buf.pktlength = buf_end - payload_ptr;
				memcpy(dev->hw_tlp_tmp_buf.buffer, payload_ptr, dev->hw_tlp_tmp_buf.pktlength);
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
				remain_bytes = 0;
			}
			break;
		}
		case HW_TLP_BUF_STATE_PARTIAL_HDR: {
			memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength, cur_ptr,
			       dev->hw_tlp_tmp_buf.bytesneeded);
			cur_ptr += dev->hw_tlp_tmp_buf.bytesneeded;
			dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_HDR_ONLY;
			remain_bytes -= dev->hw_tlp_tmp_buf.bytesneeded;
			break;
		}
		case HW_TLP_BUF_STATE_PARTIAL_FILL: {
			if (remain_bytes < dev->hw_tlp_tmp_buf.bytesneeded) {
				memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength, cur_ptr, remain_bytes);
				dev->hw_tlp_tmp_buf.pktlength += remain_bytes;
				dev->hw_tlp_tmp_buf.bytesneeded -= remain_bytes;
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
				cur_ptr += remain_bytes;
				remain_bytes = 0;
			} else {
				unsigned short tmplen = dev->hw_tlp_tmp_buf.bytesneeded + dev->hw_tlp_tmp_buf.pktlength;
				if (HW_USB_RECEIVE_BUFFER_SIZE < tmplen) {
					devdbg(dev, "The tlp length is larger than 1600");
					/* GFP_ATOMIC: this parser runs from the URB completion path */
					ptr = (unsigned char *)kmalloc(tmplen, GFP_ATOMIC);
					if (ptr != NULL) {
						memcpy(ptr, dev->hw_tlp_tmp_buf.buffer, dev->hw_tlp_tmp_buf.pktlength);
						memcpy(ptr + dev->hw_tlp_tmp_buf.pktlength, cur_ptr, dev->hw_tlp_tmp_buf.bytesneeded);
						submit_skb(dev, ptr, tmplen);
						kfree(ptr);
					}
				} else {
					memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength, cur_ptr,
					       dev->hw_tlp_tmp_buf.bytesneeded);
					submit_skb(dev, dev->hw_tlp_tmp_buf.buffer, tmplen);
				}
				remain_bytes -= dev->hw_tlp_tmp_buf.bytesneeded;
				cur_ptr += dev->hw_tlp_tmp_buf.bytesneeded;
				dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
				reset_tlp_tmp_buf(dev);
			}
			break;
		}
		case HW_TLP_BUF_STATE_ERROR:
		default: {
			remain_bytes = 0;
			reset_tlp_tmp_buf(dev);
			dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
			break;
		}
		}
	}
}
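
/* Worked example of the state machine above (illustrative): a
 * 1000-byte ether frame whose 2-byte TLP header lands exactly on the
 * tail of URB #1 drives IDLE -> HDR_ONLY (header saved in the temp
 * buffer); URB #2 then supplies the 1000 payload bytes, submit_skb()
 * queues the frame, and the state returns to IDLE.  Had only part of
 * the header arrived, PARTIAL_HDR would collect the missing byte
 * first; a split payload goes through PARTIAL_FILL the same way.
 */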

static void rx_defer_bh(struct hw_cdc_net *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;
	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);

	/* deal with the download tlp feature */
	if (1 == dev->hw_tlp_download_is_actived) {
		rx_tlp_parse(dev, skb);
		dev_kfree_skb_any(skb);
	} else {
		spin_lock_irqsave(&dev->done.lock, flags);
		__skb_queue_tail(&dev->done, skb);
		if (1 <= dev->done.qlen) {
			tasklet_schedule(&dev->bh);
		}
		spin_unlock_irqrestore(&dev->done.lock, flags);
	}
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
void hw_defer_kevent(struct hw_cdc_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_work(&dev->kevent)) {
		deverr(dev, "kevent %d may have been dropped", work);
	} else {
		devdbg(dev, "kevent %d scheduled", work);
	}
}
EXPORT_SYMBOL_GPL(hw_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete(struct urb *urb);
static void rx_submit(struct hw_cdc_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	int retval = 0;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;

	if ((skb = alloc_skb(size + NET_IP_ALIGN, flags)) == NULL) {
		deverr(dev, "no rx skb");
		hw_defer_kevent(dev, EVENT_RX_MEMORY);
		usb_free_urb(urb);
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->in, skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_running(dev->net) && netif_device_present(dev->net) && !test_bit(EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
		case 0: // submit successfully
			__skb_queue_tail(&dev->rxq, skb);
			break;
		case -EPIPE:
			hw_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			hw_defer_kevent(dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown(dev)) {
				devdbg(dev, "device gone");
			}
			netif_device_detach(dev->net);
			break;
		default:
			if (netif_msg_rx_err(dev)) {
				devdbg(dev, "rx submit, %d", retval);
			}
			tasklet_schedule(&dev->bh);
			break;
		}
	} else {
		if (netif_msg_ifdown(dev)) {
			devdbg(dev, "rx: stopped");
		}
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	devdbg(dev, "usb_submit_urb status:%x, time:%ld-%ld", retval, current_kernel_time().tv_sec,
	       current_kernel_time().tv_nsec);

	if (retval) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
}

/*-------------------------------------------------------------------------*/

static inline void rx_process(struct hw_cdc_net *dev, struct sk_buff *skb)
{
	if (dev->is_ncm) {
		if (!cdc_ncm_rx_fixup(dev, skb)) {
			goto error;
		}
	}
	if (skb->len) {
		hw_skb_return(dev, skb);
	} else {
		if (netif_msg_rx_err(dev)) {
			devdbg(dev, "drop");
		}
error:
		dev->stats.rx_errors++;
		skb_queue_tail(&dev->done, skb);
	}
}

/*-------------------------------------------------------------------------*/
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct hw_cdc_net *dev = entry->dev;
	int urb_status = urb->status;

	devdbg(dev, "rx_complete,urb:%p,rx length %d, time %ld-%ld", urb, urb->actual_length, current_kernel_time().tv_sec,
	       current_kernel_time().tv_nsec);
	skb_put(skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err(dev)) {
				devdbg(dev, "rx length %d", skb->len);
			}
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->stats.rx_errors++;
		hw_defer_kevent(dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET: /* async unlink */
	case -ESHUTDOWN: /* hardware gone */
		if (netif_msg_ifdown(dev)) {
			devdbg(dev, "rx shutdown, code %d", urb_status);
		}
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending(&dev->delay)) {
			mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link(dev)) {
				devdbg(dev, "rx throttle %d", urb_status);
			}
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		entry->state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err(dev)) {
			devdbg(dev, "rx status %d", urb_status);
		}
		break;
	}

	rx_defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running(dev->net) && !test_bit(EVENT_RX_HALT, &dev->flags)) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	if (netif_msg_rx_err(dev)) {
		devdbg(dev, "no read resubmitted");
	}
}
static void intr_complete(struct urb *urb)
{
	struct hw_cdc_net *dev = urb->context;
	int status = urb->status;
	switch (status) {
	/* success */
	case 0:
		hw_cdc_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT: /* urb killed */
	case -ESHUTDOWN: /* hardware gone */
		if (netif_msg_ifdown(dev)) {
			devdbg(dev, "intr shutdown, code %d", status);
		}
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		devdbg(dev, "intr status %d", status);
		break;
	}

	if (!netif_running(dev->net)) {
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer(dev)) {
		deverr(dev, "intr resubmit --> %d", status);
	}
}

/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int hw_stop(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	int temp;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	netif_stop_queue(net);

	if (netif_msg_ifdown(dev)) {
		devinfo(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors);
	}

	// ensure there are no more active urbs
	add_wait_queue(&unlink_wakeup, &wait);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	// maybe wait for deletions to finish.
	while (!skb_queue_empty(&dev->rxq) && !skb_queue_empty(&dev->txq) && !skb_queue_empty(&dev->done)) {
		msleep(UNLINK_TIMEOUT_MS);
		if (netif_msg_ifdown(dev)) {
			devdbg(dev, "waited for %d urb completions", temp);
		}
	}
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* cleanup the data for TLP */
	dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
	if (dev->hw_tlp_tmp_buf.buffer != NULL) {
		kfree(dev->hw_tlp_tmp_buf.buffer);
		dev->hw_tlp_tmp_buf.buffer = NULL;
	}
	dev->hw_tlp_tmp_buf.pktlength = 0;
	dev->hw_tlp_tmp_buf.bytesneeded = 0;

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);
	usb_autopm_put_interface(dev->intf);

	return 0;
}

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

static int hw_open(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	int retval;
	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		if (netif_msg_ifup(dev)) {
			devinfo(dev, "resumption fail (%d) hw_cdc_net usb-%s-%s, %s", retval, dev->udev->bus->bus_name,
				dev->udev->devpath, dev->driver_desc);
		}
		goto done_nopm;
	}

	/* Initialize the data for TLP */
	dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
	dev->hw_tlp_tmp_buf.buffer = kmalloc(HW_USB_RECEIVE_BUFFER_SIZE, GFP_KERNEL);
	if (dev->hw_tlp_tmp_buf.buffer != NULL) {
		memset(dev->hw_tlp_tmp_buf.buffer, 0, HW_USB_RECEIVE_BUFFER_SIZE);
	}
	dev->hw_tlp_tmp_buf.pktlength = 0;
	dev->hw_tlp_tmp_buf.bytesneeded = 0;

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			if (netif_msg_ifup(dev)) {
				deverr(dev, "intr submit %d", retval);
			}
			goto done;
		}
	}

	netif_start_queue(net);

	// delay posting reads until we're fully open
	tasklet_schedule(&dev->bh);
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

int hw_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	if (!dev->mii.mdio_read) {
		return -EOPNOTSUPP;
	}

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(hw_get_settings);

int hw_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write) {
		return -EOPNOTSUPP;
	}

	retval = mii_ethtool_sset(&dev->mii, cmd);

	return retval;
}
EXPORT_SYMBOL_GPL(hw_set_settings);

u32 hw_get_link(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read) {
		return mii_link_ok(&dev->mii);
	}

	/* Otherwise, say we're up (to avoid breaking scripts) */
	return 1;
}
EXPORT_SYMBOL_GPL(hw_get_link);

int hw_nway_reset(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	if (!dev->mii.mdio_write) {
		return -EOPNOTSUPP;
	}

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(hw_nway_reset);

void hw_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	strncpy(info->driver, dev->driver_name, sizeof info->driver);
	strncpy(info->version, DRIVER_VERSION, sizeof info->version);
	strncpy(info->fw_version, dev->driver_desc, sizeof info->fw_version);
	usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(hw_get_drvinfo);

u32 hw_get_msglevel(struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(hw_get_msglevel);

void hw_set_msglevel(struct net_device *net, u32 level)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(hw_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static struct ethtool_ops hw_ethtool_ops = {
/* [zhaopf@meigsmart.com-2020-0903] add for higher version kernel support { */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 19, 0))
	.get_settings = hw_get_settings,
	.set_settings = hw_set_settings,
#endif
/* [zhaopf@meigsmart.com-2020-0903] add for higher version kernel support } */
	.get_link = hw_get_link,
	.nway_reset = hw_nway_reset,
	.get_drvinfo = hw_get_drvinfo,
	.get_msglevel = hw_get_msglevel,
	.set_msglevel = hw_set_msglevel,
};
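
/* Usage sketch (interface name assumed): the msglevel hooks map
 * straight onto dev->msg_enable, so per-device log verbosity can be
 * raised from userspace with ethtool:
 *
 *	ethtool -s usb0 msglvl 0x7fff
 */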

/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE: with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void kevent(struct work_struct *work)
{
	struct hw_cdc_net *dev = container_of(work, struct hw_cdc_net, kevent);
	int status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev)) {
				deverr(dev, "can't clear tx halt, status %d", status);
			}
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN) {
				netif_wake_queue(dev->net);
			}
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev)) {
				deverr(dev, "can't clear rx halt, status %d", status);
			}
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb *urb = NULL;

		if (netif_running(dev->net)) {
			urb = usb_alloc_urb(0, GFP_KERNEL);
		} else {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		}
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		clear_bit(EVENT_LINK_RESET, &dev->flags);
	}

	if (dev->flags) {
		devdbg(dev, "kevent done, flags = 0x%lx", dev->flags);
	}
}

/*-------------------------------------------------------------------------*/

static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct hw_cdc_net *dev = entry->dev;

	devdbg(dev, "tx_complete,status:%d,len:%d, *********time:%ld-%ld", urb->status, (int)entry->length,
	       current_kernel_time().tv_sec, current_kernel_time().tv_nsec);

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			hw_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET: // async unlink
		case -ESHUTDOWN: // hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev)) {
					devdbg(dev, "tx throttle %d", urb->status);
				}
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev)) {
				devdbg(dev, "tx err %d", urb->status);
			}
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	tx_defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static void hw_tx_timeout(struct net_device *net, unsigned int data)
{
	struct hw_cdc_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	// device recovery -- reset?
}

#if LINUX_VERSION37_LATER
/*-------------------------------------------------------------------------*/

/* net_device->trans_start is expensive for high speed devices on SMP,
 * so use netdev_queue->trans_start instead, as Linux suggests.
 *
 * NOTE: since kernel 4.7.1, Linux no longer supports net_device->trans_start.
 *
 */
static void hw_netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = NULL;

	if (dev == NULL) {
		printk(KERN_ERR "%s invalid dev parameter\n", __FUNCTION__);
		return;
	}

	// netdev_get_tx_queue(const struct net_device *dev, unsigned int index)
	// just returns the netdev_queue's address, so index 0 fetches the
	// queue of this single-queue device
	txq = netdev_get_tx_queue(dev, 0);
	if (txq == NULL) {
		printk(KERN_ERR "%s invalid txq parameter\n", __FUNCTION__);
		return;
	}
	if (txq->trans_start != jiffies) {
		txq->trans_start = jiffies;
	}
}
#endif

/*-------------------------------------------------------------------------*/

static int hw_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct hw_cdc_net *dev = netdev_priv(net);
	int length;
	int retval = NET_XMIT_SUCCESS;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;

	if (dev->is_ncm) {
		skb = cdc_ncm_tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				devdbg(dev, "can't tx_fixup skb");
			}
			goto drop;
		}
	}

	length = skb->len;

	if (!(urb = usb_alloc_urb(0, GFP_ATOMIC))) {
		if (netif_msg_tx_err(dev)) {
			devdbg(dev, "no urb");
		}
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE: strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if ((length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}
1772
1773 devdbg(dev, "hw_start_xmit ,usb_submit_urb,len:%d, time:%ld-%ld", skb->len, current_kernel_time().tv_sec,
1774 current_kernel_time().tv_nsec);
1775
1776 spin_lock_irqsave(&dev->txq.lock, flags);
1777
1778 switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
1779 case -EPIPE:
1780 netif_stop_queue(net);
1781 hw_defer_kevent(dev, EVENT_TX_HALT);
1782 break;
1783 default:
1784 if (netif_msg_tx_err(dev)) {
1785 devdbg(dev, "tx: submit urb err %d", retval);
1786 }
1787 break;
1788 case 0:
1789 #if LINUX_VERSION37_LATER
1790 hw_netif_trans_update(net);
1791 #else
1792 net->trans_start = jiffies;
1793 #endif
1794 __skb_queue_tail(&dev->txq, skb);
1795 if (dev->txq.qlen >= TX_QLEN(dev)) {
1796 netif_stop_queue(net);
1797 }
1798 }
1799 spin_unlock_irqrestore(&dev->txq.lock, flags);
1800
1801 if (retval) {
1802 if (netif_msg_tx_err(dev)) {
1803 devdbg(dev, "drop, code %d", retval);
1804 }
1805 drop:
1806 retval = NET_XMIT_SUCCESS;
1807 dev->stats.tx_dropped++;
1808 if (skb) {
1809 dev_kfree_skb_any(skb);
1810 }
1811 usb_free_urb(urb);
1812 } else if (netif_msg_tx_queued(dev)) {
1813 devdbg(dev, "> tx, len %d, type 0x%x", length, skb->protocol);
1814 }
1815
1816 return retval;
1817 }
1818
1819 /*-------------------------------------------------------------------------*/
1820
1821 // tasklet (work deferred from completions, in_irq) or timer
1822 static void hw_bh(unsigned long param)
1823 {
1824 struct hw_cdc_net *dev = (struct hw_cdc_net *)param;
1825 struct sk_buff *skb;
1826 struct skb_data *entry;
1827
1828 while ((skb = skb_dequeue(&dev->done))) {
1829 entry = (struct skb_data *)skb->cb;
1830 switch (entry->state) {
1831 case rx_done:
1832 entry->state = rx_cleanup;
1833 rx_process(dev, skb);
1834 continue;
1835 case tx_done:
1836 case rx_cleanup:
1837 usb_free_urb(entry->urb);
1838 dev_kfree_skb(skb);
1839 continue;
1840 default:
1841 devdbg(dev, "bogus skb state %d", entry->state);
1842 }
1843 }
1844
1845 // waiting for all pending urbs to complete?
1846 if (dev->wait) {
1847 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1848 wake_up(dev->wait);
1849 }
1850
1851 // or are we maybe short a few urbs?
1852 } else if (netif_running(dev->net) && netif_device_present(dev->net) && !timer_pending(&dev->delay) &&
1853 !test_bit(EVENT_RX_HALT, &dev->flags)) {
1854 int temp = dev->rxq.qlen;
1855 int qlen = dev->is_ncm ? RX_QLEN_NCM : RX_QLEN(dev);
1856
1857 if (temp < qlen) {
1858 struct urb *urb;
1859 int i;
1860
1861 // don't refill the queue all at once
1862 for (i = 0; i < 0x0A && dev->rxq.qlen < qlen; i++) {
1863 urb = usb_alloc_urb(0, GFP_ATOMIC);
1864 if (urb != NULL) {
1865 rx_submit(dev, urb, GFP_ATOMIC);
1866 }
1867 }
1868 if (temp != dev->rxq.qlen && netif_msg_link(dev)) {
1869 devdbg(dev, "rxqlen %d --> %d", temp, dev->rxq.qlen);
1870 }
1871 if (dev->rxq.qlen < qlen) {
1872 tasklet_schedule(&dev->bh);
1873 }
1874 }
1875 if (dev->txq.qlen < (dev->is_ncm ? TX_QLEN_NCM : TX_QLEN(dev))) {
1876 netif_wake_queue(dev->net);
1877 }
1878 }
1879 }
1880
1881 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1882 static void hw_bh_timer_call(struct timer_list *t)
1883 {
1884 struct hw_cdc_net *dev = from_timer(dev, t, delay);
1885 hw_bh((unsigned long)dev);
1886 }
1887 #endif
1888
1889 /*-------------------------------------------------------------------------
1890 *
1891 * USB Device Driver support
1892 *
1893 *-------------------------------------------------------------------------*/
1894
1895 // precondition: never called in_interrupt
1896
1897 void hw_disconnect(struct usb_interface *intf)
1898 {
1899 struct hw_cdc_net *dev;
1900 struct usb_device *xdev;
1901 struct net_device *net;
1902
1903 dev = usb_get_intfdata(intf);
1904 usb_set_intfdata(intf, NULL);
1905 if (!dev) {
1906 return;
1907 }
1908
1909 xdev = interface_to_usbdev(intf);
1910
1911 if (netif_msg_probe(dev)) {
1912 devinfo(dev, "unregister '%s' usb-%s-%s, %s", intf->dev.driver->name, xdev->bus->bus_name, xdev->devpath,
1913 dev->driver_desc);
1914 }
1915
1916 /* [zhaopf@meigsmart-2020-1127] balong device ignore cancel delayed work { */
1917 if (deviceisBalong) {
1918 devinfo(dev, "balong device ignore cancel delayed work");
1919 } else {
1920 cancel_delayed_work_sync(&dev->status_work);
1921 }
1922 /* [zhaopf@meigsmart-2020-1127] balong device ignore cancel delayed work } */
1923 net = dev->net;
1924 unregister_netdev(net);
1925
1926 /* we don't hold rtnl here ... */
1927 flush_scheduled_work();
1928
1929 hw_cdc_unbind(dev, intf);
1930
1931 free_netdev(net);
1932 usb_put_dev(xdev);
1933 }
1934 EXPORT_SYMBOL_GPL(hw_disconnect);
1935
1936 /*-------------------------------------------------------------------------*/
1937 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
1938 static int hw_eth_mac_addr(struct net_device *dev, void *p)
1939 {
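/* Note: the address requested via 'p' is ignored; the driver always
 * programs the fixed address 00:1e:10:1f:00:01, matching
 * hw_get_ethernet_addr() below.
 */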
1940 dev->dev_addr[0] = 0x00;
1941 dev->dev_addr[1] = 0x1e;
1942 dev->dev_addr[0x02] = 0x10;
1943 dev->dev_addr[0x03] = 0x1f;
1944 dev->dev_addr[0x04] = 0x00;
1945 dev->dev_addr[0x05] = 0x01;
1946
1947 return 0;
1948 }
1949 static const struct net_device_ops hw_netdev_ops = {
1950 .ndo_open = hw_open,
1951 .ndo_stop = hw_stop,
1952 .ndo_start_xmit = hw_start_xmit,
1953 .ndo_tx_timeout = hw_tx_timeout,
1954 .ndo_change_mtu = hw_change_mtu,
1955 .ndo_set_mac_address = hw_eth_mac_addr,
1956 .ndo_validate_addr = eth_validate_addr,
1957 .ndo_get_stats = hw_get_stats, // network statistics
1958 };
1959 #endif
1960
1961 int hw_send_tlp_download_request(struct usb_interface *intf);
1962 // precondition: never called in_interrupt
1963 int hw_check_conn_status(struct usb_interface *intf);
1964
1965 static int is_ncm_interface(struct usb_interface *intf)
1966 {
1967 u8 bif_class;
1968 u8 bif_subclass;
1969 u8 bif_protocol;
1970 bif_class = intf->cur_altsetting->desc.bInterfaceClass;
1971 bif_subclass = intf->cur_altsetting->desc.bInterfaceSubClass;
1972 bif_protocol = intf->cur_altsetting->desc.bInterfaceProtocol;
1973
1974 if ((bif_class == 0x02 && bif_subclass == 0x0d) ||
1975 (bif_class == 0xff && (bif_subclass == 0x02 || bif_subclass == BINTERFACESUBCLASS_HW) &&
1976 bif_protocol == 0x16) ||
1977 (bif_class == 0xff && (bif_subclass == 0x02 || bif_subclass == BINTERFACESUBCLASS_HW) &&
1978 bif_protocol == 0x46) ||
1979 (bif_class == 0xff && (bif_subclass == 0x02 || bif_subclass == BINTERFACESUBCLASS_HW) &&
1980 bif_protocol == 0x76)) {
1981 return 1;
1982 }
1983
1984 return 0;
1985 }
1986
1987 static int cdc_ncm_config(struct ncm_ctx *ctx)
1988 {
1989 int err;
1990 struct usb_device *udev = ctx->ndev->udev;
1991 u8 net_caps;
1992 u8 control_if;
1993 unsigned int tx_pipe;
1994 unsigned int rx_pipe;
1995 struct usb_cdc_ncm_ntb_parameter_hw *ntb_params;
1996 u8 *b;
1997
1998 #define NCM_MAX_CONTROL_MSG sizeof(*ntb_params)
1999
2000 b = kmalloc(NCM_MAX_CONTROL_MSG, GFP_KERNEL);
2001 if (unlikely(b == NULL)) {
2002 return -ENOMEM;
2003 }
2004
2005 net_caps = ctx->ncm_desc->bmNetworkCapabilities;
2006 control_if = ctx->control->cur_altsetting->desc.bInterfaceNumber;
2007 tx_pipe = usb_sndctrlpipe(udev, 0);
2008 rx_pipe = usb_rcvctrlpipe(udev, 0);
2009
2010 err = usb_control_msg(udev, rx_pipe, USB_CDC_GET_NTB_PARAMETERS, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
2011 0, control_if, b, sizeof(*ntb_params), NCM_CONTROL_TIMEOUT);
2012 if (err < 0) {
2013 dev_dbg(&udev->dev, "cannot read NTB params\n");
2014 goto exit;
2015 }
2016 if (err < sizeof(*ntb_params)) {
2017 dev_dbg(&udev->dev, "the read NTB params block is too short\n");
2018 err = -EINVAL;
2019 goto exit;
2020 }
2021
2022 ntb_params = (void *)b;
2023 ctx->formats = le16_to_cpu(ntb_params->bmNtbFormatSupported);
2024 ctx->rx_max_ntb = le32_to_cpu(ntb_params->dwNtbInMaxSize);
2025 ctx->tx_max_ntb = le32_to_cpu(ntb_params->dwNtbOutMaxSize);
2026 ctx->tx_divisor = le16_to_cpu(ntb_params->wNdpOutDivisor);
2027 ctx->tx_remainder = le16_to_cpu(ntb_params->wNdpOutPayloadRemainder);
2028 ctx->tx_align = le16_to_cpu(ntb_params->wNdpOutAlignment);
2029
2030 devdbg(ctx->ndev, "rx_max_ntb:%d,tx_max_ntb:%d,tx_align:%d", ctx->rx_max_ntb, ctx->tx_max_ntb, ctx->tx_align);
2031
2032 if (unlikely(!(ctx->formats & NTB_FORMAT_SUPPORTED_16BIT))) {
2033 deverr(ctx->ndev, "device does not support 16-bit mode\n");
2034 err = -EINVAL;
2035 goto exit;
2036 }
2037
2038 if (unlikely(ctx->tx_align < NCM_NDP_MIN_ALIGNMENT)) {
2039 deverr(ctx->ndev,
2040 "wNdpOutAlignment (%u) must be at least "
2041 "%u\n",
2042 ctx->tx_align, NCM_NDP_MIN_ALIGNMENT);
2043 err = -EINVAL;
2044 goto exit;
2045 }
2046
2047 if (unlikely(!IS_POWER2(ctx->tx_align))) {
2048 deverr(ctx->ndev,
2049 "wNdpOutAlignment (%u) must be a power of "
2050 "2\n",
2051 ctx->tx_align);
2052 err = -EINVAL;
2053 goto exit;
2054 }
2055
2056 if (unlikely(ctx->rx_max_ntb < NCM_NTB_MIN_IN_SIZE)) {
2057 deverr(ctx->ndev,
2058 "dwNtbInMaxSize (%u) must be at least "
2059 "%u\n",
2060 ctx->rx_max_ntb, NCM_NTB_MIN_IN_SIZE);
2061 err = -EINVAL;
2062 goto exit;
2063 }
2064
2065 if (ctx->rx_max_ntb > (u32)NCM_NTB_HARD_MAX_IN_SIZE) {
2066 devdbg(ctx->ndev,
2067 "dwNtbInMaxSize (%u) must be at most %u "
2068 ", setting the device to %u\n",
2069 ctx->rx_max_ntb, NCM_NTB_HARD_MAX_IN_SIZE, NCM_NTB_HARD_MAX_IN_SIZE);
2070 ctx->rx_max_ntb = NCM_NTB_HARD_MAX_IN_SIZE;
2071 put_unaligned_le32(ctx->rx_max_ntb, b);
2072 err = usb_control_msg(udev, tx_pipe, USB_CDC_SET_NTB_INPUT_SIZE,
2073 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, 0, control_if, b, 0x04,
2074 NCM_CONTROL_TIMEOUT);
2075 if (err < 0) {
2076 deverr(ctx->ndev, "failed setting NTB input size\n");
2077 goto exit;
2078 }
2079 }
2080
2081 if (unlikely(ctx->tx_max_ntb < NCM_NTB_MIN_OUT_SIZE)) {
2082 deverr(ctx->ndev,
2083 "dwNtbOutMaxSize (%u) must be at least "
2084 "%u\n",
2085 ctx->tx_max_ntb, (u32)NCM_NTB_MIN_OUT_SIZE);
2086 err = -EINVAL;
2087 goto exit;
2088 }
2089
2090 ctx->bit_mode = NCM_BIT_MODE_16;
2091 if (ncm_prefer_32) {
2092 if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
2093 ctx->bit_mode = NCM_BIT_MODE_32;
2094 } else {
2095 devinfo(ctx->ndev, "device does not support 32-bit "
2096 "mode, using 16-bit mode\n");
2097 }
2098 }
2099
2100 /* The spec defines USB_CDC_SET_NTB_FORMAT as an optional feature.
2101 * The test for 32-bit support is effectively a test of whether the
2102 * device implements this request
2103 */
2104 if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
2105 err = usb_control_msg(udev, tx_pipe, USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
2106 ctx->bit_mode, control_if, NULL, 0, NCM_CONTROL_TIMEOUT);
2107 if (err < 0) {
2108 deverr(ctx->ndev, "failed setting bit-mode\n");
2109 goto exit;
2110 }
2111 }
2112
2113 ctx->crc_mode = NCM_CRC_MODE_NO;
2114 if (ncm_prefer_crc && (net_caps & NCM_NCAP_CRC_MODE)) {
2115 ctx->crc_mode = NCM_CRC_MODE_YES;
2116 err = usb_control_msg(udev, tx_pipe, USB_CDC_SET_CRC_MODE, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
2117 NCM_CRC_MODE_YES, control_if, NULL, 0, NCM_CONTROL_TIMEOUT);
2118 if (err < 0) {
2119 deverr(ctx->ndev, "failed setting crc-mode\n");
2120 goto exit;
2121 }
2122 }
2123
2124 switch (ctx->bit_mode) {
2125 case NCM_BIT_MODE_16:
2126 memcpy(&ctx->popts, &ndp16_opts, sizeof(struct ndp_parser_opts_hw));
2127 if (ctx->crc_mode == NCM_CRC_MODE_YES) {
2128 ctx->popts.ndp_sign = NCM_NDP16_CRC_SIGN;
2129 }
2130 break;
2131 case NCM_BIT_MODE_32:
2132 memcpy(&ctx->popts, &ndp32_opts, sizeof(struct ndp_parser_opts_hw));
2133 if (ctx->crc_mode == NCM_CRC_MODE_YES) {
2134 ctx->popts.ndp_sign = NCM_NDP32_CRC_SIGN;
2135 }
2136 break;
2137 }
2138
2139 exit:
2140 kfree(b);
2141 return err;
2142 #undef NCM_MAX_CONTROL_MSG
2143 }
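/* Summary of the configuration handshake above:
 *   GET_NTB_PARAMETERS  reads formats, dwNtbIn/OutMaxSize and the
 *                       wNdpOut{Divisor,PayloadRemainder,Alignment} tuple;
 *   SET_NTB_INPUT_SIZE  is issued only when the advertised IN size exceeds
 *                       NCM_NTB_HARD_MAX_IN_SIZE;
 *   SET_NTB_FORMAT      is issued only when 32-bit support is advertised
 *                       (the request is optional per the CDC NCM spec);
 *   SET_CRC_MODE        is issued only when ncm_prefer_crc is set and the
 *                       device advertises NCM_NCAP_CRC_MODE.
 */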
2144
2145 /* add crc support */
2146 static int cdc_ncm_rx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb)
2147 {
2148 #define NCM_BITS(ctx) (((ctx)->bit_mode == NCM_BIT_MODE_16) ? 16 : 32)
2149 /* Minimal NDP has a header and two entries (each entry has 2 items). */
2150 #define MIN_NDP_LEN(ndp_hdr_size, item_len) ((ndp_hdr_size) + 2 * 2 * (sizeof(__le16) * (item_len)))
2151 struct ncm_ctx *ctx = dev->ncm_ctx;
2152 struct usb_device *udev = dev->udev;
2153 struct ndp_parser_opts_hw *popts = &ctx->popts;
2154 struct sk_buff *skb2;
2155 unsigned skb_len = skb->len;
2156 __le16 *p = (void *)skb->data;
2157 __le32 idx;
2158 __le16 ndp_len;
2159 unsigned dgram_item_len = popts->dgram_item_len;
2160 unsigned curr_dgram_idx;
2161 unsigned curr_dgram_len;
2162 unsigned next_dgram_idx;
2163 unsigned next_dgram_len;
2164
2165 u32 rx_len;
2166 u32 rep_len;
2167 rx_len = skb->len;
2168
2169 if (unlikely(skb_len < popts->nth_size)) {
2170 dev_dbg(&udev->dev,
2171 "skb len (%u) is shorter than NTH%u len "
2172 "(%u)\n",
2173 skb_len, NCM_BITS(ctx), popts->nth_size);
2174 goto error;
2175 }
2176
2177 if (get_ncm_le32(p) != popts->nth_sign) {
2178 dev_dbg(&udev->dev, "corrupt NTH%u signature\n", NCM_BITS(ctx));
2179 goto error;
2180 }
2181
2182 if (get_ncm_le16(p) != popts->nth_size) {
2183 dev_dbg(&udev->dev, "wrong NTH%u len\n", NCM_BITS(ctx));
2184 goto error;
2185 }
2186
2187 /* skip sequence num */
2188 p += 1;
2189
2190 if (unlikely(get_ncm(&p, popts->block_length) > skb_len)) {
2191 dev_dbg(&udev->dev, "bogus NTH%u block length\n", NCM_BITS(ctx));
2192 goto error;
2193 }
2194
2195 idx = get_ncm(&p, popts->fp_index);
2196 if (unlikely(idx > skb_len)) {
2197 dev_dbg(&udev->dev,
2198 "NTH%u fp_index (%u) bigger than skb len "
2199 "(%u)\n",
2200 NCM_BITS(ctx), idx, skb_len);
2201 goto error;
2202 }
2203
2204 p = (void *)(skb->data + idx);
2205
2206 if (get_ncm_le32(p) != popts->ndp_sign) {
2207 dev_dbg(&udev->dev, "corrupt NDP%u signature\n", NCM_BITS(ctx));
2208 goto error;
2209 }
2210
2211 ndp_len = get_ncm_le16(p);
2212 if (((ndp_len + popts->nth_size) > skb_len) || (ndp_len < (MIN_NDP_LEN(popts->ndp_size, dgram_item_len)))) {
2213 dev_dbg(&udev->dev, "bogus NDP%u len (%u)\n", NCM_BITS(ctx), ndp_len);
2214 goto error;
2215 }
2216
2217 p += popts->reserved1;
2218 /* next_fp_index is defined as reserved in the spec */
2219 p += popts->next_fp_index;
2220 p += popts->reserved2;
2221
2222 curr_dgram_idx = get_ncm(&p, dgram_item_len);
2223 curr_dgram_len = get_ncm(&p, dgram_item_len);
2224 next_dgram_idx = get_ncm(&p, dgram_item_len);
2225 next_dgram_len = get_ncm(&p, dgram_item_len);
2226
2227 /* Parse all the datagrams in the NTB except for the last one. Pass
2228 * all the parsed datagrams to the networking stack directly
2229 */
2230 rep_len = 0;
2231 while (next_dgram_idx && next_dgram_len) {
2232 if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)) {
2233 goto error;
2234 }
2235 #if 1
2236 skb2 = skb_clone(skb, GFP_ATOMIC);
2237 if (unlikely(!skb2)) {
2238 goto error;
2239 }
2240
2241 if (unlikely(!skb_pull(skb2, curr_dgram_idx))) {
2242 goto error2;
2243 }
2244 skb_trim(skb2, curr_dgram_len);
2245 #else
2246 /* create a fresh copy to reduce truesize */
2247 skb2 = netdev_alloc_skb_ip_align(dev->net, curr_dgram_len);
2248 if (!skb2) {
2249 goto error;
2250 }
2251 skb_put_data(skb2, skb->data + curr_dgram_idx, curr_dgram_len);
2252 #endif
2253
2254 rep_len += skb2->len;
2255 hw_skb_return(dev, skb2);
2256
2257 curr_dgram_idx = next_dgram_idx;
2258 curr_dgram_len = next_dgram_len;
2259 next_dgram_idx = get_ncm(&p, dgram_item_len);
2260 next_dgram_len = get_ncm(&p, dgram_item_len);
2261 }
2262
2263 /* Update 'skb' to represent the last datagram in the NTB and forward
2264 * it to usbnet which in turn will push it up to the networking stack.
2265 */
2266 if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)) {
2267 goto error;
2268 }
2269 if (unlikely(!skb_pull(skb, curr_dgram_idx))) {
2270 goto error;
2271 }
2272 skb_trim(skb, curr_dgram_len);
2273 rep_len += skb->len;
2274
2275 return 1;
2276 error2:
2277 dev_kfree_skb(skb2);
2278 error:
2279 devdbg(dev, "cdc_ncm_rx_fixup error\n");
2280 return 0;
2281 #undef NCM_BITS
2282 #undef MIN_NDP_LEN
2283 }
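/* For reference, the NTB layout parsed above (field widths depend on the
 * 16/32-bit mode; names follow struct ndp_parser_opts_hw):
 *
 *   NTH: nth_sign | nth_size | sequence | block_length | fp_index
 *   ... datagrams (Ethernet frames, possibly padded) ...
 *   NDP: ndp_sign | ndp_len | reserved/next_fp_index |
 *        {dgram_idx, dgram_len} pairs, terminated by a zero pair
 */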
2284
2285 static inline unsigned ndp_dgram_pad(struct ncm_ctx *ctx, unsigned dgram_off)
2286 {
2287 unsigned rem = dgram_off % ctx->tx_divisor;
2288 unsigned tmp = ctx->tx_remainder;
2289 if (rem > ctx->tx_remainder) {
2290 tmp += ctx->tx_divisor;
2291 }
2292 return tmp - rem;
2293 }
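/* Worked example with illustrative values tx_divisor = 4, tx_remainder = 0:
 * a datagram area ending at offset 6 gives rem = 6 % 4 = 2, so the pad is
 * (0 + 4) - 2 = 2 and the next datagram starts at offset 8, satisfying
 * (offset % tx_divisor) == tx_remainder.
 */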
2294
2295 static inline void ntb_clear(struct ntb *n)
2296 {
2297 n->ndgrams = 0;
2298 n->skb = NULL;
2299 INIT_LIST_HEAD(&n->entries);
2300 }
2301
2302 static inline int ntb_init(struct ncm_ctx *ctx, struct ntb *n, unsigned size)
2303 {
2304 struct ndp_parser_opts_hw *popts = &ctx->popts;
2305 unsigned dgrams_end;
2306
2307 n->max_len = size;
2308 dgrams_end = popts->nth_size;
2309
2310 n->ndp_off = ALIGN(dgrams_end, ctx->tx_align);
2311 n->ndp_len = popts->ndp_size + 0x02 * 0x02 * popts->dgram_item_len;
2312 n->dgrams_end = dgrams_end;
2313
2314 if (NTB_LEN(n) > n->max_len) {
2315 return -EINVAL;
2316 }
2317
2318 ntb_clear(n);
2319 return 0;
2320 }
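/* ntb_init() lays out an empty NTB: the NTH header at offset 0, datagrams
 * appended after it, and the NDP floating at the next tx_align boundary;
 * each ntb_add_dgram() call below grows the datagram area and the NDP in
 * step.
 */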
2321
2322 static inline int ntb_add_dgram(struct ncm_ctx *ctx, struct ntb *n, unsigned dgram_len, u8 *data, gfp_t flags)
2323 {
2324 struct ndp_parser_opts_hw *popts = &ctx->popts;
2325 unsigned new_ndp_off;
2326 unsigned new_ndp_len;
2327 unsigned new_dgrams_end;
2328 unsigned dgram_off;
2329 struct ndp_entry *entry;
2330
2331 dgram_off = n->dgrams_end + ndp_dgram_pad(ctx, n->dgrams_end);
2332 new_dgrams_end = dgram_off + dgram_len;
2333
2334 new_ndp_off = ALIGN(new_dgrams_end, ctx->tx_align);
2335 new_ndp_len = n->ndp_len + 0x02 * 0x02 * popts->dgram_item_len;
2336
2337 if ((new_ndp_off + new_ndp_len) > n->max_len) {
2338 return -EINVAL;
2339 }
2340
2341 /* optimize to use a kernel lookaside cache (kmem_cache) */
2342 entry = kmalloc(sizeof(*entry), flags);
2343 if (unlikely(entry == NULL)) {
2344 return -ENOMEM;
2345 }
2346
2347 entry->idx = dgram_off;
2348 entry->len = dgram_len;
2349 list_add_tail(&entry->list, &n->entries);
2350
2351 memcpy(n->skb->data + dgram_off, data, dgram_len);
2352
2353 n->ndgrams++;
2354
2355 n->ndp_off = new_ndp_off;
2356 n->ndp_len = new_ndp_len;
2357 n->dgrams_end = new_dgrams_end;
2358
2359 return 0;
2360 }
2361
2362 static inline void ntb_free_dgram_list(struct ntb *n)
2363 {
2364 struct list_head *p;
2365 struct list_head *tmp;
2366
2367 list_for_each_safe(p, tmp, &n->entries)
2368 {
2369 struct ndp_entry *e = list_entry(p, struct ndp_entry, list);
2370 list_del(p);
2371 kfree(e);
2372 }
2373 }
2374
2375 static struct sk_buff *ntb_finalize(struct ncm_ctx *ctx, struct ntb *n)
2376 {
2377 struct ndp_parser_opts_hw *popts = &ctx->popts;
2378 __le16 *p = (void *)n->skb->data;
2379 struct ndp_entry *entry;
2380 struct sk_buff *skb;
2381
2382 put_ncm_le32(popts->nth_sign, p);
2383 put_ncm_le16(popts->nth_size, p);
2384
2385 /* sequence number (written as 0 here) */
2386 put_ncm_le16(0, p);
2387
2388 put_ncm(&p, popts->block_length, NTB_LEN(n));
2389 put_ncm(&p, popts->fp_index, n->ndp_off);
2390
2391 p = (void *)(n->skb->data + n->ndp_off);
2392 memset(p, 0, popts->ndp_size);
2393
2394 put_ncm_le32(popts->ndp_sign, p);
2395 put_ncm_le16(n->ndp_len, p);
2396
2397 p += popts->reserved1;
2398 p += popts->next_fp_index;
2399 p += popts->reserved2;
2400
2401 list_for_each_entry(entry, &n->entries, list)
2402 {
2403 put_ncm(&p, popts->dgram_item_len, entry->idx);
2404 put_ncm(&p, popts->dgram_item_len, entry->len);
2405 }
2406
2407 put_ncm(&p, popts->dgram_item_len, 0);
2408 put_ncm(&p, popts->dgram_item_len, 0);
2409
2410 ntb_free_dgram_list(n);
2411 __skb_put(n->skb, NTB_LEN(n));
2412
2413 skb = n->skb;
2414 ntb_clear(n);
2415
2416 return skb;
2417 }
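/* ntb_finalize() serializes the bookkeeping into the skb: the NTH at the
 * front, then the NDP at ndp_off with one index/length pair per datagram
 * plus a terminating zero pair; it frees the entry list and returns the
 * skb ready to be submitted as a single bulk transfer.
 */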
2418
2419 static inline struct sk_buff *ncm_get_skb(struct ncm_ctx *ctx)
2420 {
2421 struct sk_buff *skb = NULL;
2422 unsigned i;
2423
2424 /* skb_shared() returns false once usbnet has released its reference
2425 * to the skb, i.e. the pool entry is free for reuse
2426 */
2427 for (i = 0; i < ctx->skb_pool_size && skb_shared(ctx->skb_pool[i]); i++) {
2428 ;
2429 }
2430
2431 if (likely(i < ctx->skb_pool_size)) {
2432 skb = skb_get(ctx->skb_pool[i]);
2433 }
2434
2435 if (likely(skb != NULL)) {
2436 __skb_trim(skb, 0);
2437 }
2438
2439 return skb;
2440 }
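/* Pool lifecycle: each pool skb starts with a refcount of 1; skb_get()
 * raises it to 2 while the NTB is in flight, and once usbnet releases its
 * reference the count drops back to 1, so skb_shared() turns false and
 * the slot can be handed out again.
 */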
2441
2442 /* Must be run with tx_lock held */
2443 static inline int ncm_init_curr_ntb(struct ncm_ctx *ctx)
2444 {
2445 struct usb_device *udev = ctx->ndev->udev;
2446 int err;
2447
2448 err = ntb_init(ctx, &ctx->curr_ntb, ctx->tx_max_ntb);
2449 if (unlikely(err < 0)) {
2450 dev_dbg(&udev->dev,
2451 "error initializing current-NTB with size "
2452 "%u\n",
2453 ctx->tx_max_ntb);
2454 return err;
2455 }
2456
2457 ctx->curr_ntb.skb = ncm_get_skb(ctx);
2458 if (unlikely(ctx->curr_ntb.skb == NULL)) {
2459 dev_dbg(&udev->dev, "failed getting an SKB from the pool\n");
2460 return -ENOMEM;
2461 }
2462
2463 return 0;
2464 }
2465
2466 static inline void ncm_uninit_curr_ntb(struct ncm_ctx *ctx)
2467 {
2468 dev_kfree_skb_any(ctx->curr_ntb.skb);
2469 ntb_clear(&ctx->curr_ntb);
2470 }
2471
2472 /* if 'skb' is NULL (timer context), we will finish the current ntb and
2473 * return it to usbnet
2474 */
2475 static struct sk_buff *cdc_ncm_tx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb, gfp_t mem_flags)
2476 {
2477 struct ncm_ctx *ctx = dev->ncm_ctx;
2478 struct ntb *curr_ntb = &ctx->curr_ntb;
2479 struct sk_buff *skb2 = NULL;
2480 int err = 0;
2481 unsigned long flags;
2482 unsigned ndgrams = 0;
2483 unsigned is_skb_added = 0;
2484 unsigned is_curr_ntb_new = 0;
2485 u32 sn;
2486
2487 spin_lock_irqsave(&ctx->tx_lock, flags);
2488
2489 if (skb == NULL) {
2490 /* Timer context */
2491 if (NTB_IS_EMPTY(curr_ntb)) {
2492 /* we have nothing to send */
2493 goto exit;
2494 }
2495 ndgrams = curr_ntb->ndgrams;
2496 skb2 = ntb_finalize(ctx, curr_ntb);
2497 goto exit;
2498 }
2499
2500 /* non-timer context */
2501 if (NTB_IS_EMPTY(curr_ntb)) {
2502 err = ncm_init_curr_ntb(ctx);
2503 if (unlikely(err < 0)) {
2504 goto exit;
2505 }
2506 is_curr_ntb_new = 1;
2507 }
2508
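/* Debug-only peek at TCP sequence/acknowledgment numbers; the fixed
 * offsets appear to assume Ethernet (14 bytes) + IPv4 (20 bytes) framing:
 * 0x26 is then the TCP sequence-number offset and 0x2a the
 * acknowledgment-number offset.
 */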
2509 if (skb->len < 0x80) {
2510 sn = be32_to_cpu(*(u32 *)(skb->data + 0x2a));
2511 devdbg(dev, "get pc ACK SN:%x time:%ld-%ld", sn, current_kernel_time().tv_sec, current_kernel_time().tv_nsec);
2512 } else {
2513 sn = be32_to_cpu(*(u32 *)(skb->data + 0x26));
2514 devdbg(dev, "get pc PACKETS SN:%x, time:%ld-%ld", sn, current_kernel_time().tv_sec,
2515 current_kernel_time().tv_nsec);
2516 }
2517
2518 err = ntb_add_dgram(ctx, curr_ntb, skb->len, skb->data, GFP_ATOMIC);
2519 switch (err) {
2520 case 0:
2521 /* The datagram was successfully added to the current-NTB */
2522 is_skb_added = 1;
2523 if (!ctx->tx_timeout_jiffies) {
2524 ndgrams = curr_ntb->ndgrams;
2525 skb2 = ntb_finalize(ctx, curr_ntb);
2526 }
2527 break;
2528 case -EINVAL:
2529 /* not enough space in current-NTB */
2530 ndgrams = curr_ntb->ndgrams;
2531 /* finalize the current-NTB */
2532 skb2 = ntb_finalize(ctx, curr_ntb);
2533 /* setup a new current-NTB */
2534 err = ncm_init_curr_ntb(ctx);
2535 if (unlikely(err < 0)) {
2536 break;
2537 }
2538
2539 is_curr_ntb_new = 1;
2540
2541 err = ntb_add_dgram(ctx, curr_ntb, skb->len, skb->data, GFP_ATOMIC);
2542 if (unlikely(err < 0)) {
2543 ncm_uninit_curr_ntb(ctx);
2544 break;
2545 }
2546
2547 is_skb_added = 1;
2548 break;
2549 default:
2550 if (is_curr_ntb_new) {
2551 ncm_uninit_curr_ntb(ctx);
2552 }
2553 break;
2554 }
2555
2556 exit:
2557 if (err) {
2558 devdbg(dev, "tx fixup failed (err %d)\n", err);
2559 }
2560
2561 if (skb) {
2562 dev_kfree_skb_any(skb);
2563 }
2564
2565 /* When NULL is returned, usbnet will increment the net device's drop
2566 * count. If 'skb' was successfully added to the current-NTB, decrement
2567 * the drop count in advance to compensate
2568 */
2569 if (skb2 == NULL && (is_skb_added || skb == NULL)) {
2570 if (is_skb_added) {
2571 dev->stats.tx_dropped--;
2572 }
2573 }
2574 /* If a finished NTB is returned to usbnet, it adds 1 to the packet
2575 * count. Compensate for all the other datagrams it carries, which
2576 * usbnet previously counted as dropped
2577 */
2578 if (skb2 != NULL) {
2579 dev->stats.tx_packets += ndgrams - 1;
2580 }
2581
2582 /* reschedule the timer if successfully added a first datagram to a
2583 * newly allocated current-NTB
2584 */
2585 if (is_curr_ntb_new && is_skb_added && ctx->tx_timeout_jiffies) {
2586 mod_timer(&ctx->tx_timer, jiffies + ctx->tx_timeout_jiffies);
2587 }
2588
2589 spin_unlock_irqrestore(&ctx->tx_lock, flags);
2590
2591 return skb2;
2592 }
2593
2594 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
2595
2596 static void ncm_tx_timer_cb(unsigned long param)
2597 {
2598 struct ncm_ctx *ctx = (void *)param;
2599
2600 #else
2601 static void ncm_tx_timer_cb(struct timer_list *t)
2602 {
2603 struct ncm_ctx *ctx = from_timer(ctx, t, tx_timer);
2604
2605 #endif
2606 if (!netif_queue_stopped(ctx->ndev->net)) {
2607 hw_start_xmit(NULL, ctx->ndev->net);
2608 }
2609 }
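/* Timer-driven flush: passing a NULL skb into hw_start_xmit() reaches
 * cdc_ncm_tx_fixup() in "timer context", which finalizes and transmits a
 * partially filled NTB instead of waiting for further datagrams.
 */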
2610
2611 int hw_cdc_probe(struct usb_interface *udev, const struct usb_device_id *prod)
2612 {
2613 struct hw_cdc_net *dev;
2614 struct net_device *net;
2615 struct usb_host_interface *interface;
2616 struct usb_device *xdev;
2617 int status;
2618 const char *name;
2619
2620 #if LINUX_VERSION37_LATER
2621 struct usb_driver *driver = NULL;
2622
2623 if (udev == NULL) {
2624 return -EINVAL;
2625 }
2626
2627 driver = to_usb_driver(udev->dev.driver);
2628 if (driver == NULL) {
2629 return -EINVAL;
2630 }
2631
2632 if (!driver->supports_autosuspend) {
2633 driver->supports_autosuspend = 1;
2634 pm_runtime_enable(&udev->dev);
2635 }
2636 #endif
2637 printk("Meig NCM driver version:%s\n", DRIVER_VERSION);
2638
2639 deviceisBalong = false;
2640
2641 name = udev->dev.driver->name;
2642 xdev = interface_to_usbdev(udev);
2643 interface = udev->cur_altsetting;
2644
2645 usb_get_dev(xdev);
2646
2647 status = -ENOMEM;
2648
2649 // set up our own records
2650 net = alloc_etherdev(sizeof(*dev));
2651 if (!net) {
2652 goto out;
2653 }
2654
2655 dev = netdev_priv(net);
2656 dev->udev = xdev;
2657 dev->intf = udev;
2658
2659 /* kernels after 2.6.37: runtime PM needs disable_depth == 0 */
2660 #ifdef CONFIG_PM_RUNTIME
2661 if (LINUX_VERSION37_LATER) {
2662 dev->intf->dev.power.disable_depth = 0;
2663 }
2664 #endif
2665
2666 dev->driver_name = name;
2667 dev->driver_desc = "Meig NCM Ethernet Device";
2668 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2669 skb_queue_head_init(&dev->rxq);
2670 skb_queue_head_init(&dev->txq);
2671 skb_queue_head_init(&dev->done);
2672 dev->bh.func = hw_bh;
2673 dev->bh.data = (unsigned long)dev;
2674 INIT_WORK(&dev->kevent, kevent);
2675 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
2676 dev->delay.function = hw_bh;
2677 dev->delay.data = (unsigned long)dev;
2678 init_timer(&dev->delay);
2679 #else
2680 timer_setup(&dev->delay, hw_bh_timer_call, 0);
2681 #endif
2682 mutex_init(&dev->phy_mutex);
2683
2684 dev->net = net;
2685
2686 memcpy(net->dev_addr, node_id, sizeof node_id);
2687
2688 /* rx and tx sides can use different message sizes;
2689 * bind() should set rx_urb_size in that case.
2690 */
2691 dev->hard_mtu = net->mtu + net->hard_header_len;
2692
2693 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
2694 net->netdev_ops = &hw_netdev_ops;
2695 #else
2696 net->change_mtu = hw_change_mtu;
2697 net->get_stats = hw_get_stats;
2698 net->hard_start_xmit = hw_start_xmit;
2699 net->open = hw_open;
2700 net->stop = hw_stop;
2701 net->tx_timeout = hw_tx_timeout;
2702 #endif
2703 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2704 net->ethtool_ops = &hw_ethtool_ops;
2705
2706 status = hw_cdc_bind(dev, udev);
2707 if (status < 0) {
2708 goto out1;
2709 }
2710
2711 strcpy(net->name, "usb%d"); // name
2712
2713 /* maybe the remote can't receive an Ethernet MTU */
2714 if (net->mtu > (dev->hard_mtu - net->hard_header_len)) {
2715 net->mtu = dev->hard_mtu - net->hard_header_len;
2716 }
2717
2718 if (status >= 0 && dev->status) {
2719 status = init_status(dev, udev);
2720 }
2721 if (status < 0) {
2722 goto out3;
2723 }
2724
2725 if (dev->is_ncm) {
2726 dev->rx_urb_size = dev->ncm_ctx->rx_max_ntb;
2727 } else if (!dev->rx_urb_size) {
2728 dev->rx_urb_size = dev->hard_mtu;
2729 }
2730
2731 dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
2732
2733 SET_NETDEV_DEV(net, &udev->dev);
2734 status = register_netdev(net);
2735 if (status) {
2736 goto out3;
2737 }
2738 printk("register meig net device:%s\n", net->name);
2739 // ok, it's ready to go.
2740 usb_set_intfdata(udev, dev);
2741
2742 /* activate the download tlp feature */
2743 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
2744 dev->hw_tlp_download_is_actived = 0; // activation failed
2745 devdbg(dev, "kernel 4.9 and later: TLP is not activated by default");
2746 #else
2747 if (0 < hw_send_tlp_download_request(udev)) {
2748 devdbg(dev, "%s: The tlp is activated", __FUNCTION__);
2749 dev->hw_tlp_download_is_actived = 1; // activated successfully
2750 } else {
2751 dev->hw_tlp_download_is_actived = 0; // activation failed
2752 }
2753 #endif
2754
2755 netif_device_attach(net);
2756 /* set the carrier off by default */
2757 netif_carrier_off(net);
2758 if (!deviceisBalong) {
2759 dev->qmi_sync = 0;
2760 INIT_DELAYED_WORK(&dev->status_work, hw_cdc_check_status_work);
2761 schedule_delayed_work(&dev->status_work, 0x0A * HZ);
2762 }
2763 return 0;
2764
2765 out3:
2766 hw_cdc_unbind(dev, udev);
2767 out1:
2768 free_netdev(net);
2769 out:
2770 usb_put_dev(xdev);
2771 return status;
2772 }
2773 EXPORT_SYMBOL_GPL(hw_cdc_probe);
2774
2775 /*-------------------------------------------------------------------------*/
2776
2777 /*
2778 * suspend the whole driver as soon as the first interface is suspended
2779 * resume only when the last interface is resumed
2780 */
2781
2782 int hw_suspend(struct usb_interface *intf, pm_message_t message)
2783 {
2784 struct hw_cdc_net *dev = usb_get_intfdata(intf);
2785
2786 if (!dev->suspend_count++) {
2787 /*
2788 * accelerate emptying of the rx and queues, to avoid
2789 * having everything error out.
2790 */
2791 netif_device_detach(dev->net);
2792 (void)unlink_urbs(dev, &dev->rxq);
2793 (void)unlink_urbs(dev, &dev->txq);
2794 /*
2795 * reattach so runtime management can use and
2796 * wake the device
2797 */
2798 netif_device_attach(dev->net);
2799 }
2800 return 0;
2801 }
2802 EXPORT_SYMBOL_GPL(hw_suspend);
2803
2804 int hw_resume(struct usb_interface *intf)
2805 {
2806 struct hw_cdc_net *dev = usb_get_intfdata(intf);
2807
2808 if (!--dev->suspend_count) {
2809 tasklet_schedule(&dev->bh);
2810 }
2811
2812 return 0;
2813 }
2814 EXPORT_SYMBOL_GPL(hw_resume);
2815
2816 static int hw_cdc_reset_resume(struct usb_interface *intf)
2817 {
2818 return hw_resume(intf);
2819 }
2820
2821 int hw_send_tlp_download_request(struct usb_interface *intf)
2822 {
2823 struct usb_device *udev = interface_to_usbdev(intf);
2824 struct usb_host_interface *interface = intf->cur_altsetting;
2825 struct usbdevfs_ctrltransfer req = {0};
2826 unsigned char buf[256] = {0};
2827 int retval = 0;
2828 req.bRequestType = 0xC0;
2829 req.bRequest = 0x02; // activating the download tlp feature request
2830 req.wIndex = interface->desc.bInterfaceNumber;
2831 req.wValue = 1;
2832 req.wLength = 1;
2833
2834 req.timeout = 0x3E8;
2835 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req.bRequest, req.bRequestType, req.wValue, req.wIndex,
2836 buf, req.wLength, req.timeout);
2837 /* check whether the TLP feature was activated; a response value of 0x01 indicates success */
2838 if (retval > 0 && buf[0] == 1) {
2839 return retval;
2840 } else {
2841 return 0;
2842 }
2843 }
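/* bRequestType 0xC0 decodes as Device-to-Host | Vendor | Device, i.e. a
 * vendor-specific control read; bRequest 0x02 with wValue 1 asks the
 * firmware to enable TLP framing, and the single response byte reports
 * whether it did.
 */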
2844 ///////////////////////////////////////////////////////////////////////////////////////////////////////
2845 /*
2846 * probes control interface, claims data interface, collects the bulk
2847 * endpoints, activates data interface (if needed), maybe sets MTU.
2848 * all pure cdc
2849 */
2850 // int hw_generic_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf)
2851 #define USB_DEVICE_HUAWEI_DATA 0xFF
2852 static int hw_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf)
2853 {
2854 u8 *buf = intf->cur_altsetting->extra;
2855 int len = intf->cur_altsetting->extralen;
2856 struct usb_interface_descriptor *d;
2857 struct hw_dev_state *info = (void *)&dev->data;
2858 int status;
2859 struct usb_driver *driver = driver_of(intf);
2860 int i;
2861 struct ncm_ctx *ctx = NULL;
2862
2863 devdbg(dev, "hw_cdc_bind enter\n");
2864
2865 if (sizeof dev->data < sizeof *info) {
2866 return -EDOM;
2867 }
2868
2869 dev->ncm_ctx = NULL;
2870 dev->is_ncm = is_ncm_interface(intf);
2871
2872 if (dev->is_ncm) {
2873 devdbg(dev, "this is ncm interface\n");
2874 dev->ncm_ctx = kzalloc(sizeof(struct ncm_ctx), GFP_KERNEL);
2875 if (dev->ncm_ctx == NULL) {
2876 return -ENOMEM;
2877 }
2878 ctx = dev->ncm_ctx;
2879 ctx->ndev = dev;
2880
2881 spin_lock_init(&ctx->tx_lock);
2882
2883 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
2884 ctx->tx_timer.function = ncm_tx_timer_cb;
2885 ctx->tx_timer.data = (unsigned long)ctx;
2886 init_timer(&ctx->tx_timer);
2887 #else
2888 timer_setup(&ctx->tx_timer, ncm_tx_timer_cb, 0);
2889 #endif
2890
2891 if (ncm_tx_timeout) {
2892 ctx->tx_timeout_jiffies = msecs_to_jiffies(ncm_tx_timeout);
2893 } else {
2894 ctx->tx_timeout_jiffies = 0;
2895 }
2896
2897 devdbg(dev, "ctx->tx_timeout_jiffies:%ld", ctx->tx_timeout_jiffies);
2898 }
2899
2900 memset(info, 0, sizeof *info);
2901 info->control = intf;
2902 while (len > 0x03) {
2903 if (buf[1] != USB_DT_CS_INTERFACE) {
2904 goto next_desc;
2905 }
2906
2907 switch (buf[0x02]) {
2908 case USB_CDC_HEADER_TYPE:
2909 if (info->header) {
2910 dev_dbg(&intf->dev, "extra CDC header\n");
2911 goto bad_desc;
2912 }
2913 info->header = (void *)buf;
2914 if (info->header->bLength != sizeof *info->header) {
2915 dev_dbg(&intf->dev, "CDC header len %u\n", info->header->bLength);
2916 goto bad_desc;
2917 }
2918 break;
2919 case USB_CDC_UNION_TYPE:
2920 if (info->u) {
2921 dev_dbg(&intf->dev, "extra CDC union\n");
2922 goto bad_desc;
2923 }
2924 info->u = (void *)buf;
2925 if (info->u->bLength != sizeof *info->u) {
2926 dev_dbg(&intf->dev, "CDC union len %u\n", info->u->bLength);
2927 goto bad_desc;
2928 }
2929
2930 /* we need a master/control interface (what we're
2931 * probed with) and a slave/data interface; union
2932 * descriptors sort this all out.
2933 */
2934 info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
2935 info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
2936 if (!info->control || !info->data) {
2937 dev_dbg(&intf->dev, "master #%u/%p slave #%u/%p\n", info->u->bMasterInterface0, info->control,
2938 info->u->bSlaveInterface0, info->data);
2939 goto bad_desc;
2940 }
2941 if (info->control != intf) {
2942 dev_dbg(&intf->dev, "bogus CDC Union\n");
2943 /* Ambit USB Cable Modem (and maybe others)
2944 * interchanges master and slave interface.
2945 */
2946 if (info->data == intf) {
2947 info->data = info->control;
2948 info->control = intf;
2949 } else {
2950 goto bad_desc;
2951 }
2952 }
2953
2954 /* For the Jungo solution, the NDIS device has no data interface, so there is no need to detect one */
2955 if ((dev->udev->descriptor.bcdDevice != HW_JUNGO_BCDDEVICE_VALUE &&
2956 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS &&
2957 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS_HW) ||
2958 ((info->u != NULL) && (info->u->bMasterInterface0 != info->u->bSlaveInterface0))) {
2959 printk("L[%d]", __LINE__);
2960 /* a data interface altsetting does the real i/o */
2961 d = &info->data->cur_altsetting->desc;
2962 // if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { /* delete the standard CDC slave class detect */
2963 if (d->bInterfaceClass != USB_DEVICE_HUAWEI_DATA && d->bInterfaceClass != USB_CLASS_CDC_DATA) {
2964 /* Add to detect CDC slave class either Huawei defined or standard */
2965 dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
2966 goto bad_desc;
2967 }
2968 }
2969 break;
2970 case USB_CDC_ETHERNET_TYPE:
2971 if (info->ether) {
2972 dev_dbg(&intf->dev, "extra CDC ether\n");
2973 goto bad_desc;
2974 }
2975 info->ether = (void *)buf;
2976 if (info->ether->bLength != sizeof *info->ether) {
2977 dev_dbg(&intf->dev, "CDC ether len %u\n", info->ether->bLength);
2978 goto bad_desc;
2979 }
2980 dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
2981 /* because of Zaurus, we may be ignoring the host
2982 * side link address we were given.
2983 */
2984 break;
2985 case USB_CDC_NCM_TYPE:
2986 if (dev->ncm_ctx->ncm_desc) {
2987 dev_dbg(&intf->dev, "extra NCM descriptor\n");
2988 } else {
2989 dev->ncm_ctx->ncm_desc = (void *)buf;
2990 }
2991 break;
2992 }
2993 next_desc:
2994 len -= buf[0]; /* bLength */
2995 buf += buf[0];
2996 }
2997
2998 if (!info->header || !info->u || (!dev->is_ncm && !info->ether) || (dev->is_ncm && !dev->ncm_ctx->ncm_desc)) {
2999 dev_dbg(&intf->dev, "missing cdc %s%s%s%sdescriptor\n", info->header ? "" : "header ", info->u ? "" : "union ",
3000 info->ether ? "" : "ether ", dev->ncm_ctx->ncm_desc ? "" : "ncm ");
3001 goto bad_desc;
3002 }
3003 if (dev->is_ncm) {
3004 ctx = dev->ncm_ctx;
3005 ctx->control = info->control;
3006 ctx->data = info->data;
3007 status = cdc_ncm_config(ctx);
3008 if (status < 0) {
3009 goto error2;
3010 }
3011
3012 dev->rx_urb_size = ctx->rx_max_ntb;
3013
3014 /* We must always keep one spare SKB for the current-NTB (which
3015 * usbnet does not account for)
3016 */
3017 ctx->skb_pool_size = TX_QLEN_NCM;
3018
3019 ctx->skb_pool = kzalloc(sizeof(struct sk_buff *) * ctx->skb_pool_size, GFP_KERNEL);
3020 if (ctx->skb_pool == NULL) {
3021 dev_dbg(&intf->dev, "failed allocating the SKB pool\n");
3022 goto error2;
3023 }
3024
3025 for (i = 0; i < ctx->skb_pool_size; i++) {
3026 ctx->skb_pool[i] = alloc_skb(ctx->tx_max_ntb, GFP_KERNEL);
3027 if (ctx->skb_pool[i] == NULL) {
3028 dev_dbg(&intf->dev, "failed allocating an SKB for the "
3029 "SKB pool\n");
3030 goto error3;
3031 }
3032 }
3033
3034 ntb_clear(&ctx->curr_ntb);
3035 }
3036
3037 /* if the NDIS device is not a Jungo solution, assume it has a data interface and claim it */
3038 if ((dev->udev->descriptor.bcdDevice != HW_JUNGO_BCDDEVICE_VALUE &&
3039 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS &&
3040 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS_HW) ||
3041 ((info->u != NULL) && (info->u->bMasterInterface0 != info->u->bSlaveInterface0))) {
3042 /* claim data interface and set it up ... with side effects.
3043 * network traffic can't flow until an altsetting is enabled.
3044 */
3045
3046 if (info->data->dev.driver != NULL) {
3047 usb_driver_release_interface(driver, info->data);
3048 }
3049
3050 status = usb_driver_claim_interface(driver, info->data, dev);
3051 if (status < 0) {
3052 return status;
3053 }
3054 }
3055
3056 status = hw_get_endpoints(dev, info->data);
3057 if (status < 0) {
3058 /* ensure immediate exit from hw_disconnect */
3059 goto error3;
3060 }
3061
3062 /* status endpoint: optional for CDC Ethernet, */
3063 dev->status = NULL;
3064
3065 if (dev->udev->descriptor.bcdDevice == HW_JUNGO_BCDDEVICE_VALUE ||
3066 intf->cur_altsetting->desc.bInterfaceSubClass == BINTERFACESUBCLASS ||
3067 intf->cur_altsetting->desc.bInterfaceSubClass == BINTERFACESUBCLASS_HW ||
3068 info->control->cur_altsetting->desc.bNumEndpoints == 1 ||
3069 ((info->u != NULL) && (info->u->bMasterInterface0 == info->u->bSlaveInterface0))) {
3070 struct usb_endpoint_descriptor *desc;
3071 dev->status = &info->control->cur_altsetting->endpoint[0];
3072 desc = &dev->status->desc;
3073 if (((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) ||
3074 ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != USB_DIR_IN) ||
3075 (le16_to_cpu(desc->wMaxPacketSize) < sizeof(struct usb_cdc_notification)) || !desc->bInterval) {
3076 printk(KERN_ERR "fxz-%s:bad notification endpoint\n", __func__);
3077 dev->status = NULL;
3078 }
3079 }
3080
3081 if ((dev->udev->descriptor.bcdDevice != HW_JUNGO_BCDDEVICE_VALUE &&
3082 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS &&
3083 intf->cur_altsetting->desc.bInterfaceSubClass != BINTERFACESUBCLASS_HW) ||
3084 ((info->u != NULL) && (info->u->bMasterInterface0 != info->u->bSlaveInterface0))) {
3085 printk(KERN_ERR "Qualcomm device bcdDevice=%x,InterfaceSubClass=%x\n", dev->udev->descriptor.bcdDevice,
3086 intf->cur_altsetting->desc.bInterfaceSubClass);
3087 deviceisBalong = false;
3088 } else {
3089 deviceisBalong = true;
3090 printk(KERN_ERR "Balong device bcdDevice=%x,InterfaceSubClass=%x\n", dev->udev->descriptor.bcdDevice,
3091 intf->cur_altsetting->desc.bInterfaceSubClass);
3092 }
3093
3094 return hw_get_ethernet_addr(dev);
3095
3096 error3:
3097 if (dev->is_ncm) {
3098 for (i = 0; i < ctx->skb_pool_size && ctx->skb_pool[i]; i++) {
3099 dev_kfree_skb_any(ctx->skb_pool[i]);
3100 }
3101 kfree(ctx->skb_pool);
3102 }
3103 error2:
3104 /* ensure immediate exit from cdc_disconnect */
3105 usb_set_intfdata(info->data, NULL);
3106 usb_driver_release_interface(driver_of(intf), info->data);
3107
3108 if (dev->ncm_ctx) {
3109 kfree(dev->ncm_ctx);
3110 }
3111 return status;
3112
3113 bad_desc:
3114 devinfo(dev, "bad CDC descriptors\n");
3115 return -ENODEV;
3116 }
3117
3118 void hw_cdc_unbind(struct hw_cdc_net *dev, struct usb_interface *intf)
3119 {
3120 struct hw_dev_state *info = (void *)&dev->data;
3121 struct usb_driver *driver = driver_of(intf);
3122 int i;
3123
3124 /* disconnect master --> disconnect slave */
3125 if (intf == info->control && info->data) {
3126 /* ensure immediate exit from usbnet_disconnect */
3127 usb_set_intfdata(info->data, NULL);
3128 usb_driver_release_interface(driver, info->data);
3129 info->data = NULL;
3130 } else if (intf == info->data && info->control) {
3131 /* ensure immediate exit from usbnet_disconnect */
3132 usb_set_intfdata(info->control, NULL);
3133 usb_driver_release_interface(driver, info->control);
3134 info->control = NULL;
3135 }
3136 if (dev->is_ncm && dev->ncm_ctx) {
3137 del_timer_sync(&dev->ncm_ctx->tx_timer);
3138
3139 ntb_free_dgram_list(&dev->ncm_ctx->curr_ntb);
3140 for (i = 0; i < dev->ncm_ctx->skb_pool_size; i++) {
3141 dev_kfree_skb_any(dev->ncm_ctx->skb_pool[i]);
3142 }
3143 kfree(dev->ncm_ctx->skb_pool);
3144 kfree(dev->ncm_ctx);
3145 dev->ncm_ctx = NULL;
3146 }
3147 }
3148 EXPORT_SYMBOL_GPL(hw_cdc_unbind);
3149
3150 /*-------------------------------------------------------------------------
3151 *
3152 * Communications Device Class, Ethernet Control model
3153 *
3154 * Takes two interfaces. The DATA interface is inactive till an altsetting
3155 * is selected. Configuration data includes class descriptors. There's
3156 * an optional status endpoint on the control interface.
3157 *
3158 * This should interop with whatever the 2.4 "CDCEther.c" driver
3159 * (by Brad Hards) talked with, with more functionality.
3160 *
3161 *-------------------------------------------------------------------------*/
3162
3163 static void dumpspeed(struct hw_cdc_net *dev, __le32 *speeds)
3164 {
3165 if (netif_msg_timer(dev)) {
3166 devinfo(dev, "link speeds: %u kbps up, %u kbps down", __le32_to_cpu(speeds[0]) / 1000,
3167 __le32_to_cpu(speeds[1]) / 1000);
3168 }
3169 }
3170
3171 static inline int hw_get_ethernet_addr(struct hw_cdc_net *dev)
3172 {
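/* All devices handled here report the same fixed MAC (00:1e:10:1f:00:01);
 * no per-device address is read from the hardware.
 */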
3173 dev->net->dev_addr[0] = 0x00;
3174 dev->net->dev_addr[1] = 0x1e;
3175
3176 dev->net->dev_addr[0x02] = 0x10;
3177 dev->net->dev_addr[0x03] = 0x1f;
3178 dev->net->dev_addr[0x04] = 0x00;
3179 dev->net->dev_addr[0x05] = 0x01; /* change 0x04 into 0x01 20100129 */
3180
3181 return 0;
3182 }
3183
3184 enum { WRITE_REQUEST = 0x21, READ_RESPONSE = 0xa1 };
3185 #define HW_CDC_OK 0
3186 #define HW_CDC_FAIL (-1)
3187 /*-------------------------------------------------------------------------*/
3188 /* The ioctl is called to send a QMI request to the device
3189 * or read a QMI response from it */
3190 static int hw_cdc_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
3191 {
3192 struct usb_device *udev = interface_to_usbdev(intf);
3193 struct hw_cdc_net *hwnet = (struct hw_cdc_net *)dev_get_drvdata(&intf->dev);
3194 struct usb_host_interface *interface = intf->cur_altsetting;
3195 struct usbdevfs_ctrltransfer *req = (struct usbdevfs_ctrltransfer *)buf;
3196 char *pbuf = NULL;
3197 int ret = -1;
3198 if (!deviceisBalong) {
3199 if (hwnet->qmi_sync == 1) {
3200 deverr(hwnet, "%s: The ndis port is busy.", __FUNCTION__);
3201 return HW_CDC_FAIL;
3202 }
3203 }
3204
3205 if (code != USBDEVFS_CONTROL || req == NULL) {
3206 deverr(hwnet, "%s: The request is not supported.", __FUNCTION__);
3207 return HW_CDC_FAIL;
3208 }
3209
3210 if (req->wLength > 0) {
3211 pbuf = (char *)kmalloc(req->wLength + 1, GFP_KERNEL);
3212 if (pbuf == NULL) {
3213 deverr(hwnet, "%s: Kmalloc the buffer failed.", __FUNCTION__);
3214 return HW_CDC_FAIL;
3215 }
3216 memset(pbuf, 0, req->wLength);
3217 }
3218
3219 switch (req->bRequestType) {
3220 case WRITE_REQUEST: {
3221 if (NULL != req->data && 0 < req->wLength) {
3222 if (copy_from_user(pbuf, req->data, req->wLength)) {
3223 deverr(hwnet, "usbnet_cdc_ioctl: copy_from_user failed");
3224 goto op_error;
3225 }
3226 pbuf[req->wLength] = 0;
3227 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req->bRequest, req->bRequestType, req->wValue,
3228 interface->desc.bInterfaceNumber, pbuf, req->wLength, req->timeout);
3229 } else {
3230 pbuf = NULL;
3231 req->wLength = 0;
3232 }
3233 break;
3234 }
3235 case READ_RESPONSE: {
3236 if (req->data == NULL || req->wLength <= 0 || pbuf == NULL) {
3237 deverr(hwnet, "%s: The buffer is null, can not read the response.", __FUNCTION__);
3238 goto op_error;
3239 }
3240 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req->bRequest, req->bRequestType, req->wValue,
3241 interface->desc.bInterfaceNumber, pbuf, req->wLength, req->timeout);
3242 if (0 < ret) {
3243 if (!deviceisBalong) {
3244 /* check the connection indication */
3245 if (pbuf[0x6] == 0x04 && pbuf[0x9] == 0x22 && pbuf[0xA] == 0) {
3246 if (pbuf[0x10] == 0x02) {
3247 if (hwnet) {
3248 netif_carrier_on(hwnet->net);
3249 }
3250 } else {
3251 if (hwnet) {
3252 netif_carrier_off(hwnet->net);
3253 }
3254 }
3255 }
3256 }
3257 if (copy_to_user(req->data, pbuf, req->wLength)) {
3258 deverr(hwnet, "%s: copy_from_user failed", __FUNCTION__);
3259 goto op_error;
3260 }
3261 }
3262 break;
3263 }
3264 default:
3265 break;
3266 }
3267
3268 if (pbuf != NULL) {
3269 kfree(pbuf);
3270 pbuf = NULL;
3271 }
3272
3273 return HW_CDC_OK;
3274
3275 op_error:
3276 if (pbuf != NULL) {
3277 kfree(pbuf);
3278 pbuf = NULL;
3279 }
3280 return HW_CDC_FAIL;
3281 }
3282
3283 /*
3284 *#define HUAWEI_ETHER_INTERFACE \
3285 * .bInterfaceClass = USB_CLASS_COMM, \
3286 * .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3287 * .bInterfaceProtocol = USB_CDC_PROTO_NONE
3288 */
3289
3290 #define HUAWEI_NDIS_INTERFACE \
3291 .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, .bInterfaceProtocol = 0xff
3292
3293 #define HUAWEI_NCM_INTERFACE \
3294 .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = 0x0d, .bInterfaceProtocol = 0xff
3295
3296 #define HUAWEI_NCM_INTERFACE2 \
3297 .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = 0x0d, .bInterfaceProtocol = 0x00
3298
3299 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE \
3300 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x09
3301
3302 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO \
3303 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x09
3304
3305 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF \
3306 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x39
3307
3308 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO \
3309 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x39
3310
3311 #define HUAWEI_NDIS_SINGLE_INTERFACE \
3312 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x07
3313
3314 #define HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO \
3315 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x07
3316
3317 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF \
3318 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x37
3319
3320 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO \
3321 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x37
3322
3323 #define HUAWEI_NCM_OPTIMIZED_INTERFACE \
3324 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x16
3325
3326 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO \
3327 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x16
3328
3329 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF \
3330 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x46
3331
3332 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO \
3333 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x46
3334
3335 #define HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO \
3336 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x11
3337
3338 #define HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM \
3339 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x11
3340
3341 #define HUAWEI_INTERFACE_NDIS_HW_QUALCOMM \
3342 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x67
3343
3344 #define HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM \
3345 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x69
3346
3347 #define HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM \
3348 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x01, .bInterfaceProtocol = 0x76
3349
3350 #define HUAWEI_INTERFACE_NDIS_HW_JUNGO \
3351 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x67
3352
3353 #define HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO \
3354 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x69
3355
3356 #define HUAWEI_INTERFACE_NDIS_NCM_JUNGO \
3357 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x02, .bInterfaceProtocol = 0x76
3358
3359 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO_HW \
3360 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x09
3361
3362 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO_HW \
3363 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x39
3364
3365 #define HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO_HW \
3366 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x07
3367
3368 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO_HW \
3369 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x37
3370
3371 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO_HW \
3372 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x16
3373
3374 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO_HW \
3375 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x46
3376
3377 #define HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO_HW \
3378 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x11
3379
3380 #define HUAWEI_INTERFACE_NDIS_HW_JUNGO_HW \
3381 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x67
3382
3383 #define HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO_HW \
3384 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x69
3385
3386 #define HUAWEI_INTERFACE_NDIS_NCM_JUNGO_HW \
3387 .bInterfaceClass = 0xFF, .bInterfaceSubClass = 0x03, .bInterfaceProtocol = 0x76
3388
3389 static const struct usb_device_id hw_products[] = {
3390 {
3391 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3392 .idVendor = 0x2dee,
3393 HUAWEI_NDIS_INTERFACE,
3394 },
3395
3396 {
3397 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3398 .idVendor = 0x2dee,
3399 HUAWEI_NDIS_OPTIMIZED_INTERFACE,
3400 },
3401
3402 {
3403 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3404 .idVendor = 0x2dee,
3405 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF,
3406 },
3407
3408 {
3409 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3410 .idVendor = 0x2dee,
3411 HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO,
3412 },
3413
3414 {
3415 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3416 .idVendor = 0x2dee,
3417 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO,
3418 },
3419
3420 {
3421 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3422 .idVendor = 0x2dee,
3423 HUAWEI_NCM_OPTIMIZED_INTERFACE,
3424 },
3425
3426 {
3427 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3428 .idVendor = 0x2dee,
3429 HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO,
3430 },
3431
3432 {
3433 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3434 .idVendor = 0x2dee,
3435 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF,
3436 },
3437
3438 {
3439 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3440 .idVendor = 0x2dee,
3441 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO,
3442 },
3443
3444 {
3445 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3446 .idVendor = 0x2dee,
3447 HUAWEI_NCM_INTERFACE,
3448 },
3449
3450 {
3451 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3452 .idVendor = 0x2dee,
3453 HUAWEI_NCM_INTERFACE2,
3454 },
3455
3456 {.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3457 .idVendor = 0x2dee,
3458 HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO},
3459 {.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
3460 .idVendor = 0x2dee,
3461 HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM},
3462
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_HW_QUALCOMM},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_HW_JUNGO},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_NCM_JUNGO},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE_VDF},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_HW_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO_HW},
	{.match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR,
	 .idVendor = 0x2dee,
	 HUAWEI_INTERFACE_NDIS_NCM_JUNGO_HW},
	{}, // END
};
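/* expose the match table so hotplug/modalias can autoload this driver */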
MODULE_DEVICE_TABLE(usb, hw_products);

static int hw_cdc_reset_resume(struct usb_interface *intf);
static struct usb_driver hw_ether_driver = {
	.name = "MeiG_ether",
	.id_table = hw_products,
	.probe = hw_cdc_probe,
	.disconnect = hw_disconnect,
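	/* struct usb_driver's usbfs hook was renamed from ioctl to
	 * unlocked_ioctl in kernel 2.6.36
	 */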
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	.unlocked_ioctl = hw_cdc_ioctl,
#else
	.ioctl = hw_cdc_ioctl,
#endif
	.suspend = hw_suspend,
	.resume = hw_resume,
	.reset_resume = hw_cdc_reset_resume,
};

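/* Handle a CDC notification from the interrupt/status endpoint:
 * NETWORK_CONNECTION toggles the carrier, SPEED_CHANGE (which may arrive
 * split across two 8-byte packets) is decoded by dumpspeed(), and any
 * other notification is only logged.
 */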
static void hw_cdc_status(struct hw_cdc_net *dev, struct urb *urb)
{
	struct usb_cdc_notification *event;

	if (urb->actual_length < sizeof(*event)) {
		return;
	}

	/* SPEED_CHANGE can get split into two 8-byte packets */
	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
		devdbg(dev, "CDC: second half of a split speed-change notification");
		dumpspeed(dev, (__le32 *)urb->transfer_buffer);
		return;
	}

	event = urb->transfer_buffer;
	switch (event->bNotificationType) {
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		if (netif_msg_timer(dev)) {
			devdbg(dev, "CDC: carrier %s", event->wValue ? "on" : "off");
		}
		if (event->wValue) {
			netif_carrier_on(dev->net);
		} else {
			netif_carrier_off(dev->net);
		}
		break;
	case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
		if (netif_msg_timer(dev)) {
			devdbg(dev, "CDC: speed change (len %d)", urb->actual_length);
		}
		if (urb->actual_length != (sizeof(*event) + 0x08)) {
			set_bit(EVENT_STS_SPLIT, &dev->flags);
		} else {
			dumpspeed(dev, (__le32 *)&event[1]);
		}
		break;
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		/* encapsulated responses are read synchronously elsewhere in
		 * the driver, so the notification itself needs no handling
		 */
		break;
	default:
		devdbg(dev, "%s: CDC: unexpected notification %02x!", __func__, event->bNotificationType);
		break;
	}
}

static int __init hw_cdc_init(void)
{
	/* the private state must fit in the scratch area reserved inside
	 * struct hw_cdc_net; catch a mismatch at compile time
	 */
	BUILD_BUG_ON(sizeof(((struct hw_cdc_net *)0)->data) < sizeof(struct hw_dev_state));

	return usb_register(&hw_ether_driver);
}
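/* fs_initcall registers a built-in copy of the driver earlier in boot than
 * module_init would; for a modular build the two are equivalent
 */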
fs_initcall(hw_cdc_init);

static int hw_send_qmi_request(struct usb_interface *intf, unsigned char *snd_req, int snd_len,
			       unsigned char *read_resp, int resp_len);
static int hw_send_qmi_request_no_resp(struct usb_interface *intf, unsigned char *snd_req, int snd_len,
			       unsigned char *read_resp, int resp_len);

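/* The work function below speaks raw QMI over the CDC control interface.
 * Each request buffer is a QMUX frame: byte 0 is the I/F type (0x01),
 * bytes 1-2 the little-endian frame length, byte 3 the control flags,
 * byte 4 the service type (0x00 = CTL, 0x01 = WDS) and byte 5 the client
 * ID, followed by the service-level message (flags, transaction ID,
 * message ID, TLV length and TLVs).
 */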
static void hw_cdc_check_status_work(struct work_struct *work)
{
	struct hw_cdc_net *dev = container_of(work, struct hw_cdc_net, status_work.work);
	int ret;
	int repeat = 0;
	unsigned char resp_buf[56] = {0};
	/* QMI_CTL_GET_CLIENT_ID (msg 0x0022), requesting a WDS client */
	unsigned char client_id_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
					     0x22, 0x00, 0x04, 0x00, 0x01, 0x01, 0x00, 0x01};
	/* QMI_CTL_RELEASE_CLIENT_ID (msg 0x0023); the trailing client-ID
	 * TLV bytes are patched in before sending
	 */
	unsigned char rel_client_id_req[0x11] = {0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
						 0x00, 0x05, 0x00, 0x01, 0x02, 0x00, 0x01, 0x00};
	/* QMI_WDS_GET_PKT_SRVC_STATUS (msg 0x0022 on the WDS service);
	 * byte 0x05 (the QMUX client ID) is filled in from the CTL response
	 */
	unsigned char status_req[13] = {0x01, 0x0c, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x00, 0x22, 0x00, 0x00, 0x00};
	/* QMI_CTL_SET_INSTANCE_ID (msg 0x0020), instance 0 */
	unsigned char set_instance_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
						0x20, 0x00, 0x04, 0x00, 0x01, 0x01, 0x00, 0x00};

	dev->qmi_sync = 1;

	hw_send_qmi_request_no_resp(dev->intf, set_instance_req, 0x10, resp_buf, 0x38);

	ret = hw_send_qmi_request(dev->intf, client_id_req, 0x10, resp_buf, 0x38);
	if (ret == 0) {
		printk(KERN_ERR "%s: Get client ID failed\n", __func__);
		goto failed;
	}
	/* the client ID allocated for us sits at offset 0x17 of the CTL
	 * response; address the WDS status request to that client
	 */
	status_req[0x05] = resp_buf[0x17];
	memset(resp_buf, 0, sizeof(resp_buf));
	for (repeat = 0; repeat < 0x3; repeat++) {
		ret = hw_send_qmi_request(dev->intf, status_req, 0x0D, resp_buf, 0x38);
		if (ret == 0) {
			printk(KERN_ERR "%s: Get connection status failed\n", __func__);
			continue;
		}

		/* packet service status 0x02 means "connected" */
		if (resp_buf[0x17] == 0x02) {
			printk(KERN_INFO "%s: carrier on\n", __func__);
			netif_carrier_on(dev->net);
			break;
		} else {
			printk(KERN_INFO "%s: carrier off\n", __func__);
		}
	}
failed:
	/* hand the allocated client ID back before finishing */
	rel_client_id_req[0x0f] = 0x02;
	rel_client_id_req[0x10] = status_req[0x05];
	memset(resp_buf, 0, sizeof(resp_buf));

	hw_send_qmi_request_no_resp(dev->intf, rel_client_id_req, 0x11, resp_buf, 0x38);

	dev->qmi_sync = 0;
	cancel_delayed_work(&dev->status_work);
}
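
/* Fire a QMI request without waiting for a reply: only the CDC
 * SEND_ENCAPSULATED_COMMAND is issued (up to three attempts); the
 * read_resp/resp_len arguments are unused and kept for symmetry with
 * hw_send_qmi_request().
 */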
static int hw_send_qmi_request_no_resp(struct usb_interface *intf, unsigned char *snd_req, int snd_len,
			       unsigned char *read_resp, int resp_len)
{
	int ret;
	int index;
	struct usb_device *udev = interface_to_usbdev(intf);

	for (index = 0; index < 0x03; index++) {
		/* CDC SEND_ENCAPSULATED_COMMAND (bRequest 0x00), class request
		 * to the control interface, 5000 ms timeout
		 */
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x21, 0x00,
				      intf->cur_altsetting->desc.bInterfaceNumber, snd_req, snd_len, 0x1388);
		if (ret >= 0) {
			break;
		}
		printk(KERN_ERR "%s: send the qmi request failed\n", __func__);
	}
	return ret;
}

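/* Send a QMI request, then poll GET_ENCAPSULATED_RESPONSE up to ten times
 * for a reply whose client and message IDs match the request.  Returns 1
 * on a matched response, 0 if the polls are exhausted, or a negative errno
 * if the send itself failed.
 */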
static int hw_send_qmi_request(struct usb_interface *intf, unsigned char *snd_req, int snd_len,
			       unsigned char *read_resp, int resp_len)
{
	int ret;
	int index = 0;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct hw_cdc_net *net = usb_get_intfdata(intf);

	/* CDC SEND_ENCAPSULATED_COMMAND (bRequest 0x00), 5000 ms timeout */
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x21, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
			      snd_req, snd_len, 0x1388);
	if (ret < 0) {
		printk(KERN_ERR "%s: send the qmi request failed\n", __func__);
		return ret;
	}
	while (index < 0x0A) {
		/* CDC GET_ENCAPSULATED_RESPONSE (bRequest 0x01), 1000 ms timeout */
		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, 0xA1, 0x00,
				      intf->cur_altsetting->desc.bInterfaceNumber, read_resp, resp_len, 0x3E8);
		if (ret <= 0) {
			printk(KERN_ERR "%s: %d Get response failed\n", __func__, index);
			msleep(10);
		} else if (read_resp[0x04] == 0) {
			/* CTL service: accept a response whose client ID and
			 * message ID match the request
			 */
			if (read_resp[0x6] == 1 && snd_req[0x5] == read_resp[0x5] && snd_req[0x8] == read_resp[0x8] &&
			    snd_req[0x9] == read_resp[0x9]) {
				ret = 1;
				break;
			}
		} else if (read_resp[0x04] == 1) {
			/* WDS service: the message ID sits one byte later, as the
			 * transaction ID is two bytes wide outside the CTL service
			 */
			if (read_resp[0x06] == 0x2 && snd_req[0x5] == read_resp[0x5] && snd_req[0x9] == read_resp[0x9] &&
			    snd_req[0xA] == read_resp[0xA]) {
				printk(KERN_INFO "%s: get the conn status req=%02x resp\n", __func__, snd_req[9]);
				ret = 1;
				break;
			}
		} else if (read_resp[0x04] == 0x4) {
			/* some firmwares report the link state in this form;
			 * 0x02 at offset 0x10 means "connected"
			 */
			if (snd_req[0x09] == read_resp[0x09] && snd_req[0x0A] == read_resp[0x0A] && read_resp[0x10] == 0x2) {
				printk(KERN_INFO "%s: get the conn status ind= carrier on\n", __func__);
				netif_carrier_on(net->net);
			}
		}

		index++;
	}

	if (index >= 0x0A) {
		/* polls exhausted without a matching response */
		ret = 0;
	}
	return ret;
}

static void __exit hw_cdc_exit(void)
{
	usb_deregister(&hw_ether_driver);
}
module_exit(hw_cdc_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");