/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * ASIX Electronics AX88178A/AX88179/AX88179A USB 2.0/3.0 gigabit Ethernet
 * driver.
 */

#include <lwip/netif.h>
#include <lwip/dhcp.h>
#include <lwip/netifapi.h>
#include <lwip/inet.h>

#include "implementation/global_implementation.h"
#include "usb_ethernet.h"
#include "if_axgereg.h"

static void axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen);
static void axge_miibus_statchg(struct axge_softc *sc, uint8_t link_status);

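/*
 * This port has no IFF_DRV_OACTIVE/IFF_SIMPLEX interface flags of its own,
 * so reuse the otherwise unused IFF_MASTER/IFF_SLAVE bits to carry the same
 * driver state.
 */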
#define IFF_DRV_OACTIVE IFF_MASTER
#define IFF_SIMPLEX     IFF_SLAVE

#undef USB_DEBUG_VAR
#define USB_DEBUG_VAR   axge_debug
#ifdef LOSCFG_USB_DEBUG
static int axge_debug = 0;
void
usb_axge_debug_func(int level)
{
    axge_debug = level;
    PRINTK("The level of usb axge debug is %d\n", level);
}
DEBUG_MODULE(axge, usb_axge_debug_func);
#endif

/*
 * Various supported device vendors/products.
 */
static const STRUCT_USB_HOST_ID axge_devs[] = {
    { USB_VPI(0x0B95, 0x178A, AXE_FLAG_178A) },
};

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_setmulti;
static uether_fn_t axge_setpromisc;
static uether_fn_t axge_tick;

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
    { /* [AXGE_BULK_DT_WR] = */
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .frames = 16,
        .bufsize = 16 * MCLBYTES,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1,},
        .callback = axge_bulk_write_callback,
        .timeout = 10000,    /* 10 seconds */
    },
    { /* [AXGE_BULK_DT_RD] = */
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_IN,
        .bufsize = 16 * MCLBYTES,    /* bytes */
        .flags = {.pipe_bof = 1, .short_xfer_ok = 1,},
        .callback = axge_bulk_read_callback,
        .timeout = 0,    /* no timeout */
    },
};

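/*
 * RX bulk-in queue control settings (control byte, bulk-in timer low/high,
 * bulk-in size, inter-frame gap), written to AXGE_RX_BULKIN_QCTRL.  The
 * entry is chosen in axge_miibus_statchg() according to the negotiated
 * link speed and the USB bus speed.
 */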
static const struct {
    uint8_t ctrl;
    uint8_t timer_l;
    uint8_t timer_h;
    uint8_t size;
    uint8_t ifg;
} __packed axge_bulk_size[] = {
    { 7, 0x4f, 0x00, 0x12, 0xff },
    { 7, 0x20, 0x03, 0x16, 0xff },
    { 7, 0xae, 0x07, 0x18, 0xff },
    { 7, 0xcc, 0x4c, 0x18, 0x08 }
};

static device_method_t axge_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, axge_probe),
    DEVMETHOD(device_attach, axge_attach),
    DEVMETHOD(device_detach, axge_detach),
    DEVMETHOD_END
};

static driver_t axge_driver = {
    .name = "USB_AXGE",
    .methods = axge_methods,
    .size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;
DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, 0, 0);

static const struct usb_ether_methods axge_ue_methods = {
    .ue_attach_post = axge_attach_post,
    .ue_start = axge_start,
    .ue_init = axge_init,
    .ue_stop = axge_stop,
    .ue_setmulti = axge_setmulti,
    .ue_setpromisc = axge_setpromisc,
    .ue_tick = axge_tick,
};

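/*
 * Issue a vendor-specific control write to the adapter.  Errors are
 * deliberately ignored; callers treat register writes as best effort.
 */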
static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, unsigned int len)
{
    struct usb_device_request req;

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = cmd;
    USETW(req.wValue, val);
    USETW(req.wIndex, index);
    USETW(req.wLength, len);

    if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
        /* Error ignored. */
    }
}

static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
    axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
    uint8_t temp[2];

    USETW(temp, val);
    axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

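/*
 * Issue a vendor-specific control read and return the USB error code, if any.
 */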
static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
    struct usb_device_request req;

    AXGE_LOCK_ASSERT(sc, MA_OWNED);

    req.bmRequestType = UT_READ_VENDOR_DEVICE;
    req.bRequest = cmd;
    USETW(req.wValue, val);
    USETW(req.wIndex, index);
    USETW(req.wLength, len);

    return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
    uint8_t val;

    (void)axge_read_mem(sc, cmd, 1, reg, &val, 1);
    return (val);
}

static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index, uint16_t reg)
{
    uint8_t val[2];

    (void)axge_read_mem(sc, cmd, index, reg, &val, 2);
    return (UGETW(val));
}

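/*
 * Bring the chip up: power the ethernet PHY and select the internal clocks,
 * pausing briefly after each step to let the hardware settle.
 */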
static void
axge_chip_init(struct axge_softc *sc)
{
    /* Power up ethernet PHY. */
    axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
    axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
    (void)uether_pause(&sc->sc_ue, hz / 4);
    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
        AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
    (void)uether_pause(&sc->sc_ue, hz / 10);
}

static void
axge_csum_cfg(struct usb_ether *ue)
{
    struct axge_softc *sc = ue->ue_sc;
    uint8_t csum;

    csum = 0;
    csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

    csum = 0;
    csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}

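/*
 * Program the receive filter for multicast.  This port keeps no multicast
 * hash table; it enables accept-all-multicast whenever the interface is in
 * all-multicast or promiscuous mode and disables it otherwise.
 */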
static void
axge_setmulti(struct usb_ether *ue)
{
    struct axge_softc *sc = ue->ue_sc;
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    uint16_t rxmode;

    rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR);
    if (ifp->ac_if.flags & (IFF_ALLMULTI | IFF_PROMISC)) {
        rxmode |= RCR_AMALL;
        axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
        return;
    }

    rxmode &= ~RCR_AMALL;
    axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

static void
axge_setpromisc(struct usb_ether *ue)
{
    struct axge_softc *sc = uether_getsc(ue);
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    uint16_t rxmode;

    rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR);

    if (ifp->ac_if.flags & IFF_PROMISC)
        rxmode |= RCR_PRO;
    else
        rxmode &= ~RCR_PRO;

    axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
    axge_setmulti(ue);
}

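/*
 * Periodic link watchdog: read the physical link status register and, when
 * the link state has changed, reprogram the MAC for the new speed.
 */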
static void
axge_tick(struct usb_ether *ue)
{
    struct axge_softc *sc = uether_getsc(ue);
    uint8_t link_status;

    AXGE_LOCK_ASSERT(sc, MA_OWNED);

    link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

    if (sc->sc_link_status != (link_status & AXGE_LINK_MASK)) {
        axge_miibus_statchg(sc, link_status);
        sc->sc_link_status = link_status & AXGE_LINK_MASK;
    }
}

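/*
 * Soft-reset the adapter by re-selecting its current configuration, then
 * rerun the chip initialization sequence.
 */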
static void
axge_reset(struct axge_softc *sc)
{
    struct usb_config_descriptor *cd;
    usb_error_t err;

    cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

    err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
        cd->bConfigurationValue);
    if (err)
        DPRINTF("reset failed (ignored)\n");

    /* Wait a little while for the chip to get its brains in order. */
    (void)uether_pause(&sc->sc_ue, hz / 100);

    axge_chip_init(sc);
}

static void
axge_attach_post(struct usb_ether *ue)
{
    struct axge_softc *sc = uether_getsc(ue);

    /* Initialize controller and get station address. */
    axge_chip_init(sc);
    (void)axge_read_mem(sc, AXGE_ACCESS_MAC, NETIF_MAX_HWADDR_LEN, AXGE_NIDR,
        ue->ue_eaddr, NETIF_MAX_HWADDR_LEN);
}

/*
 * Probe for a supported AX88178A/AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
    struct usb_attach_arg *uaa = device_get_ivars(dev);

    if (uaa->usb_mode != USB_MODE_HOST)
        return (ENXIO);
    if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
        return (ENXIO);
    if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
        return (ENXIO);

    return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
    struct usb_attach_arg *uaa = device_get_ivars(dev);
    struct axge_softc *sc = device_get_softc(dev);
    struct usb_ether *ue = &sc->sc_ue;
    uint8_t iface_index;
    int error;

    sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
    sc->sc_link_status = AXGE_LINK_MASK;

    device_set_usb_desc(dev);
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_RECURSE);

    iface_index = AXGE_IFACE_IDX;
    error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
        axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
    if (error) {
        device_printf(dev, "allocating USB transfers failed\n");
        goto detach;
    }

    ue->ue_sc = sc;
    ue->ue_dev = dev;
    ue->ue_udev = uaa->device;
    ue->ue_mtx = &sc->sc_mtx;
    ue->ue_methods = &axge_ue_methods;

    error = uether_ifattach(ue);
    if (error) {
        device_printf(dev, "could not attach interface\n");
        goto detach;
    }
    return (0);        /* success */

detach:
    (void)axge_detach(dev);
    return (ENXIO);        /* failure */
}

static int
axge_detach(device_t dev)
{
    struct axge_softc *sc = device_get_softc(dev);
    struct usb_ether *ue = &sc->sc_ue;

    usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
    uether_ifdetach(ue);
    mtx_destroy(&sc->sc_mtx);

    return (0);
}

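/*
 * Handle a link state change.  On link up, pick the RX bulk-in tuning entry
 * that matches the negotiated speed (1000/100/10) and the USB bus speed
 * (SuperSpeed/HighSpeed/other), program it into AXGE_RX_BULKIN_QCTRL, update
 * the medium status register, and mark the lwIP netif up.  On link down,
 * only the link flag is cleared.
 */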
static void
axge_miibus_statchg(struct axge_softc *sc, uint8_t link_status)
{
    struct usb_ether *ue = &sc->sc_ue;
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    uint8_t tmp[5];
    uint16_t val = 0;

    if (link_status & AXGE_LINK_MASK) {
        val = MSR_RE;
        if (link_status & IFM_1000_T) {
            val |= MSR_GM | MSR_EN_125MHZ;
            if (link_status & PLSR_USB_SS)
                (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[0], sizeof(axge_bulk_size[0]));
            else if (link_status & PLSR_USB_HS)
                (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[1], sizeof(axge_bulk_size[1]));
            else
                (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[3], sizeof(axge_bulk_size[3]));
        } else if (link_status & IFM_100_TX) {
            val |= MSR_PS;
            if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
                (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[2], sizeof(axge_bulk_size[2]));
            else
                (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[3], sizeof(axge_bulk_size[3]));
        } else if (link_status & IFM_10_T) {
            (void)memcpy_s(tmp, sizeof(tmp), &axge_bulk_size[3], sizeof(axge_bulk_size[3]));
        } else {
            PRINT_WARN("%s, link_status:%x\n", __FUNCTION__, link_status);
            return;
        }

        /* Rx bulk configuration. */
        axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
        axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

        ifp->ac_if.flags |= NETIF_FLAG_LINK_UP;
        (void)netifapi_netif_set_up(&ifp->ac_if);
        PRINTK("AX88178A Link Up\n");
    } else {
        ifp->ac_if.flags &= ~NETIF_FLAG_LINK_UP;
        PRINTK("AX88178A Link Down\n");
    }
}

static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct axge_softc *sc = usbd_xfer_softc(xfer);
    struct usb_ether *ue = &sc->sc_ue;
    struct usb_page_cache *pc;
    int actlen;

    usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
    switch (USB_GET_STATE(xfer)) {
    case USB_ST_TRANSFERRED:
        pc = usbd_xfer_get_frame(xfer, 0);
        axge_rx_frame(ue, pc, actlen);
        /* FALLTHROUGH */
    case USB_ST_SETUP:
tr_setup:
        usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
        usbd_transfer_submit(xfer);
        uether_rxflush(ue);
        return;

    default:        /* Error */
        DPRINTF("bulk read error, %s\n", usbd_errstr(error));

        if (error != USB_ERR_CANCELLED) {
            /* try to clear stall first */
            usbd_xfer_set_stall(xfer);
            goto tr_setup;
        }
        return;
    }
}

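/*
 * Copy one received frame out of the transfer buffer into a freshly
 * allocated pbuf and hand it to the lwIP input path.
 */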
static int
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
    unsigned int len, uint32_t pkt_hdr)
{
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    struct pbuf *m;
    struct pbuf *p;

    if (len < ETHER_HDR_LEN)
        return (EINVAL);

    m = pbuf_alloc(PBUF_RAW, len + ETH_PAD_SIZE, PBUF_RAM);
    if (m == NULL)
        return (ENOMEM);

#if ETH_PAD_SIZE
    /* drop the padding word */
    if (pbuf_header(m, -ETH_PAD_SIZE)) {
        PRINTK("[AXGE_ERROR]axge_rxeof : pbuf_header drop failed\n");
        (void)pbuf_free(m);
        return (EINVAL);
    }
#endif

    for (p = m; p != NULL; p = p->next)
        usbd_copy_out(pc, offset, p->payload, p->len);

#if ETH_PAD_SIZE
    /* reclaim the padding word */
    if (pbuf_header(m, ETH_PAD_SIZE)) {
        PRINTK("[AXGE_ERROR]axge_rxeof : pbuf_header reclaim failed\n");
        (void)pbuf_free(m);
        return (EINVAL);
    }
#endif

    driverif_input(&ifp->ac_if, m);
    return (0);
}

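/*
 * Split one bulk-in transfer into individual Ethernet frames.  The adapter
 * packs the frames back to back (each padded to an 8-byte boundary),
 * followed by one 32-bit packet header per frame and a trailing 32-bit
 * receive header whose low word is the packet count and whose high word is
 * the offset of the packet header block.
 */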
static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
    uint32_t pos;
    uint32_t pkt_cnt;
    uint32_t rxhdr;
    uint32_t pkt_hdr;
    uint32_t hdr_off;
    uint32_t pktlen;

    /* verify we have enough data */
    if (actlen < (int)sizeof(rxhdr))
        return;

    pos = 0;

    usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
    rxhdr = le32toh(rxhdr);

    pkt_cnt = (uint16_t)rxhdr;
    hdr_off = (uint16_t)(rxhdr >> 16);

    while (pkt_cnt--) {
        /* verify the header offset */
        if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
            DPRINTF("End of packet headers\n");
            break;
        }
        if ((int)pos >= actlen) {
            DPRINTF("Data position reached end\n");
            break;
        }
        usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));

        pkt_hdr = le32toh(pkt_hdr);
        pktlen = (pkt_hdr >> 16) & 0x1fff;
        if (pkt_hdr & (AXGE_RXHDR_CRC_ERR | AXGE_RXHDR_DROP_ERR)) {
            DPRINTF("Dropped a packet\n");
        }
        if (pktlen >= 6 && (int)(pos + pktlen) <= actlen) {
            (void)axge_rxeof(ue, pc, pos, pktlen, pkt_hdr);
        } else {
            DPRINTF("Invalid packet pos=%d len=%d\n",
                (int)pos, (int)pktlen);
        }
        pos += (pktlen + 7) & ~7;
        hdr_off += sizeof(pkt_hdr);
    }
}

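/*
 * Bulk-out completion and start routine.  Each queued pbuf is copied into
 * its own USB frame, prefixed with the adapter's 8-byte TX header (the
 * first 32-bit word carries the packet length, the second is left zero).
 */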
static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct axge_softc *sc = usbd_xfer_softc(xfer);
    struct usb_ether *ue = &(sc->sc_ue);
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    struct eth_drv_sc *drv_sc = (struct eth_drv_sc *)ifp->driver_context;
    struct usb_page_cache *pc;
    uint16_t txlen;
    uint32_t nframes, pos;
    struct pbuf *p;
    uint8_t ustat;
    uint32_t txhdr;

    ustat = USB_GET_STATE(xfer);
tr_setup:
    switch (ustat) {
    case USB_ST_TRANSFERRED:
        drv_sc->state &= ~IFF_DRV_OACTIVE;
        /* FALLTHROUGH */

    case USB_ST_SETUP:
        if (drv_sc->state & IFF_DRV_OACTIVE)
            return;

        UE_LOCK(ue);
        IF_DEQUEUE(&(ue->ue_txq), p);
        UE_UNLOCK(ue);

        nframes = 0;
        while (p) {
            txlen = p->len;
            if (txlen <= 0)
                break;

            usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, nframes);
            pos = 0;
            pc = usbd_xfer_get_frame(xfer, nframes);

            txhdr = htole32(txlen);
            usbd_copy_in(pc, 0, &txhdr, sizeof(txhdr));
            txhdr = 0;
            txhdr = htole32(txhdr);
            usbd_copy_in(pc, 4, &txhdr, sizeof(txhdr));
            pos += 8;

            usbd_copy_in(pc, pos, p->payload, txlen);
            pos += txlen;
            if ((pos % usbd_xfer_max_framelen(xfer)) == 0)
                txhdr |= 0x80008000;

            /* Set frame length. */
            usbd_xfer_set_frame_len(xfer, nframes, pos);

            uether_freebuf(p);
            nframes++;
            if (nframes >= 16)
                break;

            UE_LOCK(ue);
            IF_DEQUEUE(&(ue->ue_txq), p);
            UE_UNLOCK(ue);
        }
        if (nframes != 0) {
            usbd_xfer_set_frames(xfer, nframes);
            usbd_transfer_submit(xfer);
            drv_sc->state |= IFF_DRV_OACTIVE;
        }
        break;

    default:        /* Error */
        drv_sc->state &= ~IFF_DRV_OACTIVE;
        if (error != USB_ERR_CANCELLED) {
            /* try to clear stall first */
            usbd_xfer_set_stall(xfer);
            ustat = USB_ST_SETUP;
            goto tr_setup;
        }
        break;
    }
}

static void
axge_start(struct usb_ether *ue)
{
    struct axge_softc *sc = ue->ue_sc;

    /*
     * Start the USB transfers, if not already started.
     */
    usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
    usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

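/*
 * Program the adapter for operation: reset it, set the station address,
 * enable checksum offloading, and configure the receive filter before
 * marking the interface running.
 */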
static void
axge_init(struct usb_ether *ue)
{
    struct axge_softc *sc = uether_getsc(ue);
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    struct eth_drv_sc *drv_sc = (struct eth_drv_sc *)ifp->driver_context;
    uint16_t rxmode;

    drv_sc->state = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

    AXGE_LOCK_ASSERT(sc, MA_OWNED);
    if ((drv_sc->state & IFF_DRV_RUNNING) != 0)
        return;

    /* Cancel pending I/O. */
    axge_stop(ue);
    axge_reset(sc);

    /* Set MAC address. */
    ifp->ac_if.hwaddr_len = NETIF_MAX_HWADDR_LEN;
    (void)axge_read_mem(sc, AXGE_ACCESS_MAC, 6, AXGE_NIDR, ifp->ac_if.hwaddr, 6);

    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

    ifp->ac_if.flags |= NETIF_FLAG_UP | NETIF_FLAG_BROADCAST |
        NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET;

    /* Configure TX/RX checksum offloading. */
    axge_csum_cfg(ue);

    /* Configure RX settings. */
    rxmode = (RCR_AM | RCR_SO | RCR_DROP_CRCE);

    /* If we want promiscuous mode, set the allframes bit. */
    if (ifp->ac_if.flags & IFF_PROMISC)
        rxmode |= RCR_PRO;

    if (ifp->ac_if.flags & IFF_BROADCAST)
        rxmode |= RCR_AB;

    axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);

    axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR,
        MMSR_PME_TYPE | MMSR_PME_POL | MMSR_RWMP);

    /* Load the multicast filter. */
    axge_setmulti(ue);

    drv_sc->state |= IFF_DRV_RUNNING;
}

static void
axge_stop(struct usb_ether *ue)
{
    struct axge_softc *sc = uether_getsc(ue);
    struct los_eth_driver *ifp = ue->ue_drv_sc;
    struct eth_drv_sc *drv_sc = (struct eth_drv_sc *)ifp->driver_context;

    AXGE_LOCK_ASSERT(sc, MA_OWNED);
    drv_sc->state &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    sc->sc_flags &= ~AXE_FLAG_LINK;

    /*
     * Stop all the transfers, if not already stopped.
     */
    usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
    usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

#undef USB_DEBUG_VAR