
Lines Matching +full:0 +full:x80b0

128 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_tx_rx_base()
133 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_tx_rx_base()
139 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_tx_rx_base()
147 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_rqprm()
152 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_rqprm()
162 priv->uses_rxfcb = 0; in gfar_rx_offload_en()
173 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_rx_config()
174 u32 rctrl = 0; in gfar_mac_rx_config()
220 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_tx_config()
221 u32 tctrl = 0; in gfar_mac_tx_config()
243 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_configure_coalescing()
247 int i = 0; in gfar_configure_coalescing()
251 gfar_write(baddr + i, 0); in gfar_configure_coalescing()
258 gfar_write(baddr + i, 0); in gfar_configure_coalescing()
266 gfar_write(&regs->txic, 0); in gfar_configure_coalescing()
267 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
268 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
270 gfar_write(&regs->rxic, 0); in gfar_configure_coalescing()
271 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
272 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
278 gfar_configure_coalescing(priv, 0xFF, 0xFF); in gfar_configure_coalescing_all()
284 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; in gfar_get_stats()
285 unsigned long tx_packets = 0, tx_bytes = 0; in gfar_get_stats()
288 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_get_stats()
298 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_get_stats()
316 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
329 u8 whichbit = (result >> (32 - width)) & 0x1f; in gfar_set_hash_for_addr()
345 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_mac_for_addr()
351 /* For a station address of 0x12345678ABCD in transmission in gfar_set_mac_for_addr()
352 * order (BE), MACnADDR1 is set to 0xCDAB7856 and in gfar_set_mac_for_addr()
353 * MACnADDR2 is set to 0x34120000. in gfar_set_mac_for_addr()
360 tempval = (addr[1] << 24) | (addr[0] << 16); in gfar_set_mac_for_addr()
373 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_addr()
375 return 0; in gfar_set_mac_addr()
381 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_disable()
394 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_enable()
405 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_tx_queues()
416 return 0; in gfar_alloc_tx_queues()
423 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_rx_queues()
432 return 0; in gfar_alloc_rx_queues()
439 for (i = 0; i < priv->num_tx_queues; i++) in gfar_free_tx_queues()
447 for (i = 0; i < priv->num_rx_queues; i++) in gfar_free_rx_queues()
455 for (i = 0; i < MAXGROUPS; i++) in unmap_group_regs()
464 for (i = 0; i < priv->num_grps; i++) in free_gfar_dev()
465 for (j = 0; j < GFAR_NUM_IRQS; j++) { in free_gfar_dev()
477 for (i = 0; i < priv->num_grps; i++) { in disable_napi()
487 for (i = 0; i < priv->num_grps; i++) { in enable_napi()
499 for (i = 0; i < GFAR_NUM_IRQS; i++) { in gfar_parse_group()
506 grp->regs = of_iomap(np, 0); in gfar_parse_group()
510 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); in gfar_parse_group()
549 grp->rx_bit_map = 0xFF; in gfar_parse_group()
550 grp->tx_bit_map = 0xFF; in gfar_parse_group()
582 return 0; in gfar_parse_group()
588 int num = 0; in gfar_of_group_count()
603 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_get_interface()
645 int err = 0, i; in gfar_of_init()
651 u32 stash_len = 0; in gfar_of_init()
652 u32 stash_idx = 0; in gfar_of_init()
674 if (num_grps == 0 || num_grps > MAXGROUPS) { in gfar_of_init()
744 priv->rx_list.count = 0; in gfar_of_init()
747 for (i = 0; i < MAXGROUPS; i++) in gfar_of_init()
775 if (err == 0) in gfar_of_init()
780 if (err == 0) in gfar_of_init()
829 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); in gfar_of_init()
843 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); in gfar_of_init()
845 return 0; in gfar_of_init()
861 u32 rqfcr = 0x0; in cluster_entry_per_class()
894 int i = 0x0; in gfar_init_filer_table()
896 u32 rqfcr = 0x0; in gfar_init_filer_table()
917 for (i = 0; i < rqfar; i++) { in gfar_init_filer_table()
929 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ in __gfar_detect_errata_83xx()
930 unsigned int rev = svr & 0xffff; in __gfar_detect_errata_83xx()
933 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || in __gfar_detect_errata_83xx()
934 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) in __gfar_detect_errata_83xx()
938 if ((pvr == 0x80850010 && mod == 0x80b0) || in __gfar_detect_errata_83xx()
939 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) in __gfar_detect_errata_83xx()
943 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) in __gfar_detect_errata_83xx()
951 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) in __gfar_detect_errata_85xx()
954 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || in __gfar_detect_errata_85xx()
955 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || in __gfar_detect_errata_85xx()
956 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) in __gfar_detect_errata_85xx()
976 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", in gfar_detect_errata()
982 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_addr_hash_table()
988 priv->hash_regs[0] = &regs->igaddr0; in gfar_init_addr_hash_table()
1006 priv->extended_hash = 0; in gfar_init_addr_hash_table()
1009 priv->hash_regs[0] = &regs->gaddr0; in gfar_init_addr_hash_table()
1028 return 0; in __gfar_is_rx_idle()
1030 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are in __gfar_is_rx_idle()
1034 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); in __gfar_is_rx_idle()
1035 res &= 0x7f807f80; in __gfar_is_rx_idle()
1036 if ((res & 0xffff) == (res >> 16)) in __gfar_is_rx_idle()
1039 return 0; in __gfar_is_rx_idle()
1045 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt_nodisable()
1078 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt()
1082 gfar_write(&regs->rqueue, 0); in gfar_halt()
1083 gfar_write(&regs->tqueue, 0); in gfar_halt()
1103 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1109 txbdp->lstatus = 0; in free_skb_tx_queue()
1110 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1133 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1136 rxbdp->lstatus = 0; in free_skb_rx_queue()
1137 rxbdp->bufPtr = 0; in free_skb_rx_queue()
1164 for (i = 0; i < priv->num_tx_queues; i++) { in free_skb_resources()
1174 for (i = 0; i < priv->num_rx_queues; i++) { in free_skb_resources()
1183 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1184 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
1209 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start()
1211 int i = 0; in gfar_start()
1227 for (i = 0; i < priv->num_grps; i++) { in gfar_start()
1253 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in gfar_new_page()
1262 rxb->page_offset = 0; in gfar_new_page()
1305 i = 0; in gfar_alloc_rx_buffs()
1318 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_bds()
1325 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_bds()
1331 tx_queue->skb_curtx = 0; in gfar_init_bds()
1332 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
1336 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
1337 txbdp->lstatus = 0; in gfar_init_bds()
1338 txbdp->bufPtr = 0; in gfar_init_bds()
1349 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_bds()
1352 rx_queue->next_to_clean = 0; in gfar_init_bds()
1353 rx_queue->next_to_use = 0; in gfar_init_bds()
1354 rx_queue->next_to_alloc = 0; in gfar_init_bds()
1376 priv->total_tx_ring_size = 0; in gfar_alloc_skb_resources()
1377 for (i = 0; i < priv->num_tx_queues; i++) in gfar_alloc_skb_resources()
1380 priv->total_rx_ring_size = 0; in gfar_alloc_skb_resources()
1381 for (i = 0; i < priv->num_rx_queues; i++) in gfar_alloc_skb_resources()
1394 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1405 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1416 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1425 for (j = 0; j < tx_queue->tx_ring_size; j++) in gfar_alloc_skb_resources()
1429 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1440 return 0; in gfar_alloc_skb_resources()
1469 priv->oldlink = 0; in startup_gfar()
1470 priv->oldspeed = 0; in startup_gfar()
1479 return 0; in startup_gfar()
1486 u32 val = 0; in gfar_get_flowctrl_cfg()
1500 rmt_adv = 0; in gfar_get_flowctrl_cfg()
1519 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_update_link_state()
1579 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_update_link_state()
1591 priv->tx_actual_en = 0; in gfar_update_link_state()
1601 priv->oldlink = 0; in gfar_update_link_state()
1602 priv->oldspeed = 0; in gfar_update_link_state()
1677 * Returns 0 on success.
1681 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; in init_phy()
1695 priv->oldlink = 0; in init_phy()
1696 priv->oldspeed = 0; in init_phy()
1699 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, in init_phy()
1717 memset(&edata, 0, sizeof(struct ethtool_eee)); in init_phy()
1720 return 0; in init_phy()
1727 memset(fcb, 0, GMAC_FCB_LEN); in gfar_add_fcb()
1786 (fcb_addr % 0x20) > 0x18); in gfar_csum_errata_12()
1812 int i, rq = 0; in gfar_start_xmit()
1815 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; in gfar_start_xmit()
1874 memset(skb->data, 0, GMAC_TXPAL_LEN); in gfar_start_xmit()
1924 frag = &skb_shinfo(skb)->frags[0]; in gfar_start_xmit()
1925 for (i = 0; i < nr_frags; i++, frag++) { in gfar_start_xmit()
1940 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, in gfar_start_xmit()
2026 for (i = 0; i < nr_frags; i++) { in gfar_start_xmit()
2046 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_address()
2048 return 0; in gfar_set_mac_address()
2068 return 0; in gfar_change_mtu()
2118 priv->hwts_tx_en = 0; in gfar_hwtstamp_set()
2132 priv->hwts_rx_en = 0; in gfar_hwtstamp_set()
2148 -EFAULT : 0; in gfar_hwtstamp_set()
2156 config.flags = 0; in gfar_hwtstamp_get()
2162 -EFAULT : 0; in gfar_hwtstamp_get()
2195 int frags = 0, nr_txbds = 0; in gfar_clean_tx_ring()
2197 int howmany = 0; in gfar_clean_tx_ring()
2199 unsigned int bytes_sent = 0; in gfar_clean_tx_ring()
2244 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & in gfar_clean_tx_ring()
2245 ~0x7UL); in gfar_clean_tx_ring()
2247 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); in gfar_clean_tx_ring()
2258 for (i = 0; i < frags; i++) { in gfar_clean_tx_ring()
2398 WARN(size < 0, "gianfar: rx fragment size underflow"); in gfar_add_rx_frag()
2399 if (size < 0) in gfar_add_rx_frag()
2429 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; in gfar_reuse_rx_page()
2510 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); in gfar_process_frame()
2543 int i, howmany = 0; in gfar_clean_rx_ring()
2546 unsigned int total_bytes = 0, total_pkts = 0; in gfar_clean_rx_ring()
2556 cleaned_cnt = 0; in gfar_clean_rx_ring()
2587 i = 0; in gfar_clean_rx_ring()
2646 int work_done = 0; in gfar_poll_rx_sq()
2696 return 0; in gfar_poll_tx_sq()
2706 int work_done = 0, work_done_per_q = 0; in gfar_poll_rx()
2707 int i, budget_per_q = 0; in gfar_poll_rx()
2768 int has_tx_work = 0; in gfar_poll_tx()
2796 return 0; in gfar_poll_tx()
2821 "error interrupt (ievent=0x%08x imask=0x%08x)\n", in gfar_error()
2904 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2916 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2947 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, in register_grp_irqs()
2949 if (err < 0) { in register_grp_irqs()
2957 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, in register_grp_irqs()
2959 if (err < 0) { in register_grp_irqs()
2964 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, in register_grp_irqs()
2966 if (err < 0) { in register_grp_irqs()
2974 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, in register_grp_irqs()
2976 if (err < 0) { in register_grp_irqs()
2984 return 0; in register_grp_irqs()
3001 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
3004 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
3014 for (i = 0; i < priv->num_grps; i++) { in gfar_request_irq()
3017 for (j = 0; j < i; j++) in gfar_request_irq()
3023 return 0; in gfar_request_irq()
3027 * Returns 0 for success.
3062 return 0; in gfar_close()
3071 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; in gfar_clear_exact_match()
3086 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_multi()
3103 gfar_write(&regs->igaddr0, 0xffffffff); in gfar_set_multi()
3104 gfar_write(&regs->igaddr1, 0xffffffff); in gfar_set_multi()
3105 gfar_write(&regs->igaddr2, 0xffffffff); in gfar_set_multi()
3106 gfar_write(&regs->igaddr3, 0xffffffff); in gfar_set_multi()
3107 gfar_write(&regs->igaddr4, 0xffffffff); in gfar_set_multi()
3108 gfar_write(&regs->igaddr5, 0xffffffff); in gfar_set_multi()
3109 gfar_write(&regs->igaddr6, 0xffffffff); in gfar_set_multi()
3110 gfar_write(&regs->igaddr7, 0xffffffff); in gfar_set_multi()
3111 gfar_write(&regs->gaddr0, 0xffffffff); in gfar_set_multi()
3112 gfar_write(&regs->gaddr1, 0xffffffff); in gfar_set_multi()
3113 gfar_write(&regs->gaddr2, 0xffffffff); in gfar_set_multi()
3114 gfar_write(&regs->gaddr3, 0xffffffff); in gfar_set_multi()
3115 gfar_write(&regs->gaddr4, 0xffffffff); in gfar_set_multi()
3116 gfar_write(&regs->gaddr5, 0xffffffff); in gfar_set_multi()
3117 gfar_write(&regs->gaddr6, 0xffffffff); in gfar_set_multi()
3118 gfar_write(&regs->gaddr7, 0xffffffff); in gfar_set_multi()
3124 gfar_write(&regs->igaddr0, 0x0); in gfar_set_multi()
3125 gfar_write(&regs->igaddr1, 0x0); in gfar_set_multi()
3126 gfar_write(&regs->igaddr2, 0x0); in gfar_set_multi()
3127 gfar_write(&regs->igaddr3, 0x0); in gfar_set_multi()
3128 gfar_write(&regs->igaddr4, 0x0); in gfar_set_multi()
3129 gfar_write(&regs->igaddr5, 0x0); in gfar_set_multi()
3130 gfar_write(&regs->igaddr6, 0x0); in gfar_set_multi()
3131 gfar_write(&regs->igaddr7, 0x0); in gfar_set_multi()
3132 gfar_write(&regs->gaddr0, 0x0); in gfar_set_multi()
3133 gfar_write(&regs->gaddr1, 0x0); in gfar_set_multi()
3134 gfar_write(&regs->gaddr2, 0x0); in gfar_set_multi()
3135 gfar_write(&regs->gaddr3, 0x0); in gfar_set_multi()
3136 gfar_write(&regs->gaddr4, 0x0); in gfar_set_multi()
3137 gfar_write(&regs->gaddr5, 0x0); in gfar_set_multi()
3138 gfar_write(&regs->gaddr6, 0x0); in gfar_set_multi()
3139 gfar_write(&regs->gaddr7, 0x0); in gfar_set_multi()
3150 idx = 0; in gfar_set_multi()
3151 em_num = 0; in gfar_set_multi()
3170 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_reset()
3182 gfar_write(&regs->maccfg1, 0); in gfar_mac_reset()
3208 gfar_write(&regs->igaddr0, 0); in gfar_mac_reset()
3209 gfar_write(&regs->igaddr1, 0); in gfar_mac_reset()
3210 gfar_write(&regs->igaddr2, 0); in gfar_mac_reset()
3211 gfar_write(&regs->igaddr3, 0); in gfar_mac_reset()
3212 gfar_write(&regs->igaddr4, 0); in gfar_mac_reset()
3213 gfar_write(&regs->igaddr5, 0); in gfar_mac_reset()
3214 gfar_write(&regs->igaddr6, 0); in gfar_mac_reset()
3215 gfar_write(&regs->igaddr7, 0); in gfar_mac_reset()
3217 gfar_write(&regs->gaddr0, 0); in gfar_mac_reset()
3218 gfar_write(&regs->gaddr1, 0); in gfar_mac_reset()
3219 gfar_write(&regs->gaddr2, 0); in gfar_mac_reset()
3220 gfar_write(&regs->gaddr3, 0); in gfar_mac_reset()
3221 gfar_write(&regs->gaddr4, 0); in gfar_mac_reset()
3222 gfar_write(&regs->gaddr5, 0); in gfar_mac_reset()
3223 gfar_write(&regs->gaddr6, 0); in gfar_mac_reset()
3224 gfar_write(&regs->gaddr7, 0); in gfar_mac_reset()
3246 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_hw_init()
3258 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); in gfar_hw_init()
3261 gfar_write(&regs->rmon.cam1, 0xffffffff); in gfar_hw_init()
3262 gfar_write(&regs->rmon.cam2, 0xffffffff); in gfar_hw_init()
3282 if (priv->rx_stash_size != 0) in gfar_hw_init()
3323 int err = 0, i; in gfar_probe()
3343 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; in gfar_probe()
3355 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3397 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_probe()
3404 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_probe()
3412 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; in gfar_probe()
3443 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3447 dev->name, "_g", '0' + i, "_tx"); in gfar_probe()
3449 dev->name, "_g", '0' + i, "_rx"); in gfar_probe()
3451 dev->name, "_g", '0' + i, "_er"); in gfar_probe()
3466 for (i = 0; i < priv->num_rx_queues; i++) in gfar_probe()
3469 for (i = 0; i < priv->num_tx_queues; i++) in gfar_probe()
3473 return 0; in gfar_probe()
3505 return 0; in gfar_remove()
3512 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_disable()
3522 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_enable()
3540 for (i = 0; i <= MAX_FILER_IDX; i++) in gfar_filer_config_wol()
3541 gfar_write_filer(priv, i, rqfcr, 0); in gfar_filer_config_wol()
3543 i = 0; in gfar_filer_config_wol()
3548 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
3549 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | in gfar_filer_config_wol()
3576 for (i = 0; i <= MAX_FILER_IDX; i++) { in gfar_filer_restore_table()
3588 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start_wol_filer()
3590 int i = 0; in gfar_start_wol_filer()
3605 for (i = 0; i < priv->num_grps; i++) { in gfar_start_wol_filer()
3623 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_suspend()
3628 return 0; in gfar_suspend()
3659 return 0; in gfar_suspend()
3666 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_resume()
3671 return 0; in gfar_resume()
3693 return 0; in gfar_resume()
3704 return 0; in gfar_restore()
3715 priv->oldlink = 0; in gfar_restore()
3716 priv->oldspeed = 0; in gfar_restore()
3725 return 0; in gfar_restore()