Lines Matching +full:0 +full:x80b0

127 	struct gfar __iomem *regs = priv->gfargrp[0].regs;  in gfar_init_tx_rx_base()
132 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_tx_rx_base()
138 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_tx_rx_base()
146 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_rqprm()
151 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_rqprm()
161 priv->uses_rxfcb = 0; in gfar_rx_offload_en()
172 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_rx_config()
173 u32 rctrl = 0; in gfar_mac_rx_config()
216 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_tx_config()
217 u32 tctrl = 0; in gfar_mac_tx_config()
239 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_configure_coalescing()
243 int i = 0; in gfar_configure_coalescing()
247 gfar_write(baddr + i, 0); in gfar_configure_coalescing()
254 gfar_write(baddr + i, 0); in gfar_configure_coalescing()
262 gfar_write(&regs->txic, 0); in gfar_configure_coalescing()
263 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
264 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
266 gfar_write(&regs->rxic, 0); in gfar_configure_coalescing()
267 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
268 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
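
The matches above show only the single-register fallback of gfar_configure_coalescing(); in multi-queue operation the driver clears and reprograms one coalescing register per queue via the baddr + i writes at lines 247/254. A sketch of that per-queue Tx loop, reusing the identifiers visible in the matched lines (the iteration over the queue mask reflects my reading of the surrounding code, not a verbatim excerpt):

	/* Illustrative sketch: clear each enabled Tx queue's coalescing
	 * register, then program it only if coalescing is enabled for that
	 * queue. baddr points at the first per-queue register (txic0).
	 */
	baddr = &regs->txic0;
	for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
		gfar_write(baddr + i, 0);
		if (likely(priv->tx_queue[i]->txcoalescing))
			gfar_write(baddr + i, priv->tx_queue[i]->txic);
	}
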
274 gfar_configure_coalescing(priv, 0xFF, 0xFF); in gfar_configure_coalescing_all()
282 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_get_stats64()
288 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_get_stats64()
294 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_get_stats64()
324 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
337 u8 whichbit = (result >> (32 - width)) & 0x1f; in gfar_set_hash_for_addr()
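The gaddr0-7 comment at line 324 and the whichbit computation at line 337 together describe how a CRC-based hash index is split into a group-address register number and a bit position. A standalone restatement (illustrative helper, not a function in the driver; width is the hash width in bits, 8 for the 256-entry gaddr table):

	/* Split a 32-bit CRC result into (register, bit): the top width-5
	 * bits select one of the gaddr/igaddr registers, the next 5 bits
	 * select the bit within that 32-bit register, counted from the MSB
	 * so that gaddr0's MSB is entry 0.
	 */
	static void hash_index_split(u32 result, int width,
				     u8 *whichreg, u8 *whichbit)
	{
		*whichbit = (result >> (32 - width)) & 0x1f;
		*whichreg = result >> (32 - width + 5);
	}
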
353 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_mac_for_addr()
359 /* For a station address of 0x12345678ABCD in transmission in gfar_set_mac_for_addr()
360 * order (BE), MACnADDR1 is set to 0xCDAB7856 and in gfar_set_mac_for_addr()
361 * MACnADDR2 is set to 0x34120000. in gfar_set_mac_for_addr()
368 tempval = (addr[1] << 24) | (addr[0] << 16); in gfar_set_mac_for_addr()
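The comment above pins down the byte order the MACnADDR registers expect; combined with the addr[1]/addr[0] line it implies the following packing of a six-byte station address (self-contained sketch, helper name assumed):

	/* Pack 12:34:56:78:AB:CD so that MACnADDR1 = 0xCDAB7856 and
	 * MACnADDR2 = 0x34120000, per the comment above.
	 */
	static void pack_station_addr(const u8 addr[6],
				      u32 *macaddr1, u32 *macaddr2)
	{
		*macaddr1 = (addr[5] << 24) | (addr[4] << 16) |
			    (addr[3] << 8)  |  addr[2];
		*macaddr2 = (addr[1] << 24) | (addr[0] << 16);
	}
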
381 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_addr()
383 return 0; in gfar_set_mac_addr()
389 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_disable()
402 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_enable()
414 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_tx_queues()
425 return 0; in gfar_alloc_tx_queues()
432 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_rx_queues()
441 return 0; in gfar_alloc_rx_queues()
448 for (i = 0; i < priv->num_tx_queues; i++) in gfar_free_tx_queues()
456 for (i = 0; i < priv->num_rx_queues; i++) in gfar_free_rx_queues()
464 for (i = 0; i < MAXGROUPS; i++) in unmap_group_regs()
473 for (i = 0; i < priv->num_grps; i++) in free_gfar_dev()
474 for (j = 0; j < GFAR_NUM_IRQS; j++) { in free_gfar_dev()
486 for (i = 0; i < priv->num_grps; i++) { in disable_napi()
496 for (i = 0; i < priv->num_grps; i++) { in enable_napi()
508 for (i = 0; i < GFAR_NUM_IRQS; i++) { in gfar_parse_group()
515 grp->regs = of_iomap(np, 0); in gfar_parse_group()
519 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); in gfar_parse_group()
538 grp->rx_bit_map = 0xFF; in gfar_parse_group()
539 grp->tx_bit_map = 0xFF; in gfar_parse_group()
571 return 0; in gfar_parse_group()
577 int num = 0; in gfar_of_group_count()
592 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_get_interface()
633 int err = 0, i; in gfar_of_init()
639 u32 stash_len = 0; in gfar_of_init()
640 u32 stash_idx = 0; in gfar_of_init()
659 if (num_grps == 0 || num_grps > MAXGROUPS) { in gfar_of_init()
714 priv->rx_list.count = 0; in gfar_of_init()
717 for (i = 0; i < MAXGROUPS; i++) in gfar_of_init()
745 if (err == 0) in gfar_of_init()
750 if (err == 0) in gfar_of_init()
796 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); in gfar_of_init()
810 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); in gfar_of_init()
812 return 0; in gfar_of_init()
828 u32 rqfcr = 0x0; in cluster_entry_per_class()
861 int i = 0x0; in gfar_init_filer_table()
863 u32 rqfcr = 0x0; in gfar_init_filer_table()
884 for (i = 0; i < rqfar; i++) { in gfar_init_filer_table()
896 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ in __gfar_detect_errata_83xx()
897 unsigned int rev = svr & 0xffff; in __gfar_detect_errata_83xx()
900 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || in __gfar_detect_errata_83xx()
901 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) in __gfar_detect_errata_83xx()
905 if ((pvr == 0x80850010 && mod == 0x80b0) || in __gfar_detect_errata_83xx()
906 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) in __gfar_detect_errata_83xx()
910 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) in __gfar_detect_errata_83xx()
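These PVR/SVR tests are where the searched value 0x80b0 actually appears: 0x80b0 is the MPC8313 part field of the SVR with the E-suffix bit masked off (see the mod computation at line 896), and each condition gates one errata workaround. As a reminder of what the matched conditions enable (flag names recalled from gianfar.h; treat them as assumptions if your tree differs):

	/* Sketch, not a verbatim excerpt: MPC8313 rev >= 2.0 and MPC837x get
	 * GFAR_ERRATA_74, while MPC8313 rev < 2.0 gets GFAR_ERRATA_12.
	 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
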
918 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) in __gfar_detect_errata_85xx()
921 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || in __gfar_detect_errata_85xx()
922 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || in __gfar_detect_errata_85xx()
923 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) in __gfar_detect_errata_85xx()
943 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", in gfar_detect_errata()
949 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_addr_hash_table()
955 priv->hash_regs[0] = &regs->igaddr0; in gfar_init_addr_hash_table()
973 priv->extended_hash = 0; in gfar_init_addr_hash_table()
976 priv->hash_regs[0] = &regs->gaddr0; in gfar_init_addr_hash_table()
995 return 0; in __gfar_is_rx_idle()
997 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are in __gfar_is_rx_idle()
1001 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); in __gfar_is_rx_idle()
1002 res &= 0x7f807f80; in __gfar_is_rx_idle()
1003 if ((res & 0xffff) == (res >> 16)) in __gfar_is_rx_idle()
1006 return 0; in __gfar_is_rx_idle()
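
The comment at line 997 and the mask at line 1002 fully determine the idle test: after masking with 0x7f807f80, bits 7-14 sit in the low half-word and bits 23-30 in the high half-word, so comparing the two 16-bit halves compares exactly those fields. A standalone restatement (assumed helper name):

	/* Return nonzero when the eTSEC Rx side looks idle: bits 7-14 of the
	 * register at offset 0xD1C equal bits 23-30.
	 */
	static int etsec_rx_idle(u32 reg_d1c)
	{
		u32 res = reg_d1c & 0x7f807f80;

		return (res & 0xffff) == (res >> 16);
	}
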
1012 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt_nodisable()
1045 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt()
1049 gfar_write(&regs->rqueue, 0); in gfar_halt()
1050 gfar_write(&regs->tqueue, 0); in gfar_halt()
1070 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1076 txbdp->lstatus = 0; in free_skb_tx_queue()
1077 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1100 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1103 rxbdp->lstatus = 0; in free_skb_rx_queue()
1104 rxbdp->bufPtr = 0; in free_skb_rx_queue()
1131 for (i = 0; i < priv->num_tx_queues; i++) { in free_skb_resources()
1141 for (i = 0; i < priv->num_rx_queues; i++) { in free_skb_resources()
1150 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1151 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
1176 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start()
1178 int i = 0; in gfar_start()
1194 for (i = 0; i < priv->num_grps; i++) { in gfar_start()
1220 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in gfar_new_page()
1229 rxb->page_offset = 0; in gfar_new_page()
1272 i = 0; in gfar_alloc_rx_buffs()
1285 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_bds()
1292 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_bds()
1298 tx_queue->skb_curtx = 0; in gfar_init_bds()
1299 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
1303 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
1304 txbdp->lstatus = 0; in gfar_init_bds()
1305 txbdp->bufPtr = 0; in gfar_init_bds()
1316 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_bds()
1319 rx_queue->next_to_clean = 0; in gfar_init_bds()
1320 rx_queue->next_to_use = 0; in gfar_init_bds()
1321 rx_queue->next_to_alloc = 0; in gfar_init_bds()
1343 priv->total_tx_ring_size = 0; in gfar_alloc_skb_resources()
1344 for (i = 0; i < priv->num_tx_queues; i++) in gfar_alloc_skb_resources()
1347 priv->total_rx_ring_size = 0; in gfar_alloc_skb_resources()
1348 for (i = 0; i < priv->num_rx_queues; i++) in gfar_alloc_skb_resources()
1361 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1372 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1383 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1392 for (j = 0; j < tx_queue->tx_ring_size; j++) in gfar_alloc_skb_resources()
1396 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1407 return 0; in gfar_alloc_skb_resources()
1436 priv->oldlink = 0; in startup_gfar()
1437 priv->oldspeed = 0; in startup_gfar()
1446 return 0; in startup_gfar()
1453 u32 val = 0; in gfar_get_flowctrl_cfg()
1467 rmt_adv = 0; in gfar_get_flowctrl_cfg()
1486 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_update_link_state()
1546 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_update_link_state()
1558 priv->tx_actual_en = 0; in gfar_update_link_state()
1568 priv->oldlink = 0; in gfar_update_link_state()
1569 priv->oldspeed = 0; in gfar_update_link_state()
1644 * Returns 0 on success.
1648 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; in init_phy()
1662 priv->oldlink = 0; in init_phy()
1663 priv->oldspeed = 0; in init_phy()
1666 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, in init_phy()
1684 memset(&edata, 0, sizeof(struct ethtool_eee)); in init_phy()
1687 return 0; in init_phy()
1694 memset(fcb, 0, GMAC_FCB_LEN); in gfar_add_fcb()
1753 (fcb_addr % 0x20) > 0x18); in gfar_csum_errata_12()
1779 int i, rq = 0; in gfar_start_xmit()
1782 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; in gfar_start_xmit()
1841 memset(skb->data, 0, GMAC_TXPAL_LEN); in gfar_start_xmit()
1891 frag = &skb_shinfo(skb)->frags[0]; in gfar_start_xmit()
1892 for (i = 0; i < nr_frags; i++, frag++) { in gfar_start_xmit()
1907 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, in gfar_start_xmit()
1994 for (i = 0; i < nr_frags; i++) { in gfar_start_xmit()
2014 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_address()
2016 return 0; in gfar_set_mac_address()
2036 return 0; in gfar_change_mtu()
2082 priv->hwts_tx_en = 0; in gfar_hwtstamp_set()
2096 priv->hwts_rx_en = 0; in gfar_hwtstamp_set()
2112 -EFAULT : 0; in gfar_hwtstamp_set()
2120 config.flags = 0; in gfar_hwtstamp_get()
2126 -EFAULT : 0; in gfar_hwtstamp_get()
2159 int frags = 0, nr_txbds = 0; in gfar_clean_tx_ring()
2161 int howmany = 0; in gfar_clean_tx_ring()
2163 unsigned int bytes_sent = 0; in gfar_clean_tx_ring()
2208 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & in gfar_clean_tx_ring()
2209 ~0x7UL); in gfar_clean_tx_ring()
2211 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); in gfar_clean_tx_ring()
2222 for (i = 0; i < frags; i++) { in gfar_clean_tx_ring()
2362 WARN(size < 0, "gianfar: rx fragment size underflow"); in gfar_add_rx_frag()
2363 if (size < 0) in gfar_add_rx_frag()
2393 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; in gfar_reuse_rx_page()
2474 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); in gfar_process_frame()
2507 int i, howmany = 0; in gfar_clean_rx_ring()
2510 unsigned int total_bytes = 0, total_pkts = 0; in gfar_clean_rx_ring()
2520 cleaned_cnt = 0; in gfar_clean_rx_ring()
2551 i = 0; in gfar_clean_rx_ring()
2610 int work_done = 0; in gfar_poll_rx_sq()
2660 return 0; in gfar_poll_tx_sq()
2685 "error interrupt (ievent=0x%08x imask=0x%08x)\n", in gfar_error()
2780 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2792 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2823 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, in register_grp_irqs()
2825 if (err < 0) { in register_grp_irqs()
2833 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, in register_grp_irqs()
2835 if (err < 0) { in register_grp_irqs()
2840 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, in register_grp_irqs()
2842 if (err < 0) { in register_grp_irqs()
2850 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, in register_grp_irqs()
2852 if (err < 0) { in register_grp_irqs()
2860 return 0; in register_grp_irqs()
2877 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2880 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2890 for (i = 0; i < priv->num_grps; i++) { in gfar_request_irq()
2893 for (j = 0; j < i; j++) in gfar_request_irq()
2899 return 0; in gfar_request_irq()
2903 * Returns 0 for success.
2938 return 0; in gfar_close()
2947 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; in gfar_clear_exact_match()
2962 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_multi()
2979 gfar_write(&regs->igaddr0, 0xffffffff); in gfar_set_multi()
2980 gfar_write(&regs->igaddr1, 0xffffffff); in gfar_set_multi()
2981 gfar_write(&regs->igaddr2, 0xffffffff); in gfar_set_multi()
2982 gfar_write(&regs->igaddr3, 0xffffffff); in gfar_set_multi()
2983 gfar_write(&regs->igaddr4, 0xffffffff); in gfar_set_multi()
2984 gfar_write(&regs->igaddr5, 0xffffffff); in gfar_set_multi()
2985 gfar_write(&regs->igaddr6, 0xffffffff); in gfar_set_multi()
2986 gfar_write(&regs->igaddr7, 0xffffffff); in gfar_set_multi()
2987 gfar_write(&regs->gaddr0, 0xffffffff); in gfar_set_multi()
2988 gfar_write(&regs->gaddr1, 0xffffffff); in gfar_set_multi()
2989 gfar_write(&regs->gaddr2, 0xffffffff); in gfar_set_multi()
2990 gfar_write(&regs->gaddr3, 0xffffffff); in gfar_set_multi()
2991 gfar_write(&regs->gaddr4, 0xffffffff); in gfar_set_multi()
2992 gfar_write(&regs->gaddr5, 0xffffffff); in gfar_set_multi()
2993 gfar_write(&regs->gaddr6, 0xffffffff); in gfar_set_multi()
2994 gfar_write(&regs->gaddr7, 0xffffffff); in gfar_set_multi()
3000 gfar_write(&regs->igaddr0, 0x0); in gfar_set_multi()
3001 gfar_write(&regs->igaddr1, 0x0); in gfar_set_multi()
3002 gfar_write(&regs->igaddr2, 0x0); in gfar_set_multi()
3003 gfar_write(&regs->igaddr3, 0x0); in gfar_set_multi()
3004 gfar_write(&regs->igaddr4, 0x0); in gfar_set_multi()
3005 gfar_write(&regs->igaddr5, 0x0); in gfar_set_multi()
3006 gfar_write(&regs->igaddr6, 0x0); in gfar_set_multi()
3007 gfar_write(&regs->igaddr7, 0x0); in gfar_set_multi()
3008 gfar_write(&regs->gaddr0, 0x0); in gfar_set_multi()
3009 gfar_write(&regs->gaddr1, 0x0); in gfar_set_multi()
3010 gfar_write(&regs->gaddr2, 0x0); in gfar_set_multi()
3011 gfar_write(&regs->gaddr3, 0x0); in gfar_set_multi()
3012 gfar_write(&regs->gaddr4, 0x0); in gfar_set_multi()
3013 gfar_write(&regs->gaddr5, 0x0); in gfar_set_multi()
3014 gfar_write(&regs->gaddr6, 0x0); in gfar_set_multi()
3015 gfar_write(&regs->gaddr7, 0x0); in gfar_set_multi()
3026 idx = 0; in gfar_set_multi()
3027 em_num = 0; in gfar_set_multi()
3046 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_reset()
3058 gfar_write(&regs->maccfg1, 0); in gfar_mac_reset()
3084 gfar_write(&regs->igaddr0, 0); in gfar_mac_reset()
3085 gfar_write(&regs->igaddr1, 0); in gfar_mac_reset()
3086 gfar_write(&regs->igaddr2, 0); in gfar_mac_reset()
3087 gfar_write(&regs->igaddr3, 0); in gfar_mac_reset()
3088 gfar_write(&regs->igaddr4, 0); in gfar_mac_reset()
3089 gfar_write(&regs->igaddr5, 0); in gfar_mac_reset()
3090 gfar_write(&regs->igaddr6, 0); in gfar_mac_reset()
3091 gfar_write(&regs->igaddr7, 0); in gfar_mac_reset()
3093 gfar_write(&regs->gaddr0, 0); in gfar_mac_reset()
3094 gfar_write(&regs->gaddr1, 0); in gfar_mac_reset()
3095 gfar_write(&regs->gaddr2, 0); in gfar_mac_reset()
3096 gfar_write(&regs->gaddr3, 0); in gfar_mac_reset()
3097 gfar_write(&regs->gaddr4, 0); in gfar_mac_reset()
3098 gfar_write(&regs->gaddr5, 0); in gfar_mac_reset()
3099 gfar_write(&regs->gaddr6, 0); in gfar_mac_reset()
3100 gfar_write(&regs->gaddr7, 0); in gfar_mac_reset()
3122 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_hw_init()
3134 memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1)); in gfar_hw_init()
3137 gfar_write(&regs->rmon.cam1, 0xffffffff); in gfar_hw_init()
3138 gfar_write(&regs->rmon.cam2, 0xffffffff); in gfar_hw_init()
3140 gfar_write(&regs->rmon.car1, 0xffffffff); in gfar_hw_init()
3141 gfar_write(&regs->rmon.car2, 0xffffffff); in gfar_hw_init()
3161 if (priv->rx_stash_size != 0) in gfar_hw_init()
3202 int err = 0, i; in gfar_probe()
3222 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; in gfar_probe()
3234 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3269 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_probe()
3276 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_probe()
3284 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; in gfar_probe()
3296 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_probe()
3323 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3327 dev->name, "_g", '0' + i, "_tx"); in gfar_probe()
3329 dev->name, "_g", '0' + i, "_rx"); in gfar_probe()
3331 dev->name, "_g", '0' + i, "_er"); in gfar_probe()
3346 for (i = 0; i < priv->num_rx_queues; i++) in gfar_probe()
3349 for (i = 0; i < priv->num_tx_queues; i++) in gfar_probe()
3353 return 0; in gfar_probe()
3390 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_disable()
3400 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_enable()
3418 for (i = 0; i <= MAX_FILER_IDX; i++) in gfar_filer_config_wol()
3419 gfar_write_filer(priv, i, rqfcr, 0); in gfar_filer_config_wol()
3421 i = 0; in gfar_filer_config_wol()
3426 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
3427 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | in gfar_filer_config_wol()
3454 for (i = 0; i <= MAX_FILER_IDX; i++) { in gfar_filer_restore_table()
3466 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start_wol_filer()
3468 int i = 0; in gfar_start_wol_filer()
3483 for (i = 0; i < priv->num_grps; i++) { in gfar_start_wol_filer()
3501 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_suspend()
3506 return 0; in gfar_suspend()
3537 return 0; in gfar_suspend()
3544 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_resume()
3549 return 0; in gfar_resume()
3571 return 0; in gfar_resume()
3582 return 0; in gfar_restore()
3593 priv->oldlink = 0; in gfar_restore()
3594 priv->oldspeed = 0; in gfar_restore()
3603 return 0; in gfar_restore()