Home
last modified time | relevance | path

Searched refs: RX_RING_SIZE (Results 1 – 25 of 48) sorted by relevance

12

/drivers/net/ethernet/sun/
Dsungem.h883 #define RX_RING_SIZE 128 macro
907 #if RX_RING_SIZE == 32
909 #elif RX_RING_SIZE == 64
911 #elif RX_RING_SIZE == 128
913 #elif RX_RING_SIZE == 256
915 #elif RX_RING_SIZE == 512
917 #elif RX_RING_SIZE == 1024
919 #elif RX_RING_SIZE == 2048
921 #elif RX_RING_SIZE == 4096
923 #elif RX_RING_SIZE == 8192
[all …]
Dsunhme.h331 #define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */ macro
341 #if (RX_RING_SIZE == 32)
344 #if (RX_RING_SIZE == 64)
347 #if (RX_RING_SIZE == 128)
350 #if (RX_RING_SIZE == 256)
353 #error RX_RING_SIZE holds illegal value
359 #define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
361 #define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
416 struct sk_buff *rx_skbs[RX_RING_SIZE];
Dsunbmac.h251 #define RX_RING_SIZE 256 macro
253 #define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
255 #define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
298 struct sk_buff *rx_skbs[RX_RING_SIZE];
/drivers/net/ethernet/dec/tulip/
Dinterrupt.c67 entry = tp->dirty_rx % RX_RING_SIZE; in tulip_refill_rx()
110 int entry = tp->cur_rx % RX_RING_SIZE; in tulip_poll()
121 if (budget >=RX_RING_SIZE) budget--; in tulip_poll()
142 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) in tulip_poll()
254 entry = (++tp->cur_rx) % RX_RING_SIZE; in tulip_poll()
255 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) in tulip_poll()
311 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
333 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || in tulip_poll()
334 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
337 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
[all …]
Dwinbond-840.c296 dma_addr_t rx_addr[RX_RING_SIZE];
301 struct sk_buff* rx_skbuff[RX_RING_SIZE];
805 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; in init_rxtx_rings()
808 for (i = 0; i < RX_RING_SIZE; i++) { in init_rxtx_rings()
817 for (i = 0; i < RX_RING_SIZE; i++) { in init_rxtx_rings()
830 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_rxtx_rings()
841 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE, in init_rxtx_rings()
850 for (i = 0; i < RX_RING_SIZE; i++) { in free_rxtx_rings()
942 for (i = 0; i < RX_RING_SIZE; i++) in tx_timeout()
984 sizeof(struct w840_rx_desc)*RX_RING_SIZE + in alloc_ringdesc()
[all …]
/drivers/net/ethernet/amd/
D7990.h39 #define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS) macro
41 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
88 volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
91 volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
Dariadne.c80 #define RX_RING_SIZE 16 macro
88 volatile struct RDRE *rx_ring[RX_RING_SIZE];
90 volatile u_short *rx_buff[RX_RING_SIZE];
100 struct RDRE rx_ring[RX_RING_SIZE];
102 u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
144 for (i = 0; i < RX_RING_SIZE; i++) { in ariadne_init_ring()
163 int entry = priv->cur_rx % RX_RING_SIZE; in ariadne_rx()
197 for (i = 0; i < RX_RING_SIZE; i++) in ariadne_rx()
198 if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) in ariadne_rx()
201 if (i > RX_RING_SIZE - 2) { in ariadne_rx()
[all …]
Ddeclance.c158 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) macro
159 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
221 struct lance_rx_desc brx_ring[RX_RING_SIZE];
265 char *rx_buf_ptr_cpu[RX_RING_SIZE];
269 uint rx_buf_ptr_lnc[RX_RING_SIZE];
509 for (i = 0; i < RX_RING_SIZE; i++) { in lance_init_ring()
568 for (i = 0; i < RX_RING_SIZE; i++) { in lance_rx()
1086 for (i = 0; i < RX_RING_SIZE; i++) { in dec_lance_probe()
1096 2 * RX_RING_SIZE * RX_BUFF_SIZE + in dec_lance_probe()
1100 RX_RING_SIZE * RX_BUFF_SIZE + in dec_lance_probe()
[all …]
Da2065.c72 #define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS) macro
75 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
95 struct lance_rx_desc brx_ring[RX_RING_SIZE];
98 char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
253 char buf[RX_RING_SIZE + 1]; in lance_rx()
255 for (i = 0; i < RX_RING_SIZE; i++) { in lance_rx()
262 buf[RX_RING_SIZE] = 0; in lance_rx()
Dlance.c197 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) macro
198 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
237 struct lance_rx_head rx_ring[RX_RING_SIZE];
244 struct sk_buff* rx_skbuff[RX_RING_SIZE];
555 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, in lance_probe1()
844 for (i = 0; i < RX_RING_SIZE; i++) { in lance_purge_ring()
870 for (i = 0; i < RX_RING_SIZE; i++) { in lance_init_ring()
934 for (i = 0; i < RX_RING_SIZE; i++) in lance_tx_timeout()
1190 for (i=0; i < RX_RING_SIZE; i++) in lance_rx()
1194 if (i > RX_RING_SIZE -2) in lance_rx()
Datarilance.c115 #define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) macro
117 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
158 struct lance_rx_head rx_head[RX_RING_SIZE];
718 for( i = 0; i < RX_RING_SIZE; i++ ) { in lance_init_ring()
754 for( i = 0 ; i < RX_RING_SIZE; i++ ) in lance_tx_timeout()
1004 for( i = 0; i < RX_RING_SIZE; i++ ) in lance_rx()
1009 if (i > RX_RING_SIZE - 2) { in lance_rx()
Dsun3lance.c97 #define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) macro
99 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
142 struct lance_rx_head rx_head[RX_RING_SIZE];
143 char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
473 for( i = 0; i < RX_RING_SIZE; i++ ) { in lance_init_ring()
546 for( i = 0 ; i < RX_RING_SIZE; i++ ) in lance_start_xmit()
/drivers/net/ethernet/packetengines/
Dhamachi.c120 #define RX_RING_SIZE 512 macro
122 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc)
486 struct sk_buff* rx_skbuff[RX_RING_SIZE];
1066 for (i = 0; i < RX_RING_SIZE; i++) in hamachi_tx_timeout()
1088 for (i = 0; i < RX_RING_SIZE; i++) in hamachi_tx_timeout()
1124 for (i = 0; i < RX_RING_SIZE; i++){ in hamachi_tx_timeout()
1136 for (i = 0; i < RX_RING_SIZE; i++) { in hamachi_tx_timeout()
1149 hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in hamachi_tx_timeout()
1151 hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); in hamachi_tx_timeout()
1185 for (i = 0; i < RX_RING_SIZE; i++) { in hamachi_init_ring()
[all …]
Dyellowfin.c75 #define RX_RING_SIZE 64 macro
78 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
311 struct sk_buff* rx_skbuff[RX_RING_SIZE];
702 for (i = 0; i < RX_RING_SIZE; i++) in yellowfin_tx_timeout()
738 for (i = 0; i < RX_RING_SIZE; i++) { in yellowfin_init_ring()
742 ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc)); in yellowfin_init_ring()
745 for (i = 0; i < RX_RING_SIZE; i++) { in yellowfin_init_ring()
754 if (i != RX_RING_SIZE) { in yellowfin_init_ring()
760 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in yellowfin_init_ring()
1044 int entry = yp->cur_rx % RX_RING_SIZE; in yellowfin_rx()
[all …]
/drivers/net/ethernet/pasemi/
Dpasemi_mac.h30 #define RX_RING_SIZE 2048 macro
113 #define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
114 #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
115 #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)])
Dpasemi_mac.c431 ring->size = RX_RING_SIZE; in pasemi_mac_setup_rx_resources()
433 RX_RING_SIZE, GFP_KERNEL); in pasemi_mac_setup_rx_resources()
439 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) in pasemi_mac_setup_rx_resources()
443 RX_RING_SIZE * sizeof(u64), in pasemi_mac_setup_rx_resources()
448 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); in pasemi_mac_setup_rx_resources()
455 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); in pasemi_mac_setup_rx_resources()
469 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); in pasemi_mac_setup_rx_resources()
597 for (i = 0; i < RX_RING_SIZE; i++) { in pasemi_mac_free_rx_buffers()
610 for (i = 0; i < RX_RING_SIZE; i++) in pasemi_mac_free_rx_buffers()
618 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), in pasemi_mac_free_rx_resources()
[all …]
/drivers/net/wan/
Ddscc4.c165 #define RX_RING_SIZE 32 macro
167 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
200 struct sk_buff *rx_skbuff[RX_RING_SIZE];
441 ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD); in dscc4_rx_update()
507 for (i = 0; i < RX_RING_SIZE; i++) { in dscc4_release_ring()
521 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; in try_get_rx_skb()
648 struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; in dscc4_rx_skb()
653 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; in dscc4_rx_skb()
680 while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) { in dscc4_rx_skb()
1664 (dpriv->rx_current%RX_RING_SIZE)* in dscc4_tx_irq()
[all …]
/drivers/net/ethernet/smsc/
Depic100.c55 #define RX_RING_SIZE 256 macro
57 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
262 struct sk_buff* rx_skbuff[RX_RING_SIZE];
787 epic_rx(dev, RX_RING_SIZE); in epic_pause()
822 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* in epic_restart()
925 for (i = 0; i < RX_RING_SIZE; i++) { in epic_init_ring()
936 for (i = 0; i < RX_RING_SIZE; i++) { in epic_init_ring()
946 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in epic_init_ring()
1157 int entry = ep->cur_rx % RX_RING_SIZE; in epic_rx()
1158 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; in epic_rx()
[all …]
Dsmsc9420.c596 for (i = 0; i < RX_RING_SIZE; i++) { in smsc9420_free_rx_ring()
875 pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; in smsc9420_alloc_new_rx_buffers()
897 pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; in smsc9420_rx_poll()
1284 RX_RING_SIZE), GFP_KERNEL); in smsc9420_alloc_rx_ring()
1291 for (i = 0; i < RX_RING_SIZE; i++) { in smsc9420_alloc_rx_ring()
1298 pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); in smsc9420_alloc_rx_ring()
1301 for (i = 0; i < RX_RING_SIZE; i++) { in smsc9420_alloc_rx_ring()
1634 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE + in smsc9420_probe()
1643 (pd->rx_ring + RX_RING_SIZE); in smsc9420_probe()
1645 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; in smsc9420_probe()
[all …]
/drivers/net/ethernet/dlink/
Ddl2k.h41 #define RX_RING_SIZE 256 macro
43 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
382 struct sk_buff *rx_skbuff[RX_RING_SIZE];
Dsundance.c70 #define RX_RING_SIZE 64 macro
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
364 struct sk_buff* rx_skbuff[RX_RING_SIZE];
1013 for (i = 0; i < RX_RING_SIZE; i++) { in init_ring()
1015 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); in init_ring()
1022 for (i = 0; i < RX_RING_SIZE; i++) { in init_ring()
1040 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_ring()
1311 int entry = np->cur_rx % RX_RING_SIZE; in rx_poll()
1382 entry = (entry + 1) % RX_RING_SIZE; in rx_poll()
1409 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; in refill_rx()
[all …]
/drivers/net/ethernet/natsemi/
Dnatsemi.c104 #define RX_RING_SIZE 32 macro
544 struct sk_buff *rx_skbuff[RX_RING_SIZE];
545 dma_addr_t rx_dma[RX_RING_SIZE];
1723 writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), in init_registers()
1875 for (i = 0; i < RX_RING_SIZE; i++) { in dump_ring()
1919 sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), in alloc_ring()
1923 np->tx_ring = &np->rx_ring[RX_RING_SIZE]; in alloc_ring()
1934 int entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1947 if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { in refill_rx()
1975 *((i+1)%TX_RING_SIZE+RX_RING_SIZE)); in init_ring()
[all …]
/drivers/net/ethernet/via/
Dvia-rhine.c79 #define RX_RING_SIZE 64 macro
431 struct sk_buff *rx_skbuff[RX_RING_SIZE];
432 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
1085 RX_RING_SIZE * sizeof(struct rx_desc) + in alloc_ring()
1098 RX_RING_SIZE * sizeof(struct rx_desc) + in alloc_ring()
1106 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1108 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1118 RX_RING_SIZE * sizeof(struct rx_desc) + in free_ring()
1144 for (i = 0; i < RX_RING_SIZE; i++) { in alloc_rbufs()
1155 for (i = 0; i < RX_RING_SIZE; i++) { in alloc_rbufs()
[all …]
/drivers/net/ethernet/3com/
D3c515.c57 #define RX_RING_SIZE 16 macro
305 struct boom_rx_desc rx_ring[RX_RING_SIZE];
308 struct sk_buff *rx_skbuff[RX_RING_SIZE];
820 for (i = 0; i < RX_RING_SIZE; i++) { in corkscrew_open()
822 if (i < (RX_RING_SIZE - 1)) in corkscrew_open()
1333 int entry = vp->cur_rx % RX_RING_SIZE; in boomerang_rx()
1398 entry = (++vp->cur_rx) % RX_RING_SIZE; in boomerang_rx()
1403 entry = vp->dirty_rx % RX_RING_SIZE; in boomerang_rx()
1453 for (i = 0; i < RX_RING_SIZE; i++) in corkscrew_close()
/drivers/net/ethernet/renesas/
Dsh_eth.c635 for (i = 0; i < RX_RING_SIZE; i++) { in sh_eth_ring_free()
660 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; in sh_eth_ring_format()
669 for (i = 0; i < RX_RING_SIZE; i++) { in sh_eth_ring_format()
695 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); in sh_eth_ring_format()
737 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, in sh_eth_ring_init()
754 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; in sh_eth_ring_init()
912 int entry = mdp->cur_rx % RX_RING_SIZE; in sh_eth_rx()
913 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; in sh_eth_rx()
960 entry = (++mdp->cur_rx) % RX_RING_SIZE; in sh_eth_rx()
966 entry = mdp->dirty_rx % RX_RING_SIZE; in sh_eth_rx()
[all …]

12