Lines Matching refs:rp
3270 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) in niu_hash_rxaddr() argument
3278 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, in niu_find_rxpage() argument
3281 unsigned int h = niu_hash_rxaddr(rp, addr); in niu_find_rxpage()
3285 pp = &rp->rxhash[h]; in niu_find_rxpage()
3298 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) in niu_hash_page() argument
3300 unsigned int h = niu_hash_rxaddr(rp, base); in niu_hash_page()
3303 page->mapping = (struct address_space *) rp->rxhash[h]; in niu_hash_page()
3304 rp->rxhash[h] = page; in niu_hash_page()
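
niu_hash_rxaddr(), niu_find_rxpage() and niu_hash_page() above form a small chained hash table keyed by the buffer's DMA address: the listing shows the chain pointer being stashed in page->mapping, and further down (in niu_process_rx_pkt()) the block's base address living in page->index. A minimal standalone C model of that chaining scheme, with a plain struct standing in for struct page and hypothetical names, bucket count, and a 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NBUCKETS 64                    /* stand-in for the driver's table size */

    /* Stand-in for struct page: "next" plays the role of page->mapping,
     * "base" the role of page->index (base DMA address of the block). */
    struct rxpage {
        struct rxpage *next;
        uint64_t base;
    };

    static struct rxpage *buckets[NBUCKETS];

    /* Hash a DMA address down to a bucket (page number, masked). */
    static unsigned int hash_rxaddr(uint64_t addr)
    {
        return (unsigned int)(addr >> 12) & (NBUCKETS - 1);
    }

    static void hash_page(struct rxpage *p, uint64_t base)
    {
        unsigned int h = hash_rxaddr(base);

        p->base = base;
        p->next = buckets[h];              /* chain through the "mapping" slot */
        buckets[h] = p;
    }

    /* Find the page covering addr; *link is set to the slot that points at it
     * so the caller can unlink the page in O(1), as niu_find_rxpage() does. */
    static struct rxpage *find_rxpage(uint64_t addr, struct rxpage ***link)
    {
        struct rxpage **pp = &buckets[hash_rxaddr(addr)];
        struct rxpage *p;

        for (; (p = *pp) != NULL; pp = &p->next) {
            if (p->base == (addr & ~0xfffULL)) {
                *link = pp;
                return p;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct rxpage *pg = calloc(1, sizeof(*pg));
        struct rxpage **link;

        if (!pg)
            return 1;
        hash_page(pg, 0x10000);
        if (find_rxpage(0x10200, &link)) {
            printf("found page covering addr 0x10200\n");
            *link = pg->next;              /* unlink, like the RX consume path */
        }
        free(pg);
        return 0;
    }
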
3307 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, in niu_rbr_add_page() argument
3325 niu_hash_page(rp, page, addr); in niu_rbr_add_page()
3326 if (rp->rbr_blocks_per_page > 1) in niu_rbr_add_page()
3327 page_ref_add(page, rp->rbr_blocks_per_page - 1); in niu_rbr_add_page()
3329 for (i = 0; i < rp->rbr_blocks_per_page; i++) { in niu_rbr_add_page()
3330 __le32 *rbr = &rp->rbr[start_index + i]; in niu_rbr_add_page()
3333 addr += rp->rbr_block_size; in niu_rbr_add_page()
3339 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) in niu_rbr_refill() argument
3341 int index = rp->rbr_index; in niu_rbr_refill()
3343 rp->rbr_pending++; in niu_rbr_refill()
3344 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { in niu_rbr_refill()
3345 int err = niu_rbr_add_page(np, rp, mask, index); in niu_rbr_refill()
3348 rp->rbr_pending--; in niu_rbr_refill()
3352 rp->rbr_index += rp->rbr_blocks_per_page; in niu_rbr_refill()
3353 BUG_ON(rp->rbr_index > rp->rbr_table_size); in niu_rbr_refill()
3354 if (rp->rbr_index == rp->rbr_table_size) in niu_rbr_refill()
3355 rp->rbr_index = 0; in niu_rbr_refill()
3357 if (rp->rbr_pending >= rp->rbr_kick_thresh) { in niu_rbr_refill()
3358 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); in niu_rbr_refill()
3359 rp->rbr_pending = 0; in niu_rbr_refill()
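
niu_rbr_refill() batches buffer posting to the receive block ring: it counts pending buffers, allocates one page's worth of descriptors every rbr_blocks_per_page entries, wraps rbr_index at rbr_table_size, and writes the RBR_KICK doorbell only once rbr_pending reaches rbr_kick_thresh. A rough standalone model of that pacing, with made-up sizes and the page allocation and register write replaced by printf():

    #include <stdio.h>

    #define TABLE_SIZE    256              /* stand-in for rp->rbr_table_size */
    #define BLOCKS_PER_PG   4              /* stand-in for rp->rbr_blocks_per_page */
    #define KICK_THRESH    16              /* stand-in for rp->rbr_kick_thresh */

    static int rbr_index, rbr_pending;

    static int add_page(int index)
    {
        /* the driver allocates a page and fills BLOCKS_PER_PG descriptors
         * starting at index; here we only log the action */
        printf("post descriptors %d..%d\n", index, index + BLOCKS_PER_PG - 1);
        return 0;
    }

    static void rbr_refill(void)
    {
        int index = rbr_index;

        rbr_pending++;
        if ((rbr_pending % BLOCKS_PER_PG) == 0) {
            if (add_page(index)) {
                rbr_pending--;             /* allocation failed: try again later */
                return;
            }
            rbr_index += BLOCKS_PER_PG;
            if (rbr_index == TABLE_SIZE)
                rbr_index = 0;             /* ring wraps */
        }

        if (rbr_pending >= KICK_THRESH) {
            printf("kick hardware with %d new buffers\n", rbr_pending);
            rbr_pending = 0;               /* the driver writes RBR_KICK here */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 40; i++)
            rbr_refill();
        return 0;
    }
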
3364 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) in niu_rx_pkt_ignore() argument
3366 unsigned int index = rp->rcr_index; in niu_rx_pkt_ignore()
3369 rp->rx_dropped++; in niu_rx_pkt_ignore()
3377 val = le64_to_cpup(&rp->rcr[index]); in niu_rx_pkt_ignore()
3380 page = niu_find_rxpage(rp, addr, &link); in niu_rx_pkt_ignore()
3382 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_rx_pkt_ignore()
3391 rp->rbr_refill_pending++; in niu_rx_pkt_ignore()
3394 index = NEXT_RCR(rp, index); in niu_rx_pkt_ignore()
3399 rp->rcr_index = index; in niu_rx_pkt_ignore()
3405 struct rx_ring_info *rp) in niu_process_rx_pkt() argument
3407 unsigned int index = rp->rcr_index; in niu_process_rx_pkt()
3414 return niu_rx_pkt_ignore(np, rp); in niu_process_rx_pkt()
3424 val = le64_to_cpup(&rp->rcr[index]); in niu_process_rx_pkt()
3432 page = niu_find_rxpage(rp, addr, &link); in niu_process_rx_pkt()
3434 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_process_rx_pkt()
3453 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { in niu_process_rx_pkt()
3459 rp->rbr_refill_pending++; in niu_process_rx_pkt()
3463 index = NEXT_RCR(rp, index); in niu_process_rx_pkt()
3468 rp->rcr_index = index; in niu_process_rx_pkt()
3484 rp->rx_packets++; in niu_process_rx_pkt()
3485 rp->rx_bytes += skb->len; in niu_process_rx_pkt()
3488 skb_record_rx_queue(skb, rp->rx_channel); in niu_process_rx_pkt()
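
In niu_process_rx_pkt() the buffer size rcr_size is picked out of rp->rbr_sizes[] by the completion entry's PKTBUFSZ field, and the test (page->index + rp->rbr_block_size) - rcr_size == addr asks whether this completion used the final rcr_size-byte buffer carved out of the block; only then is the page unhashed and counted toward rbr_refill_pending. A quick numeric illustration with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t base = 0x10000;           /* page->index: block's base DMA address */
        uint64_t block_size = 8192;        /* rp->rbr_block_size */
        uint64_t rcr_size = 2048;          /* size selected from rp->rbr_sizes[] */

        /* walk the four 2 KiB buffers carved out of this 8 KiB block */
        for (uint64_t addr = base; addr < base + block_size; addr += rcr_size) {
            int last = ((base + block_size) - rcr_size == addr);
            printf("addr 0x%llx -> %s\n", (unsigned long long)addr,
                   last ? "last buffer, unhash page" : "keep page hashed");
        }
        return 0;
    }
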
3494 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) in niu_rbr_fill() argument
3496 int blocks_per_page = rp->rbr_blocks_per_page; in niu_rbr_fill()
3497 int err, index = rp->rbr_index; in niu_rbr_fill()
3500 while (index < (rp->rbr_table_size - blocks_per_page)) { in niu_rbr_fill()
3501 err = niu_rbr_add_page(np, rp, mask, index); in niu_rbr_fill()
3508 rp->rbr_index = index; in niu_rbr_fill()
3512 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) in niu_rbr_free() argument
3519 page = rp->rxhash[i]; in niu_rbr_free()
3535 for (i = 0; i < rp->rbr_table_size; i++) in niu_rbr_free()
3536 rp->rbr[i] = cpu_to_le32(0); in niu_rbr_free()
3537 rp->rbr_index = 0; in niu_rbr_free()
3540 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) in release_tx_packet() argument
3542 struct tx_buff_info *tb = &rp->tx_buffs[idx]; in release_tx_packet()
3551 rp->tx_packets++; in release_tx_packet()
3552 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - in release_tx_packet()
3559 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) in release_tx_packet()
3560 rp->mark_pending--; in release_tx_packet()
3564 idx = NEXT_TX(rp, idx); in release_tx_packet()
3569 tb = &rp->tx_buffs[idx]; in release_tx_packet()
3574 idx = NEXT_TX(rp, idx); in release_tx_packet()
3582 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) argument
3584 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) in niu_tx_work() argument
3591 index = (rp - np->tx_rings); in niu_tx_work()
3594 cs = rp->tx_cs; in niu_tx_work()
3599 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & in niu_tx_work()
3602 rp->last_pkt_cnt = tmp; in niu_tx_work()
3604 cons = rp->cons; in niu_tx_work()
3610 cons = release_tx_packet(np, rp, cons); in niu_tx_work()
3612 rp->cons = cons; in niu_tx_work()
3617 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { in niu_tx_work()
3620 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) in niu_tx_work()
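
niu_tx_work() reclaims completed transmit descriptors for one channel: it reads the hardware packet count out of tx_cs, takes the delta against last_pkt_cnt (the counter wraps, hence the masking), advances the consumer index that many packets via release_tx_packet(), and wakes the netif queue only once more than NIU_TX_WAKEUP_THRESH (a quarter of the ring) is free. A standalone sketch of the consumer-side bookkeeping, with a hypothetical ring size and the hardware reads stubbed out:

    #include <stdio.h>

    #define RING_SIZE     256              /* stand-in for MAX_TX_RING_SIZE */
    #define PENDING       RING_SIZE        /* rp->pending */
    #define WAKEUP_THRESH (PENDING / 4)    /* NIU_TX_WAKEUP_THRESH(rp) */

    static unsigned int prod, cons;

    static unsigned int tx_avail(void)
    {
        /* free descriptors = ring size minus in-flight entries */
        return PENDING - ((prod - cons) & (RING_SIZE - 1));
    }

    static void tx_work(unsigned int completed_pkts, unsigned int descs_per_pkt)
    {
        while (completed_pkts--)           /* release_tx_packet() per packet */
            cons = (cons + descs_per_pkt) & (RING_SIZE - 1);

        if (tx_avail() > WAKEUP_THRESH)
            printf("queue may be woken: %u descriptors free\n", tx_avail());
    }

    int main(void)
    {
        prod = 200;                        /* 200 single-descriptor packets queued */
        tx_work(150, 1);                   /* hardware reports 150 completed */
        return 0;
    }
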
3627 struct rx_ring_info *rp, in niu_sync_rx_discard_stats() argument
3641 int rx_channel = rp->rx_channel; in niu_sync_rx_discard_stats()
3652 rp->rx_errors += misc & RXMISC_COUNT; in niu_sync_rx_discard_stats()
3667 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; in niu_sync_rx_discard_stats()
3679 struct rx_ring_info *rp, int budget) in niu_rx_work() argument
3682 struct rxdma_mailbox *mbox = rp->mbox; in niu_rx_work()
3686 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_work()
3687 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; in niu_rx_work()
3697 __func__, rp->rx_channel, (unsigned long long)stat, qlen); in niu_rx_work()
3702 rcr_done += niu_process_rx_pkt(napi, np, rp); in niu_rx_work()
3706 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { in niu_rx_work()
3709 for (i = 0; i < rp->rbr_refill_pending; i++) in niu_rx_work()
3710 niu_rbr_refill(np, rp, GFP_ATOMIC); in niu_rx_work()
3711 rp->rbr_refill_pending = 0; in niu_rx_work()
3718 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); in niu_rx_work()
3722 niu_sync_rx_discard_stats(np, rp, 0x7FFF); in niu_rx_work()
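
niu_rx_work() is the NAPI poll body for one RX channel: it reads the RCR queue length, caps it at the NAPI budget, processes that many completions, and defers buffer replenishment until rbr_refill_pending crosses the kick threshold (the refills use GFP_ATOMIC because this runs in softirq context). A rough model of the budget capping and deferred refill, with hardware access and the refill itself stubbed:

    #include <stdio.h>

    #define KICK_THRESH 16                 /* stand-in for rp->rbr_kick_thresh */

    static int rbr_refill_pending;

    static int process_one_packet(void)
    {
        /* in the driver, consuming a page's last buffer bumps
         * rbr_refill_pending; the model pretends every packet does */
        rbr_refill_pending++;
        return 1;                          /* one completion consumed */
    }

    static int rx_work(int qlen, int budget)
    {
        int work_done = 0;

        if (qlen > budget)
            qlen = budget;                 /* never exceed the NAPI budget */

        while (work_done < qlen)
            work_done += process_one_packet();

        if (rbr_refill_pending >= KICK_THRESH) {
            printf("refill %d buffers (GFP_ATOMIC)\n", rbr_refill_pending);
            rbr_refill_pending = 0;
        }
        return work_done;
    }

    int main(void)
    {
        printf("did %d of 40 completions\n", rx_work(40, 16));
        return 0;
    }
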
3738 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_poll_core() local
3739 if (tx_vec & (1 << rp->tx_channel)) in niu_poll_core()
3740 niu_tx_work(np, rp); in niu_poll_core()
3741 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); in niu_poll_core()
3745 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_poll_core() local
3747 if (rx_vec & (1 << rp->rx_channel)) { in niu_poll_core()
3750 this_work_done = niu_rx_work(&lp->napi, np, rp, in niu_poll_core()
3756 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); in niu_poll_core()
3777 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, in niu_log_rxchan_errors() argument
3780 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); in niu_log_rxchan_errors()
3814 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) in niu_rx_error() argument
3816 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_error()
3826 rp->rx_channel, in niu_rx_error()
3829 niu_log_rxchan_errors(np, rp, stat); in niu_rx_error()
3832 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_rx_error()
3838 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, in niu_log_txchan_errors() argument
3841 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); in niu_log_txchan_errors()
3863 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) in niu_tx_error() argument
3867 cs = nr64(TX_CS(rp->tx_channel)); in niu_tx_error()
3868 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); in niu_tx_error()
3869 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); in niu_tx_error()
3872 rp->tx_channel, in niu_tx_error()
3877 niu_log_txchan_errors(np, rp, cs); in niu_tx_error()
4072 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_slowpath_interrupt() local
4074 if (rx_vec & (1 << rp->rx_channel)) { in niu_slowpath_interrupt()
4075 int r = niu_rx_error(np, rp); in niu_slowpath_interrupt()
4080 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_slowpath_interrupt()
4090 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_slowpath_interrupt() local
4092 if (tx_vec & (1 << rp->tx_channel)) { in niu_slowpath_interrupt()
4093 int r = niu_tx_error(np, rp); in niu_slowpath_interrupt()
4123 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, in niu_rxchan_intr() argument
4126 struct rxdma_mailbox *mbox = rp->mbox; in niu_rxchan_intr()
4131 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); in niu_rxchan_intr()
4137 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, in niu_txchan_intr() argument
4140 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); in niu_txchan_intr()
4143 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); in niu_txchan_intr()
4156 struct rx_ring_info *rp = &np->rx_rings[i]; in __niu_fastpath_interrupt() local
4157 int ldn = LDN_RXDMA(rp->rx_channel); in __niu_fastpath_interrupt()
4163 if (rx_vec & (1 << rp->rx_channel)) in __niu_fastpath_interrupt()
4164 niu_rxchan_intr(np, rp, ldn); in __niu_fastpath_interrupt()
4168 struct tx_ring_info *rp = &np->tx_rings[i]; in __niu_fastpath_interrupt() local
4169 int ldn = LDN_TXDMA(rp->tx_channel); in __niu_fastpath_interrupt()
4175 if (tx_vec & (1 << rp->tx_channel)) in __niu_fastpath_interrupt()
4176 niu_txchan_intr(np, rp, ldn); in __niu_fastpath_interrupt()
4236 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) in niu_free_rx_ring_info() argument
4238 if (rp->mbox) { in niu_free_rx_ring_info()
4241 rp->mbox, rp->mbox_dma); in niu_free_rx_ring_info()
4242 rp->mbox = NULL; in niu_free_rx_ring_info()
4244 if (rp->rcr) { in niu_free_rx_ring_info()
4247 rp->rcr, rp->rcr_dma); in niu_free_rx_ring_info()
4248 rp->rcr = NULL; in niu_free_rx_ring_info()
4249 rp->rcr_table_size = 0; in niu_free_rx_ring_info()
4250 rp->rcr_index = 0; in niu_free_rx_ring_info()
4252 if (rp->rbr) { in niu_free_rx_ring_info()
4253 niu_rbr_free(np, rp); in niu_free_rx_ring_info()
4257 rp->rbr, rp->rbr_dma); in niu_free_rx_ring_info()
4258 rp->rbr = NULL; in niu_free_rx_ring_info()
4259 rp->rbr_table_size = 0; in niu_free_rx_ring_info()
4260 rp->rbr_index = 0; in niu_free_rx_ring_info()
4262 kfree(rp->rxhash); in niu_free_rx_ring_info()
4263 rp->rxhash = NULL; in niu_free_rx_ring_info()
4266 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) in niu_free_tx_ring_info() argument
4268 if (rp->mbox) { in niu_free_tx_ring_info()
4271 rp->mbox, rp->mbox_dma); in niu_free_tx_ring_info()
4272 rp->mbox = NULL; in niu_free_tx_ring_info()
4274 if (rp->descr) { in niu_free_tx_ring_info()
4278 if (rp->tx_buffs[i].skb) in niu_free_tx_ring_info()
4279 (void) release_tx_packet(np, rp, i); in niu_free_tx_ring_info()
4284 rp->descr, rp->descr_dma); in niu_free_tx_ring_info()
4285 rp->descr = NULL; in niu_free_tx_ring_info()
4286 rp->pending = 0; in niu_free_tx_ring_info()
4287 rp->prod = 0; in niu_free_tx_ring_info()
4288 rp->cons = 0; in niu_free_tx_ring_info()
4289 rp->wrap_bit = 0; in niu_free_tx_ring_info()
4299 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_free_channels() local
4301 niu_free_rx_ring_info(np, rp); in niu_free_channels()
4310 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_free_channels() local
4312 niu_free_tx_ring_info(np, rp); in niu_free_channels()
4321 struct rx_ring_info *rp) in niu_alloc_rx_ring_info() argument
4325 rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), in niu_alloc_rx_ring_info()
4327 if (!rp->rxhash) in niu_alloc_rx_ring_info()
4330 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4332 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4333 if (!rp->mbox) in niu_alloc_rx_ring_info()
4335 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_rx_ring_info()
4337 rp->mbox); in niu_alloc_rx_ring_info()
4341 rp->rcr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4343 &rp->rcr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4344 if (!rp->rcr) in niu_alloc_rx_ring_info()
4346 if ((unsigned long)rp->rcr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4348 rp->rcr); in niu_alloc_rx_ring_info()
4351 rp->rcr_table_size = MAX_RCR_RING_SIZE; in niu_alloc_rx_ring_info()
4352 rp->rcr_index = 0; in niu_alloc_rx_ring_info()
4354 rp->rbr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4356 &rp->rbr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4357 if (!rp->rbr) in niu_alloc_rx_ring_info()
4359 if ((unsigned long)rp->rbr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4361 rp->rbr); in niu_alloc_rx_ring_info()
4364 rp->rbr_table_size = MAX_RBR_RING_SIZE; in niu_alloc_rx_ring_info()
4365 rp->rbr_index = 0; in niu_alloc_rx_ring_info()
4366 rp->rbr_pending = 0; in niu_alloc_rx_ring_info()
4371 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) in niu_set_max_burst() argument
4378 rp->max_burst = mtu + 32; in niu_set_max_burst()
4379 if (rp->max_burst > 4096) in niu_set_max_burst()
4380 rp->max_burst = 4096; in niu_set_max_burst()
4384 struct tx_ring_info *rp) in niu_alloc_tx_ring_info() argument
4388 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4390 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4391 if (!rp->mbox) in niu_alloc_tx_ring_info()
4393 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_tx_ring_info()
4395 rp->mbox); in niu_alloc_tx_ring_info()
4399 rp->descr = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4401 &rp->descr_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4402 if (!rp->descr) in niu_alloc_tx_ring_info()
4404 if ((unsigned long)rp->descr & (64UL - 1)) { in niu_alloc_tx_ring_info()
4406 rp->descr); in niu_alloc_tx_ring_info()
4410 rp->pending = MAX_TX_RING_SIZE; in niu_alloc_tx_ring_info()
4411 rp->prod = 0; in niu_alloc_tx_ring_info()
4412 rp->cons = 0; in niu_alloc_tx_ring_info()
4413 rp->wrap_bit = 0; in niu_alloc_tx_ring_info()
4416 rp->mark_freq = rp->pending / 4; in niu_alloc_tx_ring_info()
4418 niu_set_max_burst(np, rp); in niu_alloc_tx_ring_info()
4423 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) in niu_size_rbr() argument
4429 rp->rbr_block_size = 1 << bss; in niu_size_rbr()
4430 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); in niu_size_rbr()
4432 rp->rbr_sizes[0] = 256; in niu_size_rbr()
4433 rp->rbr_sizes[1] = 1024; in niu_size_rbr()
4437 rp->rbr_sizes[2] = 4096; in niu_size_rbr()
4441 rp->rbr_sizes[2] = 8192; in niu_size_rbr()
4445 rp->rbr_sizes[2] = 2048; in niu_size_rbr()
4447 rp->rbr_sizes[3] = rp->rbr_block_size; in niu_size_rbr()
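
niu_size_rbr() derives the buffer geometry from a block-size shift bss: the block is 1 << bss bytes, one page holds 1 << (PAGE_SHIFT - bss) blocks, and rbr_sizes[] lists the four packet-buffer sizes the hardware can select per completion (256, 1024, an MTU-dependent middle size of 2048/4096/8192, and the full block). A small worked computation; the shift values below are assumptions chosen only to make the arithmetic visible:

    #include <stdio.h>

    int main(void)
    {
        /* assumed values: a 64 KiB page with bss clamped below PAGE_SHIFT */
        unsigned int page_shift = 16;
        unsigned int bss = 15;

        unsigned int rbr_block_size      = 1u << bss;                /* 32768 */
        unsigned int rbr_blocks_per_page = 1u << (page_shift - bss); /* 2 */

        /* buffer sizes indexed by the RCR entry's PKTBUFSZ field; the
         * middle entry depends on MTU in the driver */
        unsigned int rbr_sizes[4] = { 256, 1024, 4096, rbr_block_size };

        printf("block %u bytes, %u blocks per page, sizes %u/%u/%u/%u\n",
               rbr_block_size, rbr_blocks_per_page,
               rbr_sizes[0], rbr_sizes[1], rbr_sizes[2], rbr_sizes[3]);
        return 0;
    }
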
4482 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_alloc_channels() local
4484 rp->np = np; in niu_alloc_channels()
4485 rp->rx_channel = first_rx_channel + i; in niu_alloc_channels()
4487 err = niu_alloc_rx_ring_info(np, rp); in niu_alloc_channels()
4491 niu_size_rbr(np, rp); in niu_alloc_channels()
4494 rp->nonsyn_window = 64; in niu_alloc_channels()
4495 rp->nonsyn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4496 rp->syn_window = 64; in niu_alloc_channels()
4497 rp->syn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4498 rp->rcr_pkt_threshold = 16; in niu_alloc_channels()
4499 rp->rcr_timeout = 8; in niu_alloc_channels()
4500 rp->rbr_kick_thresh = RBR_REFILL_MIN; in niu_alloc_channels()
4501 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) in niu_alloc_channels()
4502 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; in niu_alloc_channels()
4504 err = niu_rbr_fill(np, rp, GFP_KERNEL); in niu_alloc_channels()
4522 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_alloc_channels() local
4524 rp->np = np; in niu_alloc_channels()
4525 rp->tx_channel = first_tx_channel + i; in niu_alloc_channels()
4527 err = niu_alloc_tx_ring_info(np, rp); in niu_alloc_channels()
4653 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_init_one_tx_channel() argument
4655 int err, channel = rp->tx_channel; in niu_init_one_tx_channel()
4670 nw64(TXC_DMA_MAX(channel), rp->max_burst); in niu_init_one_tx_channel()
4673 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | in niu_init_one_tx_channel()
4676 channel, (unsigned long long)rp->descr_dma); in niu_init_one_tx_channel()
4685 ring_len = (rp->pending / 8); in niu_init_one_tx_channel()
4688 rp->descr_dma); in niu_init_one_tx_channel()
4691 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || in niu_init_one_tx_channel()
4692 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { in niu_init_one_tx_channel()
4694 channel, (unsigned long long)rp->mbox_dma); in niu_init_one_tx_channel()
4697 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); in niu_init_one_tx_channel()
4698 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); in niu_init_one_tx_channel()
4702 rp->last_pkt_cnt = 0; in niu_init_one_tx_channel()
4793 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) in niu_rx_channel_wred_init() argument
4797 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | in niu_rx_channel_wred_init()
4798 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | in niu_rx_channel_wred_init()
4799 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | in niu_rx_channel_wred_init()
4800 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); in niu_rx_channel_wred_init()
4801 nw64(RDC_RED_PARA(rp->rx_channel), val); in niu_rx_channel_wred_init()
4804 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) in niu_compute_rbr_cfig_b() argument
4809 switch (rp->rbr_block_size) { in niu_compute_rbr_cfig_b()
4826 switch (rp->rbr_sizes[2]) { in niu_compute_rbr_cfig_b()
4844 switch (rp->rbr_sizes[1]) { in niu_compute_rbr_cfig_b()
4862 switch (rp->rbr_sizes[0]) { in niu_compute_rbr_cfig_b()
4906 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_init_one_rx_channel() argument
4908 int err, channel = rp->rx_channel; in niu_init_one_rx_channel()
4919 niu_rx_channel_wred_init(np, rp); in niu_init_one_rx_channel()
4927 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); in niu_init_one_rx_channel()
4929 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | in niu_init_one_rx_channel()
4932 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4933 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); in niu_init_one_rx_channel()
4934 err = niu_compute_rbr_cfig_b(rp, &val); in niu_init_one_rx_channel()
4939 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4940 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); in niu_init_one_rx_channel()
4942 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | in niu_init_one_rx_channel()
4944 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); in niu_init_one_rx_channel()
4950 nw64(RBR_KICK(channel), rp->rbr_index); in niu_init_one_rx_channel()
4980 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_init_rx_channels() local
4982 err = niu_init_one_rx_channel(np, rp); in niu_init_rx_channels()
5819 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_stop_one_tx_channel() argument
5821 (void) niu_tx_channel_stop(np, rp->tx_channel); in niu_stop_one_tx_channel()
5829 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_stop_tx_channels() local
5831 niu_stop_one_tx_channel(np, rp); in niu_stop_tx_channels()
5835 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_reset_one_tx_channel() argument
5837 (void) niu_tx_channel_reset(np, rp->tx_channel); in niu_reset_one_tx_channel()
5845 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_tx_channels() local
5847 niu_reset_one_tx_channel(np, rp); in niu_reset_tx_channels()
5851 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_stop_one_rx_channel() argument
5853 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); in niu_stop_one_rx_channel()
5861 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_stop_rx_channels() local
5863 niu_stop_one_rx_channel(np, rp); in niu_stop_rx_channels()
5867 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_reset_one_rx_channel() argument
5869 int channel = rp->rx_channel; in niu_reset_one_rx_channel()
5882 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_rx_channels() local
5884 niu_reset_one_rx_channel(np, rp); in niu_reset_rx_channels()
5928 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_init_hw() local
5930 err = niu_init_one_tx_channel(np, rp); in niu_init_hw()
6231 struct rx_ring_info *rp = &rx_rings[i]; in niu_get_rx_stats() local
6233 niu_sync_rx_discard_stats(np, rp, 0); in niu_get_rx_stats()
6235 pkts += rp->rx_packets; in niu_get_rx_stats()
6236 bytes += rp->rx_bytes; in niu_get_rx_stats()
6237 dropped += rp->rx_dropped; in niu_get_rx_stats()
6238 errors += rp->rx_errors; in niu_get_rx_stats()
6262 struct tx_ring_info *rp = &tx_rings[i]; in niu_get_tx_stats() local
6264 pkts += rp->tx_packets; in niu_get_tx_stats()
6265 bytes += rp->tx_bytes; in niu_get_tx_stats()
6266 errors += rp->tx_errors; in niu_get_tx_stats()
6436 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_buffers() local
6441 page = rp->rxhash[j]; in niu_reset_buffers()
6447 rp->rbr[k++] = cpu_to_le32(base); in niu_reset_buffers()
6452 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); in niu_reset_buffers()
6457 rp->rbr_index = rp->rbr_table_size - 1; in niu_reset_buffers()
6458 rp->rcr_index = 0; in niu_reset_buffers()
6459 rp->rbr_pending = 0; in niu_reset_buffers()
6460 rp->rbr_refill_pending = 0; in niu_reset_buffers()
6465 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_buffers() local
6468 if (rp->tx_buffs[j].skb) in niu_reset_buffers()
6469 (void) release_tx_packet(np, rp, j); in niu_reset_buffers()
6472 rp->pending = MAX_TX_RING_SIZE; in niu_reset_buffers()
6473 rp->prod = 0; in niu_reset_buffers()
6474 rp->cons = 0; in niu_reset_buffers()
6475 rp->wrap_bit = 0; in niu_reset_buffers()
6528 static void niu_set_txd(struct tx_ring_info *rp, int index, in niu_set_txd() argument
6532 __le64 *desc = &rp->descr[index]; in niu_set_txd()
6611 struct tx_ring_info *rp; in niu_start_xmit() local
6619 rp = &np->tx_rings[i]; in niu_start_xmit()
6622 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { in niu_start_xmit()
6625 rp->tx_errors++; in niu_start_xmit()
6658 prod = rp->prod; in niu_start_xmit()
6660 rp->tx_buffs[prod].skb = skb; in niu_start_xmit()
6661 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6664 if (++rp->mark_counter == rp->mark_freq) { in niu_start_xmit()
6665 rp->mark_counter = 0; in niu_start_xmit()
6667 rp->mark_pending++; in niu_start_xmit()
6683 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); in niu_start_xmit()
6686 prod = NEXT_TX(rp, prod); in niu_start_xmit()
6699 rp->tx_buffs[prod].skb = NULL; in niu_start_xmit()
6700 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6702 niu_set_txd(rp, prod, mapping, len, 0, 0); in niu_start_xmit()
6704 prod = NEXT_TX(rp, prod); in niu_start_xmit()
6707 if (prod < rp->prod) in niu_start_xmit()
6708 rp->wrap_bit ^= TX_RING_KICK_WRAP; in niu_start_xmit()
6709 rp->prod = prod; in niu_start_xmit()
6711 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); in niu_start_xmit()
6713 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { in niu_start_xmit()
6715 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) in niu_start_xmit()
6723 rp->tx_errors++; in niu_start_xmit()
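
The transmit path in niu_start_xmit() keeps a software producer index plus a wrap bit: when the new prod value comes out numerically smaller than the old one the ring has wrapped, wrap_bit is toggled, and the doorbell written to TX_RING_KICK is the wrap bit OR'd with the producer index shifted left by 3. A minimal model of that doorbell encoding; the ring size and the bit position used for TX_RING_KICK_WRAP are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE         256          /* assumed ring size */
    #define TX_RING_KICK_WRAP (1ULL << 19) /* assumed bit position */

    static unsigned int prod;              /* rp->prod */
    static uint64_t wrap_bit;              /* rp->wrap_bit */

    static uint64_t queue_descriptors(unsigned int n)
    {
        unsigned int new_prod = (prod + n) & (RING_SIZE - 1);

        if (new_prod < prod)               /* ring wrapped past the end */
            wrap_bit ^= TX_RING_KICK_WRAP;
        prod = new_prod;

        /* value the driver writes to TX_RING_KICK(channel) */
        return wrap_bit | ((uint64_t)prod << 3);
    }

    int main(void)
    {
        printf("kick = 0x%llx\n", (unsigned long long)queue_descriptors(200));
        printf("kick = 0x%llx\n", (unsigned long long)queue_descriptors(100));
        return 0;
    }
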
7790 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_get_ethtool_stats() local
7792 niu_sync_rx_discard_stats(np, rp, 0); in niu_get_ethtool_stats()
7794 data[0] = rp->rx_channel; in niu_get_ethtool_stats()
7795 data[1] = rp->rx_packets; in niu_get_ethtool_stats()
7796 data[2] = rp->rx_bytes; in niu_get_ethtool_stats()
7797 data[3] = rp->rx_dropped; in niu_get_ethtool_stats()
7798 data[4] = rp->rx_errors; in niu_get_ethtool_stats()
7802 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_get_ethtool_stats() local
7804 data[0] = rp->tx_channel; in niu_get_ethtool_stats()
7805 data[1] = rp->tx_packets; in niu_get_ethtool_stats()
7806 data[2] = rp->tx_bytes; in niu_get_ethtool_stats()
7807 data[3] = rp->tx_errors; in niu_get_ethtool_stats()
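
niu_get_ethtool_stats() lays the per-ring counters out flat in the data[] array: five u64 values per RX ring (channel, packets, bytes, dropped, errors) followed by four per TX ring (channel, packets, bytes, errors). A tiny sketch of that packing; the struct names are stand-ins for the driver's ring structures:

    #include <stdint.h>
    #include <stdio.h>

    struct rx_stats { uint64_t channel, packets, bytes, dropped, errors; };
    struct tx_stats { uint64_t channel, packets, bytes, errors; };

    /* Pack per-ring counters: 5 slots per RX ring, then 4 per TX ring. */
    static size_t pack_stats(uint64_t *data,
                             const struct rx_stats *rx, size_t nrx,
                             const struct tx_stats *tx, size_t ntx)
    {
        size_t n = 0;

        for (size_t i = 0; i < nrx; i++) {
            data[n++] = rx[i].channel;
            data[n++] = rx[i].packets;
            data[n++] = rx[i].bytes;
            data[n++] = rx[i].dropped;
            data[n++] = rx[i].errors;
        }
        for (size_t i = 0; i < ntx; i++) {
            data[n++] = tx[i].channel;
            data[n++] = tx[i].packets;
            data[n++] = tx[i].bytes;
            data[n++] = tx[i].errors;
        }
        return n;
    }

    int main(void)
    {
        struct rx_stats rx[1] = { { 0, 100, 64000, 2, 1 } };
        struct tx_stats tx[1] = { { 0, 90, 58000, 0 } };
        uint64_t data[16];

        printf("packed %zu counters\n", pack_stats(data, rx, 1, tx, 1));
        return 0;
    }
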