Lines Matching refs:qdev

185 static int ql_update_ring_coalescing(struct ql_adapter *qdev)  in ql_update_ring_coalescing()  argument
191 if (!netif_running(qdev->ndev)) in ql_update_ring_coalescing()
197 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; in ql_update_ring_coalescing()
198 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || in ql_update_ring_coalescing()
200 qdev->tx_max_coalesced_frames) { in ql_update_ring_coalescing()
201 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { in ql_update_ring_coalescing()
202 rx_ring = &qdev->rx_ring[i]; in ql_update_ring_coalescing()
204 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); in ql_update_ring_coalescing()
206 cpu_to_le16(qdev->tx_max_coalesced_frames); in ql_update_ring_coalescing()
208 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), in ql_update_ring_coalescing()
211 netif_err(qdev, ifup, qdev->ndev, in ql_update_ring_coalescing()
219 cqicb = (struct cqicb *)&qdev->rx_ring[0]; in ql_update_ring_coalescing()
220 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || in ql_update_ring_coalescing()
222 qdev->rx_max_coalesced_frames) { in ql_update_ring_coalescing()
223 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { in ql_update_ring_coalescing()
224 rx_ring = &qdev->rx_ring[i]; in ql_update_ring_coalescing()
226 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); in ql_update_ring_coalescing()
228 cpu_to_le16(qdev->rx_max_coalesced_frames); in ql_update_ring_coalescing()
230 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), in ql_update_ring_coalescing()
233 netif_err(qdev, ifup, qdev->ndev, in ql_update_ring_coalescing()
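
Read together, the matched lines above give the shape of ql_update_ring_coalescing(): if the requested TX coalescing values differ from what is already programmed in the first outbound completion ring's CQICB, every outbound ring's CQICB is rewritten through ql_write_cfg(), and the inbound (RSS) rings then get the same treatment with the RX values. The following is a hedged reconstruction of the TX half; the pkt_delay field, the FLAGS_LI and CFG_LCQ constants, the cq_id argument and the error path are assumptions rather than verbatim source.

static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
    int i, status = 0;
    struct rx_ring *rx_ring;
    struct cqicb *cqicb;

    if (!netif_running(qdev->ndev))
        return status;

    /* Outbound (TX completion) rings follow the RSS rings in rx_ring[]. */
    cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
    if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
        le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
        for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
            rx_ring = &qdev->rx_ring[i];
            cqicb = (struct cqicb *)rx_ring;
            cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
            cqicb->pkt_delay =
                cpu_to_le16(qdev->tx_max_coalesced_frames);
            cqicb->flags = FLAGS_LI;    /* assumed interrupt flag */
            status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
                                  CFG_LCQ, rx_ring->cq_id);
            if (status) {
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to load CQICB.\n");
                return status;
            }
        }
    }

    /* The inbound rings, rx_ring[0..rss_ring_count), are handled the same
     * way using rx_coalesce_usecs and rx_max_coalesced_frames. */
    return status;
}
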
243 static void ql_update_stats(struct ql_adapter *qdev) in ql_update_stats() argument
247 u64 *iter = &qdev->nic_stats.tx_pkts; in ql_update_stats()
249 spin_lock(&qdev->stats_lock); in ql_update_stats()
250 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { in ql_update_stats()
251 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
259 if (ql_read_xgmac_reg64(qdev, i, &data)) { in ql_update_stats()
260 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
273 if (ql_read_xgmac_reg64(qdev, i, &data)) { in ql_update_stats()
274 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
290 if (ql_read_xgmac_reg64(qdev, i, &data)) { in ql_update_stats()
291 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
304 if (ql_read_xgmac_reg64(qdev, i, &data)) { in ql_update_stats()
305 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
317 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { in ql_update_stats()
318 netif_err(qdev, drv, qdev->ndev, in ql_update_stats()
324 ql_sem_unlock(qdev, qdev->xg_sem_mask); in ql_update_stats()
326 spin_unlock(&qdev->stats_lock); in ql_update_stats()
328 QL_DUMP_STAT(qdev); in ql_update_stats()
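
The matched lines for ql_update_stats() show the overall pattern: take stats_lock, grab the XGMAC hardware semaphore, walk blocks of 64-bit statistics registers with ql_read_xgmac_reg64() into consecutive fields of qdev->nic_stats via the iter pointer, then drop both locks. A hedged sketch of that pattern follows; the register ranges, the placement of the lone 0x5b8 read and the goto labels are assumptions.

static void ql_update_stats(struct ql_adapter *qdev)
{
    u32 i;
    u64 data;
    u64 *iter = &qdev->nic_stats.tx_pkts;

    spin_lock(&qdev->stats_lock);
    if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
        netif_err(qdev, drv, qdev->ndev,
                  "Couldn't get xgmac sem.\n");
        goto quit;
    }
    /* TX MAC counters: consecutive 64-bit registers copied into nic_stats.
     * The 0x200/0x280 bounds are illustrative, not the driver's values. */
    for (i = 0x200; i < 0x280; i += 8, iter++) {
        if (ql_read_xgmac_reg64(qdev, i, &data)) {
            netif_err(qdev, drv, qdev->ndev,
                      "Error reading status register 0x%.04x.\n", i);
            goto end;
        }
        *iter = data;
    }
    /* RX MAC, per-CoS and the single 0x5b8 counter follow the same
     * read pattern before the semaphore is released. */
end:
    ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
    spin_unlock(&qdev->stats_lock);

    QL_DUMP_STAT(qdev);
}
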
364 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_ethtool_stats() local
368 ql_update_stats(qdev); in ql_get_ethtool_stats()
371 char *p = (char *)qdev + in ql_get_ethtool_stats()
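
ql_get_ethtool_stats() refreshes the hardware counters and then copies them out through a table of offsets into struct ql_adapter, which is what the "(char *)qdev +" arithmetic in the matched line implies. Sketch below; the table name ql_gstrings_stats, its fields and the QLGE_STATS_LEN macro are assumptions.

static void ql_get_ethtool_stats(struct net_device *ndev,
                                 struct ethtool_stats *stats, u64 *data)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    int index;

    ql_update_stats(qdev);

    for (index = 0; index < QLGE_STATS_LEN; index++) {
        /* Each table entry records an offset into struct ql_adapter. */
        char *p = (char *)qdev +
            ql_gstrings_stats[index].stat_offset;
        *data++ = (ql_gstrings_stats[index].sizeof_stat ==
                   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
    }
}
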
381 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_settings() local
386 if ((qdev->link_status & STS_LINK_TYPE_MASK) == in ql_get_settings()
407 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_drvinfo() local
413 (qdev->fw_rev_id & 0x00ff0000) >> 16, in ql_get_drvinfo()
414 (qdev->fw_rev_id & 0x0000ff00) >> 8, in ql_get_drvinfo()
415 (qdev->fw_rev_id & 0x000000ff)); in ql_get_drvinfo()
416 strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), in ql_get_drvinfo()
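
The drvinfo lines show the firmware revision packed one byte per field in qdev->fw_rev_id and the bus info taken from the PCI device name. A hedged sketch of ql_get_drvinfo(); the driver-name string and the exact version format are assumptions.

static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    strlcpy(drvinfo->driver, "qlge", sizeof(drvinfo->driver));
    snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
             "v%d.%d.%d",
             (qdev->fw_rev_id & 0x00ff0000) >> 16,
             (qdev->fw_rev_id & 0x0000ff00) >> 8,
             (qdev->fw_rev_id & 0x000000ff));
    strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
            sizeof(drvinfo->bus_info));
}
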
422 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_wol() local
423 unsigned short ssys_dev = qdev->pdev->subsystem_device; in ql_get_wol()
429 wol->wolopts = qdev->wol; in ql_get_wol()
435 struct ql_adapter *qdev = netdev_priv(ndev); in ql_set_wol() local
436 unsigned short ssys_dev = qdev->pdev->subsystem_device; in ql_set_wol()
441 netif_info(qdev, drv, qdev->ndev, in ql_set_wol()
447 qdev->wol = wol->wolopts; in ql_set_wol()
449 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); in ql_set_wol()
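
The Wake-on-LAN pair reads the PCI subsystem device ID, presumably because WoL is only available on certain subsystem IDs: the getter reports qdev->wol, while the setter refuses unsupported hardware with the netif_info() message and otherwise stores wol->wolopts. A hedged sketch of ql_set_wol(); the subsystem-ID constants and the WAKE_MAGIC restriction are assumptions.

static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    unsigned short ssys_dev = qdev->pdev->subsystem_device;

    /* Assumed: only specific mezzanine subsystem IDs support WoL. */
    if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
        ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
        netif_info(qdev, drv, qdev->ndev,
                   "WOL is only supported for mezz card\n");
        return -EOPNOTSUPP;
    }
    if (wol->wolopts & ~WAKE_MAGIC)
        return -EINVAL;

    qdev->wol = wol->wolopts;
    netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
    return 0;
}
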
457 struct ql_adapter *qdev = netdev_priv(ndev); in ql_set_phys_id() local
462 if (ql_mb_get_led_cfg(qdev)) in ql_set_phys_id()
466 ql_mb_set_led_cfg(qdev, QL_LED_BLINK); in ql_set_phys_id()
471 if (ql_mb_set_led_cfg(qdev, qdev->led_config)) in ql_set_phys_id()
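
ql_set_phys_id() implements the ethtool identify-LED operation: on activation it saves the current LED configuration via ql_mb_get_led_cfg() and starts blinking with QL_LED_BLINK; on deactivation it restores qdev->led_config. Sketch below; the switch over ethtool_phys_id_state and the return codes are assumptions.

static int ql_set_phys_id(struct net_device *ndev,
                          enum ethtool_phys_id_state state)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    switch (state) {
    case ETHTOOL_ID_ACTIVE:
        /* Save the current LED settings, then start blinking. */
        if (ql_mb_get_led_cfg(qdev))
            return -EIO;
        ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
        return 0;
    case ETHTOOL_ID_INACTIVE:
        /* Restore the saved LED settings. */
        if (ql_mb_set_led_cfg(qdev, qdev->led_config))
            return -EIO;
        return 0;
    default:
        return -EINVAL;
    }
}
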
480 static int ql_start_loopback(struct ql_adapter *qdev) in ql_start_loopback() argument
482 if (netif_carrier_ok(qdev->ndev)) { in ql_start_loopback()
483 set_bit(QL_LB_LINK_UP, &qdev->flags); in ql_start_loopback()
484 netif_carrier_off(qdev->ndev); in ql_start_loopback()
486 clear_bit(QL_LB_LINK_UP, &qdev->flags); in ql_start_loopback()
487 qdev->link_config |= CFG_LOOPBACK_PCS; in ql_start_loopback()
488 return ql_mb_set_port_cfg(qdev); in ql_start_loopback()
491 static void ql_stop_loopback(struct ql_adapter *qdev) in ql_stop_loopback() argument
493 qdev->link_config &= ~CFG_LOOPBACK_PCS; in ql_stop_loopback()
494 ql_mb_set_port_cfg(qdev); in ql_stop_loopback()
495 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { in ql_stop_loopback()
496 netif_carrier_on(qdev->ndev); in ql_stop_loopback()
497 clear_bit(QL_LB_LINK_UP, &qdev->flags); in ql_stop_loopback()
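
The loopback setup helpers are almost fully visible in the matched lines: ql_start_loopback() remembers whether the carrier was up (QL_LB_LINK_UP), forces it off, sets CFG_LOOPBACK_PCS in link_config and pushes the port config; ql_stop_loopback() undoes all of that. A sketch with only the if/else structure filled in as an assumption:

static int ql_start_loopback(struct ql_adapter *qdev)
{
    if (netif_carrier_ok(qdev->ndev)) {
        set_bit(QL_LB_LINK_UP, &qdev->flags);
        netif_carrier_off(qdev->ndev);
    } else {
        clear_bit(QL_LB_LINK_UP, &qdev->flags);
    }
    qdev->link_config |= CFG_LOOPBACK_PCS;
    return ql_mb_set_port_cfg(qdev);
}

static void ql_stop_loopback(struct ql_adapter *qdev)
{
    qdev->link_config &= ~CFG_LOOPBACK_PCS;
    ql_mb_set_port_cfg(qdev);
    /* Restore the carrier state saved by ql_start_loopback(). */
    if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
        netif_carrier_on(qdev->ndev);
        clear_bit(QL_LB_LINK_UP, &qdev->flags);
    }
}
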
511 void ql_check_lb_frame(struct ql_adapter *qdev, in ql_check_lb_frame() argument
519 atomic_dec(&qdev->lb_count); in ql_check_lb_frame()
524 static int ql_run_loopback_test(struct ql_adapter *qdev) in ql_run_loopback_test() argument
532 skb = netdev_alloc_skb(qdev->ndev, size); in ql_run_loopback_test()
539 rc = ql_lb_send(skb, qdev->ndev); in ql_run_loopback_test()
542 atomic_inc(&qdev->lb_count); in ql_run_loopback_test()
546 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); in ql_run_loopback_test()
547 return atomic_read(&qdev->lb_count) ? -EIO : 0; in ql_run_loopback_test()
550 static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) in ql_loopback_test() argument
552 *data = ql_start_loopback(qdev); in ql_loopback_test()
555 *data = ql_run_loopback_test(qdev); in ql_loopback_test()
557 ql_stop_loopback(qdev); in ql_loopback_test()
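
The test itself sends a batch of self-describing frames, counts them in qdev->lb_count, lets ql_check_lb_frame() decrement the counter for every frame that comes back intact, and treats any leftover count as failure. A hedged sketch of ql_run_loopback_test() and ql_loopback_test(); the frame count, buffer size macro and the ql_create_lb_frame() helper are assumptions.

static int ql_run_loopback_test(struct ql_adapter *qdev)
{
    int i;
    netdev_tx_t rc;
    struct sk_buff *skb;
    unsigned int size = SMALL_BUF_MAP_SIZE;    /* assumed size macro */

    for (i = 0; i < 64; i++) {                 /* assumed frame count */
        skb = netdev_alloc_skb(qdev->ndev, size);
        if (!skb)
            return -ENOMEM;

        skb->queue_mapping = 0;
        skb_put(skb, size);
        ql_create_lb_frame(skb, size);         /* assumed fill helper */
        rc = ql_lb_send(skb, qdev->ndev);
        if (rc != NETDEV_TX_OK)
            return -EPIPE;
        atomic_inc(&qdev->lb_count);
    }
    /* Drain the first RX ring; every recognized loopback frame
     * decrements lb_count in ql_check_lb_frame(). */
    ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
    return atomic_read(&qdev->lb_count) ? -EIO : 0;
}

static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
{
    *data = ql_start_loopback(qdev);
    if (!*data)
        *data = ql_run_loopback_test(qdev);
    ql_stop_loopback(qdev);
    return *data;
}
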
564 struct ql_adapter *qdev = netdev_priv(ndev); in ql_self_test() local
569 set_bit(QL_SELFTEST, &qdev->flags); in ql_self_test()
572 if (ql_loopback_test(qdev, &data[0])) in ql_self_test()
579 clear_bit(QL_SELFTEST, &qdev->flags); in ql_self_test()
585 netif_err(qdev, drv, qdev->ndev, in ql_self_test()
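
ql_self_test() only runs the loopback test while the interface is up, bracketing it with the QL_SELFTEST flag so the RX path knows to intercept the frames; if the device is down it logs the netif_err() message and marks the test failed. A hedged sketch; the offline/online split and the result slot layout are assumptions.

static void ql_self_test(struct net_device *ndev,
                         struct ethtool_test *eth_test, u64 *data)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);   /* assumed length macro */

    if (netif_running(ndev)) {
        set_bit(QL_SELFTEST, &qdev->flags);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
            /* Offline test: PCS loopback. */
            if (ql_loopback_test(qdev, &data[0]))
                eth_test->flags |= ETH_TEST_FL_FAILED;
        } else {
            /* Online: nothing disruptive to run. */
            data[0] = 0;
        }
        clear_bit(QL_SELFTEST, &qdev->flags);
    } else {
        netif_err(qdev, drv, qdev->ndev,
                  "is down, Loopback test will fail.\n");
        eth_test->flags |= ETH_TEST_FL_FAILED;
    }
}
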
593 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_regs_len() local
595 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) in ql_get_regs_len()
604 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_regs() local
606 ql_get_dump(qdev, p); in ql_get_regs()
607 qdev->core_is_dumped = 0; in ql_get_regs()
608 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) in ql_get_regs()
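
The register-dump pair sizes the buffer from the QL_FRC_COREDUMP flag and fills it with ql_get_dump(), clearing core_is_dumped so a fresh dump is produced next time. A hedged sketch; which dump structure corresponds to which branch is an assumption.

static int ql_get_regs_len(struct net_device *ndev)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    /* Assumed: the flag selects between the full MPI core dump and
     * the smaller register-only dump. */
    if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
        return sizeof(struct ql_mpi_coredump);
    return sizeof(struct ql_reg_dump);
}

static void ql_get_regs(struct net_device *ndev,
                        struct ethtool_regs *regs, void *p)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    ql_get_dump(qdev, p);
    qdev->core_is_dumped = 0;
    if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
        regs->len = sizeof(struct ql_mpi_coredump);
    else
        regs->len = sizeof(struct ql_reg_dump);
}
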
616 struct ql_adapter *qdev = netdev_priv(dev); in ql_get_coalesce() local
618 c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; in ql_get_coalesce()
619 c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; in ql_get_coalesce()
631 c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; in ql_get_coalesce()
632 c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; in ql_get_coalesce()
639 struct ql_adapter *qdev = netdev_priv(ndev); in ql_set_coalesce() local
642 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) in ql_set_coalesce()
647 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) in ql_set_coalesce()
653 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && in ql_set_coalesce()
654 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && in ql_set_coalesce()
655 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && in ql_set_coalesce()
656 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) in ql_set_coalesce()
659 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; in ql_set_coalesce()
660 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; in ql_set_coalesce()
661 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; in ql_set_coalesce()
662 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; in ql_set_coalesce()
664 return ql_update_ring_coalescing(qdev); in ql_set_coalesce()
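
ql_set_coalesce() validates the requested interrupt-coalescing values against the ring sizes, returns early when nothing changed, and otherwise stores the new values and reprograms the rings via ql_update_ring_coalescing(). Sketch; the max-frames bound (MAX_INTER_FRAME_WAIT) is an assumption.

static int ql_set_coalesce(struct net_device *ndev,
                           struct ethtool_coalesce *c)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    /* Validate user parameters against the ring sizes. */
    if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
        return -EINVAL;
    if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
        return -EINVAL;
    if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
        return -EINVAL;
    if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
        return -EINVAL;

    /* Only touch the hardware when something actually changed. */
    if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
        qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
        qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
        qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
        return 0;

    qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
    qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
    qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
    qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;

    return ql_update_ring_coalescing(qdev);
}
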
670 struct ql_adapter *qdev = netdev_priv(netdev); in ql_get_pauseparam() local
672 ql_mb_get_port_cfg(qdev); in ql_get_pauseparam()
673 if (qdev->link_config & CFG_PAUSE_STD) { in ql_get_pauseparam()
682 struct ql_adapter *qdev = netdev_priv(netdev); in ql_set_pauseparam() local
686 qdev->link_config |= CFG_PAUSE_STD; in ql_set_pauseparam()
688 qdev->link_config &= ~CFG_PAUSE_STD; in ql_set_pauseparam()
692 status = ql_mb_set_port_cfg(qdev); in ql_set_pauseparam()
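
Pause-frame handling maps both directions onto a single CFG_PAUSE_STD bit in link_config: the getter refreshes the port config and reports both rx and tx pause when the bit is set, while the setter pushes the result with ql_mb_set_port_cfg(). A hedged sketch of ql_set_pauseparam(); the both-on-or-both-off restriction is an assumption.

static int ql_set_pauseparam(struct net_device *netdev,
                             struct ethtool_pauseparam *pause)
{
    struct ql_adapter *qdev = netdev_priv(netdev);

    /* Assumed: the hardware bit covers both directions at once. */
    if (pause->rx_pause && pause->tx_pause)
        qdev->link_config |= CFG_PAUSE_STD;
    else if (!pause->rx_pause && !pause->tx_pause)
        qdev->link_config &= ~CFG_PAUSE_STD;
    else
        return -EINVAL;

    return ql_mb_set_port_cfg(qdev);
}
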
698 struct ql_adapter *qdev = netdev_priv(ndev); in ql_get_msglevel() local
699 return qdev->msg_enable; in ql_get_msglevel()
704 struct ql_adapter *qdev = netdev_priv(ndev); in ql_set_msglevel() local
705 qdev->msg_enable = value; in ql_set_msglevel()