Lines Matching refs:pr

Cross-references to the struct ehea_port_res *pr port-resource pointer in the IBM eHEA Ethernet driver (ehea_main.c). Each entry gives the source line number, the matching line, and the enclosing function; a trailing "local" or "argument" marks the line where pr is declared as a local variable or as a function parameter.

192 struct ehea_port_res *pr = &port->port_res[l]; in ehea_update_firmware_handles() local
195 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
197 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
199 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
201 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
203 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
205 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
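The six fragments above come from one loop body: for every port resource, ehea_update_firmware_handles() appends the firmware handles of the QP, both CQs, the EQ, and both memory regions to one flat array. A minimal userspace sketch of that collection pattern, with simplified stand-in types rather than the driver's real structures:

#include <stdio.h>

struct fw_handle { unsigned long long fwh; };

struct port_res {                       /* stand-in for struct ehea_port_res */
	unsigned long long qp_fwh, send_cq_fwh, recv_cq_fwh,
			   eq_fwh, send_mr_fwh, recv_mr_fwh;
};

static int collect_handles(struct fw_handle *arr, int i,
			   const struct port_res *pr)
{
	arr[i++].fwh = pr->qp_fwh;      /* queue pair            */
	arr[i++].fwh = pr->send_cq_fwh; /* send completion queue */
	arr[i++].fwh = pr->recv_cq_fwh; /* recv completion queue */
	arr[i++].fwh = pr->eq_fwh;      /* event queue           */
	arr[i++].fwh = pr->send_mr_fwh; /* send memory region    */
	arr[i++].fwh = pr->recv_mr_fwh; /* recv memory region    */
	return i;                       /* next free slot for the caller */
}

int main(void)
{
	struct port_res pr = { 1, 2, 3, 4, 5, 6 };
	struct fw_handle arr[6];
	int n = collect_handles(arr, 0, &pr);

	for (int k = 0; k < n; k++)
		printf("fwh[%d] = %llu\n", k, arr[k].fwh);
	return 0;
}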
382 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) in ehea_refill_rq1() argument
384 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
385 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
386 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
387 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
391 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
395 pr->rq1_skba.index = index; in ehea_refill_rq1()
396 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
405 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
418 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
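ehea_refill_rq1() walks the RQ1 skb ring with a power-of-two mask (rq1_skba.len - 1) instead of a modulo, and parks unfinished work in os_skbs ("outstanding skbs") so a failed allocation is retried on the next refill pass. A compilable sketch of that ring walk, with illustrative names and a stub allocator in place of netdev_alloc_skb():

#include <stdbool.h>
#include <stdio.h>

#define RQ1_LEN 8			/* ring size must be a power of two */

static void *rq1_arr[RQ1_LEN];		/* stand-in for the skb slot array */
static int os_skbs;			/* wqes deferred to the next call */

static bool alloc_slot(int idx)		/* pretend allocator; always succeeds here */
{
	rq1_arr[idx] = &rq1_arr[idx];
	return true;
}

static void refill_rq1(int index, int nr_of_wqes)
{
	int max_index_mask = RQ1_LEN - 1;
	int fill_wqes = os_skbs + nr_of_wqes;	/* carry over deferred work */

	os_skbs = 0;
	for (int i = 0; i < fill_wqes; i++) {
		if (!alloc_slot(index)) {
			os_skbs = fill_wqes - i;  /* remember the shortfall */
			break;
		}
		index = (index + 1) & max_index_mask; /* wrap without modulo */
	}
}

int main(void)
{
	refill_rq1(6, 5);		/* fills slots 6,7 then wraps to 0,1,2 */
	for (int i = 0; i < RQ1_LEN; i++)
		printf("slot %d: %s\n", i, rq1_arr[i] ? "filled" : "empty");
	return 0;
}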
421 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) in ehea_init_fill_rq1() argument
423 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
424 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
427 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
438 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
441 static int ehea_refill_rq_def(struct ehea_port_res *pr, in ehea_refill_rq_def() argument
445 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
446 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
471 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
491 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
508 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
510 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
516 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq2() argument
518 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
524 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq3() argument
526 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
544 struct ehea_port_res *pr) in ehea_fill_skb() argument
559 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
616 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, in ehea_treat_poll_error() argument
623 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
625 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
627 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
631 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
635 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
640 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
642 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
645 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
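ehea_treat_poll_error() buckets receive-completion errors into per-type counters (TCP checksum, IP checksum, frame CRC), frees the buffer the CQE indexes in RQ2 or RQ3, and schedules a full port reset for fatal statuses. A reduced sketch of that classification; the error-class names are invented stand-ins for the driver's CQE status-bit decoding:

#include <stdio.h>

struct p_stats { long err_tcp_cksum, err_ip_cksum, err_frame_crc; };

/* invented error classes; the driver decodes these from CQE status bits */
enum err_type { ERR_TCP_CKSUM, ERR_IP_CKSUM, ERR_FRAME_CRC, ERR_FATAL };

/* returns nonzero when the port must be reset, mirroring the driver's
 * "schedule port reset and stop polling" path */
static int treat_poll_error(struct p_stats *st, enum err_type t)
{
	switch (t) {
	case ERR_TCP_CKSUM: st->err_tcp_cksum++; return 0;
	case ERR_IP_CKSUM:  st->err_ip_cksum++;  return 0;
	case ERR_FRAME_CRC: st->err_frame_crc++; return 0;
	default:
		puts("fatal CQE: dropping skb, scheduling port reset");
		return 1;
	}
}

int main(void)
{
	struct p_stats st = { 0, 0, 0 };

	treat_poll_error(&st, ERR_IP_CKSUM);
	printf("ip cksum errors: %ld\n", st.err_ip_cksum);
	return treat_poll_error(&st, ERR_FATAL) ? 0 : 1;
}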
653 struct ehea_port_res *pr, in ehea_proc_rwqes() argument
656 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
657 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
660 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
661 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
662 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
663 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
664 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
665 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
700 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
710 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
721 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
731 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
733 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
734 port_reset = ehea_treat_poll_error(pr, rq, cqe, in ehea_proc_rwqes()
743 pr->rx_packets += processed; in ehea_proc_rwqes()
744 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
746 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); in ehea_proc_rwqes()
747 ehea_refill_rq2(pr, processed_rq2); in ehea_proc_rwqes()
748 ehea_refill_rq3(pr, processed_rq3); in ehea_proc_rwqes()
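ehea_proc_rwqes() handles completions from three receive queues; eHEA steers frames into RQ1, RQ2, or RQ3 by buffer size, and after the poll pass each queue is refilled by exactly the number of buffers it consumed (processed_rq1/2/3). A toy dispatcher showing that bookkeeping; the size cut-offs below are illustrative, not the driver's configured values:

#include <stdio.h>

enum rq { RQ1, RQ2, RQ3 };

/* illustrative cut-offs; the real thresholds come from the driver's
 * queue configuration, not these constants */
static enum rq classify(int pkt_len)
{
	if (pkt_len <= 128)
		return RQ1;	/* small frames       */
	if (pkt_len <= 2048)
		return RQ2;	/* mid-size buffers   */
	return RQ3;		/* jumbo-size buffers */
}

int main(void)
{
	int lens[] = { 60, 1500, 9000, 64 };
	int processed[3] = { 0, 0, 0 };

	for (int i = 0; i < 4; i++)
		processed[classify(lens[i])]++;

	/* refill each queue by exactly what this pass consumed,
	 * as the ehea_refill_rq1/2/3 calls do after the loop */
	for (int q = RQ1; q <= RQ3; q++)
		printf("rq%d: refill %d wqes\n", q + 1, processed[q]);
	return 0;
}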
760 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag() local
761 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
773 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs() local
776 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
778 atomic_dec(&pr->swqe_avail); in check_sqs()
786 ehea_post_swqe(pr->qp, swqe); in check_sqs()
789 pr->sq_restart_flag == 0, in check_sqs()
794 ehea_schedule_port_reset(pr->port); in check_sqs()
801 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) in ehea_proc_cqes() argument
804 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
810 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
811 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
821 pr->sq_restart_flag = 1; in ehea_proc_cqes()
830 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
835 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
840 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
847 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
849 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
859 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
862 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
865 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
870 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
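ehea_proc_cqes() and ehea_start_xmit() (line 2046 onward) share a credit scheme: every posted send WQE decrements the atomic swqe_avail counter, completions add credits back, and the tx queue is woken once the count climbs past swqe_refill_th (a tenth of the send queue, per line 1553). A self-contained sketch using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SQ_WQES 128

static atomic_int swqe_avail = SQ_WQES - 1;	/* free send-wqe credits */
static const int swqe_refill_th = SQ_WQES / 10;	/* wake threshold */
static bool tx_stopped;

static void post_swqe(void)			/* ehea_start_xmit() side */
{
	atomic_fetch_sub(&swqe_avail, 1);	/* consume one credit */
	if (atomic_load(&swqe_avail) <= 1) {
		tx_stopped = true;		/* netif_tx_stop_queue() */
		puts("tx queue stopped");
	}
}

static void complete_swqes(int swqe_av)		/* ehea_proc_cqes() side */
{
	atomic_fetch_add(&swqe_avail, swqe_av);	/* return credits */
	if (tx_stopped && atomic_load(&swqe_avail) >= swqe_refill_th) {
		tx_stopped = false;		/* netif_tx_wake_queue() */
		puts("tx queue woken");
	}
}

int main(void)
{
	int posted = 0;

	while (!tx_stopped) {
		post_swqe();
		posted++;
	}
	printf("posted %d swqes\n", posted);
	complete_swqes(posted);
	return 0;
}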
879 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, in ehea_poll() local
881 struct net_device *dev = pr->port->netdev; in ehea_poll()
887 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
888 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
892 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
893 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
894 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
895 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
897 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
898 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
906 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
907 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
926 struct ehea_port_res *pr = param; in ehea_recv_irq_handler() local
928 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
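ehea_recv_irq_handler() does nothing but napi_schedule(); the real work happens in ehea_poll(), which drains send and receive completions within its budget, re-arms the CQ interrupts (ehea_reset_cq_ep()/ehea_reset_cq_n1()), and then polls once more to close the race where a completion slipped in before the re-arm. A condensed sketch of that control flow, with all queue operations stubbed out:

#include <stdbool.h>
#include <stdio.h>

static int pending = 5;			/* pretend hardware completions */

static int drain_completions(int budget)
{
	int done = pending < budget ? pending : budget;
	pending -= done;
	return done;
}

static void rearm_cq_interrupts(void)	/* ehea_reset_cq_ep()/_n1() stand-in */
{
}

static bool work_arrived_meanwhile(void)
{
	return pending > 0;		/* the ehea_poll_rq1()/ehea_poll_cq() re-check */
}

static int poll_sketch(int budget)
{
	int rx = drain_completions(budget);

	while (rx < budget) {
		rearm_cq_interrupts();
		/* poll once more after re-arming to close the race */
		if (!work_arrived_meanwhile())
			break;		/* napi_complete(); irq fires next time */
		rx += drain_completions(budget - rx);
	}
	return rx;
}

int main(void)
{
	printf("processed %d of budget 64\n", poll_sketch(64));
	return 0;
}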
1263 static int ehea_fill_port_res(struct ehea_port_res *pr) in ehea_fill_port_res() argument
1266 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1268 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1270 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1272 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1280 struct ehea_port_res *pr; in ehea_reg_interrupts() local
1302 pr = &port->port_res[i]; in ehea_reg_interrupts()
1303 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1305 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1307 IRQF_DISABLED, pr->int_send_name, in ehea_reg_interrupts()
1308 pr); in ehea_reg_interrupts()
1311 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1316 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1339 struct ehea_port_res *pr; in ehea_free_interrupts() local
1345 pr = &port->port_res[i]; in ehea_free_interrupts()
1346 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1349 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1407 static int ehea_gen_smrs(struct ehea_port_res *pr) in ehea_gen_smrs() argument
1410 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1412 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1416 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1423 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1429 static int ehea_rem_smrs(struct ehea_port_res *pr) in ehea_rem_smrs() argument
1431 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1432 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
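ehea_gen_smrs() creates the send and receive shared memory regions as a pair and unwinds the first if the second fails, so callers never observe a half-initialised pair; ehea_rem_smrs() tears both down. The idiom in miniature, with stubs standing in for ehea_gen_smr()/ehea_rem_mr():

#include <stdio.h>

static int gen_smr(const char *which)	/* stand-in for ehea_gen_smr() */
{
	printf("created %s smr\n", which);
	return 0;			/* 0 on success, as in the driver */
}

static void rem_mr(const char *which)	/* stand-in for ehea_rem_mr() */
{
	printf("removed %s smr\n", which);
}

static int gen_smrs(void)
{
	if (gen_smr("send"))
		return -1;
	if (gen_smr("recv")) {
		rem_mr("send");		/* unwind the first on failure */
		return -1;
	}
	return 0;			/* both regions live, or neither */
}

int main(void)
{
	return gen_smrs() ? 1 : 0;
}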
1453 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, in ehea_init_port_res() argument
1462 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1463 tx_packets = pr->tx_packets; in ehea_init_port_res()
1464 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1465 rx_packets = pr->rx_packets; in ehea_init_port_res()
1467 memset(pr, 0, sizeof(struct ehea_port_res)); in ehea_init_port_res()
1469 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1470 pr->tx_packets = tx_packets; in ehea_init_port_res()
1471 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1472 pr->rx_packets = rx_packets; in ehea_init_port_res()
1474 pr->port = port; in ehea_init_port_res()
1476 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1477 if (!pr->eq) { in ehea_init_port_res()
1482 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1483 pr->eq->fw_handle, in ehea_init_port_res()
1485 if (!pr->recv_cq) { in ehea_init_port_res()
1490 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1491 pr->eq->fw_handle, in ehea_init_port_res()
1493 if (!pr->send_cq) { in ehea_init_port_res()
1500 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1501 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1525 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1526 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1529 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1530 if (!pr->qp) { in ehea_init_port_res()
1544 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1546 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1547 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1548 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1549 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1553 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1554 if (ehea_gen_smrs(pr) != 0) { in ehea_init_port_res()
1559 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1563 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); in ehea_init_port_res()
1570 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1571 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1572 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1573 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1574 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1575 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1576 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1577 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
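The tail of ehea_init_port_res() (lines 1570-1577) is the kernel's goto-unwind idiom: on failure, resources are destroyed strictly in reverse creation order, QP, then send CQ, then receive CQ, then EQ, so a partially built port resource is released cleanly. The shape of that idiom, reduced to stubs:

#include <stdio.h>

static int create(const char *what)
{
	printf("create %s\n", what);
	return 0;			/* nonzero would mean failure */
}

static void destroy(const char *what)
{
	printf("destroy %s\n", what);
}

static int init_port_res_sketch(void)
{
	if (create("eq"))
		goto out;
	if (create("recv_cq"))
		goto out_free_eq;
	if (create("send_cq"))
		goto out_free_rcq;
	if (create("qp"))
		goto out_free_scq;
	return 0;			/* fully built */

out_free_scq:				/* unwind in reverse creation order */
	destroy("send_cq");
out_free_rcq:
	destroy("recv_cq");
out_free_eq:
	destroy("eq");
out:
	return -1;
}

int main(void)
{
	return init_port_res_sketch();
}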
1582 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) in ehea_clean_portres() argument
1586 if (pr->qp) in ehea_clean_portres()
1587 netif_napi_del(&pr->napi); in ehea_clean_portres()
1589 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1592 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1593 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1594 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1596 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1597 if (pr->rq1_skba.arr[i]) in ehea_clean_portres()
1598 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1600 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1601 if (pr->rq2_skba.arr[i]) in ehea_clean_portres()
1602 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1604 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1605 if (pr->rq3_skba.arr[i]) in ehea_clean_portres()
1606 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1608 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1609 if (pr->sq_skba.arr[i]) in ehea_clean_portres()
1610 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1612 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1613 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1614 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1615 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1616 ret = ehea_rem_smrs(pr); in ehea_clean_portres()
2046 struct ehea_port_res *pr; in ehea_start_xmit() local
2049 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2052 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2054 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2061 pr->tx_packets++; in ehea_start_xmit()
2062 pr->tx_bytes += skb->len; in ehea_start_xmit()
2066 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2070 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2074 pr->swqe_ll_count = 0; in ehea_start_xmit()
2076 pr->swqe_ll_count += 1; in ehea_start_xmit()
2080 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2082 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2083 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2085 pr->sq_skba.index++; in ehea_start_xmit()
2086 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2088 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2092 pr->swqe_id_counter += 1; in ehea_start_xmit()
2095 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2104 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2106 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2107 pr->p_stats.queue_stopped++; in ehea_start_xmit()
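ehea_start_xmit() builds a 64-bit work-request id by packing a type tag, the running swqe_id_counter, and the sq_skba ring index into bit fields via EHEA_BMASK_SET, and keeps that ring index in range with the same power-of-two mask seen on line 2086. A sketch of generic bit-field packing in that style; the field layout below is invented for illustration and does not match the driver's real masks:

#include <stdint.h>
#include <stdio.h>

/* pack val into bits [shift, shift+width) of a 64-bit id */
static uint64_t bmask_set(uint64_t id, unsigned shift, unsigned width,
			  uint64_t val)
{
	uint64_t mask = ((1ULL << width) - 1) << shift;

	return (id & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* invented layout: type in bits 60-63, counter in 20-59,
	 * ring index in 0-19 */
	uint64_t wr_id = 0;

	wr_id = bmask_set(wr_id, 60, 4, 2);	  /* WR_ID_TYPE  */
	wr_id = bmask_set(wr_id, 20, 40, 12345);  /* WR_ID_COUNT */
	wr_id = bmask_set(wr_id, 0, 20, 7);	  /* WR_ID_INDEX */

	printf("wr_id = 0x%016llx\n", (unsigned long long)wr_id);
	return 0;
}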
2517 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq() local
2518 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2522 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2551 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps() local
2552 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2587 dret = ehea_rem_smrs(pr); in ehea_stop_qps()
2601 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) in ehea_update_rqs() argument
2606 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2607 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2609 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2653 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps() local
2654 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2656 ret = ehea_gen_smrs(pr); in ehea_restart_qps()
2662 ehea_update_rqs(qp, pr); in ehea_restart_qps()
2694 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2695 ehea_refill_rq2(pr, 0); in ehea_restart_qps()
2696 ehea_refill_rq3(pr, 0); in ehea_restart_qps()