Lines matching references to pr (struct ehea_port_res *) in ehea_main.c, the IBM eHEA Ethernet driver

190 struct ehea_port_res *pr = &port->port_res[l];  in ehea_update_firmware_handles()  local
193 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
195 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
197 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
199 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
201 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
203 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
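The block above collects every firmware handle a port resource owns (the QP, both CQs, the EQ, and both MRs) into one flat array. A minimal userspace sketch of the same collect-into-array pattern, with illustrative stand-in types rather than the driver's own:

    #include <stdio.h>

    #define NUM_RES 2

    /* Illustrative stand-ins for the driver's resource objects. */
    struct handle_entry { unsigned long long fwh; };
    struct port_res {
        unsigned long long qp_fwh, send_cq_fwh, recv_cq_fwh,
                           eq_fwh, send_mr_fwh, recv_mr_fwh;
    };

    int main(void)
    {
        struct port_res res[NUM_RES] = {
            { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 },
            { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25 },
        };
        struct handle_entry arr[NUM_RES * 6];
        int i = 0;

        /* Same shape as the driver loop: one entry per firmware handle. */
        for (int l = 0; l < NUM_RES; l++) {
            struct port_res *pr = &res[l];
            arr[i++].fwh = pr->qp_fwh;
            arr[i++].fwh = pr->send_cq_fwh;
            arr[i++].fwh = pr->recv_cq_fwh;
            arr[i++].fwh = pr->eq_fwh;
            arr[i++].fwh = pr->send_mr_fwh;
            arr[i++].fwh = pr->recv_mr_fwh;
        }
        for (int k = 0; k < i; k++)
            printf("handle %d: 0x%llx\n", k, arr[k].fwh);
        return 0;
    }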
379 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) in ehea_refill_rq1() argument
381 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
382 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
383 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
384 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
388 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
392 pr->rq1_skba.index = index; in ehea_refill_rq1()
393 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
402 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
415 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
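ehea_refill_rq1() folds any buffers still owed from a previous failed refill (os_skbs) into the current request, and on allocation failure records the new shortfall so a later call can make up for it. A compilable sketch of that bookkeeping, using hypothetical names and malloc() in place of netdev_alloc_skb():

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_LEN 8   /* power of two, as in the driver's rings */

    /* Hypothetical mirror of the rq1_skba bookkeeping. */
    struct skb_ring {
        void *arr[RING_LEN];
        int index;
        int os_skbs;   /* buffers still owed from an earlier failed refill */
    };

    /* Post nr_of_wqes buffers starting at index; on allocation failure,
     * record the shortfall so the next call can retry the remainder. */
    static void refill(struct skb_ring *r, int index, int nr_of_wqes)
    {
        int fill_wqes = r->os_skbs + nr_of_wqes;

        r->os_skbs = 0;
        for (int i = 0; i < fill_wqes; i++) {
            void *buf = malloc(64);          /* netdev_alloc_skb() stand-in */
            if (!buf) {
                r->index = index;
                r->os_skbs = fill_wqes - i;  /* retry these later */
                return;
            }
            r->arr[index] = buf;             /* leaks here: illustration only */
            index = (index + 1) & (RING_LEN - 1);
        }
        r->index = index;
    }

    int main(void)
    {
        struct skb_ring r = { .index = 0, .os_skbs = 0 };

        refill(&r, r.index, 4);
        printf("index=%d owed=%d\n", r.index, r.os_skbs);
        return 0;
    }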
418 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) in ehea_init_fill_rq1() argument
420 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
421 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
424 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
435 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
438 static int ehea_refill_rq_def(struct ehea_port_res *pr, in ehea_refill_rq_def() argument
442 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
443 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
468 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
488 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
505 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
507 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
513 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq2() argument
515 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
521 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq3() argument
523 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
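ehea_refill_rq2() and ehea_refill_rq3() are thin wrappers that pass their own ring and RQ number to the shared ehea_refill_rq_def(). The shape, sketched with hypothetical stand-ins:

    #include <stdio.h>

    struct ring { const char *name; };
    struct port { struct ring rq2, rq3; };

    /* One generic refill routine, parameterized by ring and RQ number. */
    static int refill_rq_def(struct ring *r, int rq_nr, int n)
    {
        printf("refill %s (rq%d) with %d wqes\n", r->name, rq_nr, n);
        return 0;
    }

    static int refill_rq2(struct port *p, int n) { return refill_rq_def(&p->rq2, 2, n); }
    static int refill_rq3(struct port *p, int n) { return refill_rq_def(&p->rq3, 3, n); }

    int main(void)
    {
        struct port p = { { "rq2" }, { "rq3" } };

        refill_rq2(&p, 16);
        refill_rq3(&p, 16);
        return 0;
    }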
541 struct ehea_port_res *pr) in ehea_fill_skb() argument
556 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
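Line 556 derives the receive-queue index from the port-resource pointer alone: subtracting the array base from an element pointer yields the element's index. A self-contained demonstration:

    #include <stdio.h>

    struct res { int id; };
    struct port { struct res port_res[4]; };

    int main(void)
    {
        struct port p = { 0 };
        struct res *pr = &p.port_res[2];

        /* Pointer difference between an element and the array base gives
         * the element's index -- how ehea_fill_skb() derives the rx queue
         * number from the port-resource pointer alone. */
        long queue = pr - &p.port_res[0];
        printf("queue index: %ld\n", queue);   /* prints 2 */
        return 0;
    }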
613 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, in ehea_treat_poll_error() argument
620 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
622 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
624 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
628 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
632 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
637 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
639 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
642 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
650 struct ehea_port_res *pr, in ehea_proc_rwqes() argument
653 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
654 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
657 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
658 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
659 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
660 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
661 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
662 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
697 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
707 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
718 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
728 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
730 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
731 port_reset = ehea_treat_poll_error(pr, rq, cqe, in ehea_proc_rwqes()
740 pr->rx_packets += processed; in ehea_proc_rwqes()
741 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
743 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); in ehea_proc_rwqes()
744 ehea_refill_rq2(pr, processed_rq2); in ehea_proc_rwqes()
745 ehea_refill_rq3(pr, processed_rq3); in ehea_proc_rwqes()
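At the end of a poll pass (lines 740-745), the accumulated totals are added to the per-queue counters and each RQ is refilled by exactly the number of WQEs consumed from it. Sketched with hypothetical types:

    #include <stdio.h>

    struct port_res_stats {
        unsigned long rx_packets, rx_bytes;
    };

    static void refill_rq(int rq_nr, int consumed)
    {
        printf("refill rq%d with %d buffers\n", rq_nr, consumed);
    }

    int main(void)
    {
        struct port_res_stats s = { 0, 0 };
        int processed = 10, processed_bytes = 15000;
        int processed_rq1 = 6, processed_rq2 = 3, processed_rq3 = 1;

        s.rx_packets += processed;
        s.rx_bytes += processed_bytes;

        /* Each RQ gets back exactly what this pass took from it. */
        refill_rq(1, processed_rq1);
        refill_rq(2, processed_rq2);
        refill_rq(3, processed_rq3);
        printf("pkts=%lu bytes=%lu\n", s.rx_packets, s.rx_bytes);
        return 0;
    }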
757 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag() local
758 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
770 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs() local
772 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
774 atomic_dec(&pr->swqe_avail); in check_sqs()
782 ehea_post_swqe(pr->qp, swqe); in check_sqs()
785 pr->sq_restart_flag == 0, in check_sqs()
790 ehea_schedule_port_reset(pr->port); in check_sqs()
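check_sqs() posts a marker WQE, then sleeps on wait_event_timeout() until the completion side clears sq_restart_flag, scheduling a port reset if the timeout expires first. A userspace analogue of that wait-with-deadline shape, using POSIX condition variables (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool sq_restart_flag = true;   /* cleared when the marker completes */

    static void *completion_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        sq_restart_flag = false;          /* reset_sq_restart_flag() analogue */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct timespec deadline;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 5;             /* msecs_to_jiffies(100) analogue */

        pthread_create(&t, NULL, completion_path, NULL);

        pthread_mutex_lock(&lock);
        while (sq_restart_flag && rc == 0)
            rc = pthread_cond_timedwait(&cond, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        if (rc != 0)
            printf("timed out: schedule port reset\n");
        else
            printf("marker completed\n");
        pthread_join(t, NULL);
        return 0;
    }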
797 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) in ehea_proc_cqes() argument
800 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
806 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
807 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
817 pr->sq_restart_flag = 1; in ehea_proc_cqes()
826 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
831 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
836 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
843 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
845 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
855 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
858 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
861 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
866 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
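ehea_proc_cqes() returns reclaimed send slots in a single atomic add to swqe_avail and re-wakes the transmit queue only once availability crosses swqe_refill_th. A minimal sketch of that thresholded wake, assuming C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical mirror of the swqe_avail accounting. */
    static atomic_int swqe_avail;
    static int swqe_refill_th = 16;
    static int queue_stopped = 1;

    static void reclaim_completions(int reclaimed)
    {
        atomic_fetch_add(&swqe_avail, reclaimed);
        if (queue_stopped &&
            atomic_load(&swqe_avail) >= swqe_refill_th) {
            queue_stopped = 0;   /* netif_wake_queue() in the driver */
            printf("queue woken\n");
        }
    }

    int main(void)
    {
        atomic_store(&swqe_avail, 10);
        reclaim_completions(4);   /* still below threshold: stays stopped */
        reclaim_completions(8);   /* crosses threshold: wakes the queue */
        return 0;
    }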
875 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, in ehea_poll() local
877 struct net_device *dev = pr->port->netdev; in ehea_poll()
883 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
884 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
888 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
889 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
890 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
891 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
893 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
894 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
902 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
903 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
911 struct ehea_port_res *pr = param; in ehea_recv_irq_handler() local
913 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
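ehea_poll() follows the classic NAPI re-arm dance: finish under budget, re-enable CQ event notification, then peek at the queues once more, and drop back into polling if an event slipped in during the window. A toy model of that control flow (the stand-in helpers are not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 5;
    static bool irq_enabled;

    /* Consume up to quota units of work; stands in for
     * ehea_proc_cqes() plus ehea_proc_rwqes(). */
    static int process_some(int quota)
    {
        int n = pending < quota ? pending : quota;
        pending -= n;
        return n;
    }

    static bool work_pending(void) { return pending > 0; }

    int main(void)
    {
        int budget = 8, rx = 0;

        rx += process_some(budget - rx);
        while (rx < budget) {
            irq_enabled = true;       /* re-arm, like ehea_reset_cq_ep()/_n1() */
            if (!work_pending())
                break;                /* truly idle: leave polling mode */
            irq_enabled = false;      /* new work raced in: keep polling */
            rx += process_some(budget - rx);
        }
        printf("processed %d, irq %s\n", rx, irq_enabled ? "armed" : "off");
        return 0;
    }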
1247 static int ehea_fill_port_res(struct ehea_port_res *pr) in ehea_fill_port_res() argument
1250 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1252 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1254 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1256 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1264 struct ehea_port_res *pr; in ehea_reg_interrupts() local
1286 pr = &port->port_res[i]; in ehea_reg_interrupts()
1287 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1289 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1291 0, pr->int_send_name, pr); in ehea_reg_interrupts()
1294 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1299 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1322 struct ehea_port_res *pr; in ehea_free_interrupts() local
1328 pr = &port->port_res[i]; in ehea_free_interrupts()
1329 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1332 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1390 static int ehea_gen_smrs(struct ehea_port_res *pr) in ehea_gen_smrs() argument
1393 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1395 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1399 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1406 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1412 static int ehea_rem_smrs(struct ehea_port_res *pr) in ehea_rem_smrs() argument
1414 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1415 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
1436 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, in ehea_init_port_res() argument
1445 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1446 tx_packets = pr->tx_packets; in ehea_init_port_res()
1447 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1448 rx_packets = pr->rx_packets; in ehea_init_port_res()
1450 memset(pr, 0, sizeof(struct ehea_port_res)); in ehea_init_port_res()
1452 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1453 pr->tx_packets = tx_packets; in ehea_init_port_res()
1454 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1455 pr->rx_packets = rx_packets; in ehea_init_port_res()
1457 pr->port = port; in ehea_init_port_res()
1459 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1460 if (!pr->eq) { in ehea_init_port_res()
1465 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1466 pr->eq->fw_handle, in ehea_init_port_res()
1468 if (!pr->recv_cq) { in ehea_init_port_res()
1473 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1474 pr->eq->fw_handle, in ehea_init_port_res()
1476 if (!pr->send_cq) { in ehea_init_port_res()
1483 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1484 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1508 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1509 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1512 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1513 if (!pr->qp) { in ehea_init_port_res()
1527 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1529 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1530 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1531 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1532 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1536 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1537 if (ehea_gen_smrs(pr) != 0) { in ehea_init_port_res()
1542 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1546 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); in ehea_init_port_res()
1553 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1554 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1555 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1556 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1557 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1558 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1559 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1560 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
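The tail of ehea_init_port_res() (lines 1553-1560) is the kernel's staged goto-unwind idiom: each failure label releases exactly what was acquired before it, in reverse order. A userspace sketch with malloc()/free() standing in for the create/destroy calls:

    #include <stdlib.h>

    int init_resources(void)
    {
        void *eq = NULL, *recv_cq = NULL, *send_cq = NULL, *qp = NULL;

        eq = malloc(16);
        if (!eq)
            goto out;
        recv_cq = malloc(16);
        if (!recv_cq)
            goto out_free_eq;
        send_cq = malloc(16);
        if (!send_cq)
            goto out_free_recv_cq;
        qp = malloc(16);
        if (!qp)
            goto out_free_send_cq;
        return 0;   /* success: caller now owns everything */

    /* Each label frees one resource, then falls through to the next,
     * unwinding in exact reverse order of acquisition. */
    out_free_send_cq:
        free(send_cq);
    out_free_recv_cq:
        free(recv_cq);
    out_free_eq:
        free(eq);
    out:
        return -1;
    }

    int main(void)
    {
        return init_resources() == 0 ? 0 : 1;   /* leaks on success: sketch only */
    }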
1565 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) in ehea_clean_portres() argument
1569 if (pr->qp) in ehea_clean_portres()
1570 netif_napi_del(&pr->napi); in ehea_clean_portres()
1572 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1575 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1576 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1577 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1579 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1580 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1582 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1583 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1585 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1586 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1588 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1589 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1591 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1592 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1593 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1594 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1595 ret = ehea_rem_smrs(pr); in ehea_clean_portres()
2017 struct ehea_port_res *pr; in ehea_start_xmit() local
2020 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2023 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2025 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2032 pr->tx_packets++; in ehea_start_xmit()
2033 pr->tx_bytes += skb->len; in ehea_start_xmit()
2037 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2041 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2045 pr->swqe_ll_count = 0; in ehea_start_xmit()
2047 pr->swqe_ll_count += 1; in ehea_start_xmit()
2051 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2053 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2054 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2056 pr->sq_skba.index++; in ehea_start_xmit()
2057 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2059 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2063 pr->swqe_id_counter += 1; in ehea_start_xmit()
2066 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2075 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2077 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2078 pr->p_stats.queue_stopped++; in ehea_start_xmit()
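Two xmit-path details are visible above: a signalled completion is requested only every sig_iv-th send (lines 2041-2047), batching CQE processing, and the ring index wraps with a bitmask because sq_skba.len is a power of two (lines 2056-2057). Both in one compilable sketch (constants hypothetical):

    #include <stdio.h>

    #define RING_LEN 16   /* power of two: enables the mask wrap below */
    #define SIG_IV   8    /* request a signalled completion every 8 sends */

    int main(void)
    {
        unsigned int index = 0, ll_count = 0;

        for (int pkt = 0; pkt < 20; pkt++) {
            int signalled = 0;

            /* Only every SIG_IV-th WQE asks the hardware for a CQE,
             * so the send ring is reclaimed in batches. */
            if (ll_count >= SIG_IV - 1) {
                signalled = 1;
                ll_count = 0;
            } else {
                ll_count++;
            }

            index++;
            index &= RING_LEN - 1;   /* cheap wrap, as in sq_skba.index */

            if (signalled)
                printf("pkt %2d: index %2u (signalled)\n", pkt, index);
        }
        return 0;
    }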
2490 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq() local
2491 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2495 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2524 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps() local
2525 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2560 dret = ehea_rem_smrs(pr); in ehea_stop_qps()
2574 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) in ehea_update_rqs() argument
2579 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2580 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2582 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2626 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps() local
2627 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2629 ret = ehea_gen_smrs(pr); in ehea_restart_qps()
2635 ehea_update_rqs(qp, pr); in ehea_restart_qps()
2667 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2668 ehea_refill_rq2(pr, 0); in ehea_restart_qps()
2669 ehea_refill_rq3(pr, 0); in ehea_restart_qps()