Lines Matching refs:flow
134 struct tid_rdma_flow *flow,
875 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_4k() argument
892 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k()
895 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k()
929 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k()
1013 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, in tid_rdma_find_phys_blocks_8k() argument
1027 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k()
1030 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k()
1080 static u32 kern_find_pages(struct tid_rdma_flow *flow, in kern_find_pages() argument
1084 struct tid_rdma_request *req = flow->req; in kern_find_pages()
1086 u32 length = flow->req->seg_len; in kern_find_pages()
1110 flow->length = flow->req->seg_len - length; in kern_find_pages()
1115 static void dma_unmap_flow(struct tid_rdma_flow *flow) in dma_unmap_flow() argument
1121 dd = flow->req->rcd->dd; in dma_unmap_flow()
1122 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_unmap_flow()
1134 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages) in dma_map_flow() argument
1137 struct hfi1_devdata *dd = flow->req->rcd->dd; in dma_map_flow()
1140 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; in dma_map_flow()
1150 dma_unmap_flow(flow); in dma_map_flow()
1159 static inline bool dma_mapped(struct tid_rdma_flow *flow) in dma_mapped() argument
1161 return !!flow->pagesets[0].mapped; in dma_mapped()
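
The dma_map_flow()/dma_unmap_flow() pair referenced above follows the usual streaming-DMA pattern: map each page set, check the returned handle, and unwind everything already mapped if a mapping fails. A minimal sketch of that pattern, using illustrative page-set fields rather than the driver's exact layout:

        /* Hedged sketch, not the driver's code: map page sets with rollback. */
        for (i = 0; i < npagesets; i++) {
                pset[i].addr = dma_map_page(&dd->pcidev->dev,
                                            pset[i].first_page, 0,
                                            pset[i].count * PAGE_SIZE,
                                            DMA_FROM_DEVICE);
                if (dma_mapping_error(&dd->pcidev->dev, pset[i].addr)) {
                        while (i--)     /* unwind mappings made so far */
                                dma_unmap_page(&dd->pcidev->dev, pset[i].addr,
                                               pset[i].count * PAGE_SIZE,
                                               DMA_FROM_DEVICE);
                        return -ENOMEM;
                }
                pset[i].mapped = 1;     /* what dma_mapped() at line 1161 tests */
        }
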
1168 static int kern_get_phys_blocks(struct tid_rdma_flow *flow, in kern_get_phys_blocks() argument
1175 if (flow->npagesets) { in kern_get_phys_blocks()
1176 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, in kern_get_phys_blocks()
1177 flow); in kern_get_phys_blocks()
1178 if (!dma_mapped(flow)) in kern_get_phys_blocks()
1179 return dma_map_flow(flow, pages); in kern_get_phys_blocks()
1183 npages = kern_find_pages(flow, pages, ss, last); in kern_get_phys_blocks()
1185 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) in kern_get_phys_blocks()
1186 flow->npagesets = in kern_get_phys_blocks()
1187 tid_rdma_find_phys_blocks_4k(flow, pages, npages, in kern_get_phys_blocks()
1188 flow->pagesets); in kern_get_phys_blocks()
1190 flow->npagesets = in kern_get_phys_blocks()
1191 tid_rdma_find_phys_blocks_8k(flow, pages, npages, in kern_get_phys_blocks()
1192 flow->pagesets); in kern_get_phys_blocks()
1194 return dma_map_flow(flow, pages); in kern_get_phys_blocks()
1197 static inline void kern_add_tid_node(struct tid_rdma_flow *flow, in kern_add_tid_node() argument
1201 struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++]; in kern_add_tid_node()
1203 WARN_ON_ONCE(flow->tnode_cnt >= in kern_add_tid_node()
1213 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, in kern_add_tid_node()
1230 static int kern_alloc_tids(struct tid_rdma_flow *flow) in kern_alloc_tids() argument
1232 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_alloc_tids()
1238 flow->tnode_cnt = 0; in kern_alloc_tids()
1239 ngroups = flow->npagesets / dd->rcv_entries.group_size; in kern_alloc_tids()
1245 kern_add_tid_node(flow, rcd, "complete groups", group, in kern_alloc_tids()
1253 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1259 use = min_t(u32, flow->npagesets - pageidx, in kern_alloc_tids()
1261 kern_add_tid_node(flow, rcd, "used groups", used, use); in kern_alloc_tids()
1264 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1280 use = min_t(u32, flow->npagesets - pageidx, group->size); in kern_alloc_tids()
1281 kern_add_tid_node(flow, rcd, "complete continue", group, use); in kern_alloc_tids()
1283 if (pageidx >= flow->npagesets) in kern_alloc_tids()
1286 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", in kern_alloc_tids()
1287 (u64)flow->npagesets); in kern_alloc_tids()
1293 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num, in kern_program_rcv_group() argument
1296 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_program_rcv_group()
1298 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_program_rcv_group()
1301 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; in kern_program_rcv_group()
1312 pset = &flow->pagesets[(*pset_idx)++]; in kern_program_rcv_group()
1335 flow->tid_entry[flow->tidcnt++] = in kern_program_rcv_group()
1340 flow->req->qp, flow->tidcnt - 1, in kern_program_rcv_group()
1341 flow->tid_entry[flow->tidcnt - 1]); in kern_program_rcv_group()
1344 flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); in kern_program_rcv_group()
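
Line 1344 accumulates flow->npkts with a ceiling division written as a shift: pmtu_pg is the number of pages covered by one pMTU-sized packet and is always a power of two. An illustrative helper (not part of the driver; ilog2() comes from <linux/log2.h>) showing the same arithmetic:

        static inline u32 pages_to_pkts(u32 npages, u32 pmtu)
        {
                u32 pmtu_pg = pmtu >> PAGE_SHIFT;       /* e.g. 8192 >> 12 = 2 */

                /* ceiling(npages / pmtu_pg): 5 pages, pmtu_pg = 2 -> 3 pkts */
                return (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
        }
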
1361 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num) in kern_unprogram_rcv_group() argument
1363 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1365 struct kern_tid_node *node = &flow->tnode[grp_num]; in kern_unprogram_rcv_group()
1392 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group()
1400 static void kern_program_rcvarray(struct tid_rdma_flow *flow) in kern_program_rcvarray() argument
1405 flow->npkts = 0; in kern_program_rcvarray()
1406 flow->tidcnt = 0; in kern_program_rcvarray()
1407 for (i = 0; i < flow->tnode_cnt; i++) in kern_program_rcvarray()
1408 kern_program_rcv_group(flow, i, &pset_idx); in kern_program_rcvarray()
1409 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); in kern_program_rcvarray()
1458 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup() local
1482 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { in hfi1_kern_exp_rcv_setup()
1483 hfi1_wait_kmem(flow->req->qp); in hfi1_kern_exp_rcv_setup()
1488 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1496 if (kern_alloc_tids(flow)) in hfi1_kern_exp_rcv_setup()
1502 kern_program_rcvarray(flow); in hfi1_kern_exp_rcv_setup()
1512 memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); in hfi1_kern_exp_rcv_setup()
1513 flow->idx = qpriv->flow_state.index; in hfi1_kern_exp_rcv_setup()
1514 flow->flow_state.generation = qpriv->flow_state.generation; in hfi1_kern_exp_rcv_setup()
1515 flow->flow_state.spsn = qpriv->flow_state.psn; in hfi1_kern_exp_rcv_setup()
1516 flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; in hfi1_kern_exp_rcv_setup()
1517 flow->flow_state.r_next_psn = in hfi1_kern_exp_rcv_setup()
1518 full_flow_psn(flow, flow->flow_state.spsn); in hfi1_kern_exp_rcv_setup()
1519 qpriv->flow_state.psn += flow->npkts; in hfi1_kern_exp_rcv_setup()
1521 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1530 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
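
The flow-state setup at lines 1512-1519 numbers the segment's packets: spsn comes from the per-QP TID flow state, lpsn = spsn + npkts - 1, and r_next_psn is the full KDETH PSN for spsn. The sketch below shows what full_flow_psn() computes, inferred from how the wire PSN is assembled at lines 1746-1748 and 2384-2386 rather than from the driver's exact definition: the flow generation sits in the bits above the per-flow sequence number.

        static u32 full_flow_psn_sketch(struct tid_rdma_flow *flow, u32 psn)
        {
                return mask_psn((flow->flow_state.generation <<
                                 HFI1_KDETH_BTH_SEQ_SHIFT) |
                                (psn & HFI1_KDETH_BTH_SEQ_MASK));
        }
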
1535 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow) in hfi1_tid_rdma_reset_flow() argument
1537 flow->npagesets = 0; in hfi1_tid_rdma_reset_flow()
1549 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_kern_exp_rcv_clear() local
1562 for (i = 0; i < flow->tnode_cnt; i++) in hfi1_kern_exp_rcv_clear()
1563 kern_unprogram_rcv_group(flow, i); in hfi1_kern_exp_rcv_clear()
1565 flow->tnode_cnt = 0; in hfi1_kern_exp_rcv_clear()
1570 dma_unmap_flow(flow); in hfi1_kern_exp_rcv_clear()
1572 hfi1_tid_rdma_reset_flow(flow); in hfi1_kern_exp_rcv_clear()
1678 struct tid_rdma_flow *flow; in find_flow_ib() local
1684 flow = &req->flows[tail]; in find_flow_ib()
1685 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 && in find_flow_ib()
1686 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) { in find_flow_ib()
1689 return flow; in find_flow_ib()
1701 struct tid_rdma_flow *flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_packet() local
1711 *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); in hfi1_build_tid_rdma_read_packet()
1712 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_read_packet()
1715 req_addr = &flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_packet()
1716 req_len = sizeof(*flow->tid_entry) * in hfi1_build_tid_rdma_read_packet()
1717 (flow->tidcnt - flow->tid_idx); in hfi1_build_tid_rdma_read_packet()
1742 req->cur_seg * req->seg_len + flow->sent); in hfi1_build_tid_rdma_read_packet()
1746 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_read_packet()
1748 ((flow->flow_state.spsn + flow->pkt) & in hfi1_build_tid_rdma_read_packet()
1752 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_read_packet()
1762 flow->sent += *len; in hfi1_build_tid_rdma_read_packet()
1787 struct tid_rdma_flow *flow = NULL; in hfi1_build_tid_rdma_read_req() local
1857 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_read_req()
1858 flow->pkt = 0; in hfi1_build_tid_rdma_read_req()
1859 flow->tid_idx = 0; in hfi1_build_tid_rdma_read_req()
1860 flow->sent = 0; in hfi1_build_tid_rdma_read_req()
1863 flow->flow_state.ib_spsn = req->s_next_psn; in hfi1_build_tid_rdma_read_req()
1864 flow->flow_state.ib_lpsn = in hfi1_build_tid_rdma_read_req()
1865 flow->flow_state.ib_spsn + flow->npkts - 1; in hfi1_build_tid_rdma_read_req()
1869 req->s_next_psn += flow->npkts; in hfi1_build_tid_rdma_read_req()
1890 struct tid_rdma_flow *flow; in tid_rdma_rcv_read_request() local
1896 flow = &req->flows[req->setup_head]; in tid_rdma_rcv_read_request()
1900 if (pktlen > sizeof(flow->tid_entry)) in tid_rdma_rcv_read_request()
1902 memcpy(flow->tid_entry, packet->ebuf, pktlen); in tid_rdma_rcv_read_request()
1903 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in tid_rdma_rcv_read_request()
1909 flow->npkts = rvt_div_round_up_mtu(qp, len); in tid_rdma_rcv_read_request()
1910 for (i = 0; i < flow->tidcnt; i++) { in tid_rdma_rcv_read_request()
1912 flow->tid_entry[i]); in tid_rdma_rcv_read_request()
1913 tlen = EXP_TID_GET(flow->tid_entry[i], LEN); in tid_rdma_rcv_read_request()
1930 flow->pkt = 0; in tid_rdma_rcv_read_request()
1931 flow->tid_idx = 0; in tid_rdma_rcv_read_request()
1932 flow->tid_offset = 0; in tid_rdma_rcv_read_request()
1933 flow->sent = 0; in tid_rdma_rcv_read_request()
1934 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp); in tid_rdma_rcv_read_request()
1935 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in tid_rdma_rcv_read_request()
1938 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in tid_rdma_rcv_read_request()
1939 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in tid_rdma_rcv_read_request()
1940 flow->length = len; in tid_rdma_rcv_read_request()
1942 flow->flow_state.lpsn = flow->flow_state.spsn + in tid_rdma_rcv_read_request()
1943 flow->npkts - 1; in tid_rdma_rcv_read_request()
1944 flow->flow_state.ib_spsn = psn; in tid_rdma_rcv_read_request()
1945 flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; in tid_rdma_rcv_read_request()
1947 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); in tid_rdma_rcv_read_request()
1959 e->lpsn = psn + flow->npkts - 1; in tid_rdma_rcv_read_request()
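
In tid_rdma_rcv_read_request() the TID list is copied straight out of the packet (lines 1900-1903) and then walked to confirm it actually describes the requested length (lines 1910-1913). A hedged sketch of that validation, summing the per-entry page counts via EXP_TID_GET(..., LEN); this is an illustrative helper, not the driver's exact code path:

        static bool tid_list_covers_len(struct tid_rdma_flow *flow, u32 len)
        {
                u32 i, pages = 0;

                for (i = 0; i < flow->tidcnt; i++) {
                        u32 tlen = EXP_TID_GET(flow->tid_entry[i], LEN);

                        if (!tlen)              /* reject a zero-length entry */
                                return false;
                        pages += tlen;
                }
                return (u64)pages * PAGE_SIZE >= len;
        }
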
2344 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_read_resp() local
2345 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_read_resp()
2353 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_read_resp()
2354 flow->sent += *len; in hfi1_build_tid_rdma_read_resp()
2355 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_read_resp()
2356 last_pkt = (flow->sent >= flow->length); in hfi1_build_tid_rdma_read_resp()
2358 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_read_resp()
2359 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_read_resp()
2373 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_read_resp()
2379 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn + in hfi1_build_tid_rdma_read_resp()
2380 flow->pkt)); in hfi1_build_tid_rdma_read_resp()
2383 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_read_resp()
2384 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_read_resp()
2386 (flow->flow_state.generation << in hfi1_build_tid_rdma_read_resp()
2395 flow->tid_offset = 0; in hfi1_build_tid_rdma_read_resp()
2396 flow->tid_idx++; in hfi1_build_tid_rdma_read_resp()
2398 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_read_resp()
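
The read-response builder above walks the TID list one pMTU at a time: each packet carries min(pmtu, tidlen - tid_offset) bytes (line 2353), and once the current TID entry is consumed the walk resets the offset and moves to the next entry (lines 2395-2398). The write-data builder at lines 4213-4252 follows the same walk. A condensed sketch of the advance logic; the exact branch condition is not among the matched lines, so the one below is an assumption:

        /* Sketch of the per-TID walk; field names match the listed lines. */
        *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
        flow->sent += *len;
        next_offset = flow->tid_offset + *len;
        if (next_offset >= tidlen) {            /* current TID entry exhausted */
                flow->tid_offset = 0;
                flow->tid_idx++;
        } else {
                flow->tid_offset = next_offset;
        }
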
2449 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_read_resp() local
2467 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_read_resp()
2469 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) { in hfi1_rc_rcv_tid_rdma_read_resp()
2470 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_read_resp()
2472 if (cmp_psn(kpsn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_read_resp()
2474 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2505 flow->flow_state.r_next_psn = mask_psn(kpsn + 1); in hfi1_rc_rcv_tid_rdma_read_resp()
2524 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); in hfi1_rc_rcv_tid_rdma_read_resp()
2626 struct tid_rdma_flow *flow; in restart_tid_rdma_read_req() local
2631 flow = &req->flows[req->clear_tail]; in restart_tid_rdma_read_req()
2632 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); in restart_tid_rdma_read_req()
2658 struct tid_rdma_flow *flow; in handle_read_kdeth_eflags() local
2749 flow = &req->flows[req->clear_tail]; in handle_read_kdeth_eflags()
2752 flow); in handle_read_kdeth_eflags()
2755 flow->flow_state.r_next_psn); in handle_read_kdeth_eflags()
2778 fpsn = full_flow_psn(flow, in handle_read_kdeth_eflags()
2779 flow->flow_state.lpsn); in handle_read_kdeth_eflags()
2786 flow->flow_state.r_next_psn = in handle_read_kdeth_eflags()
2792 flow->idx); in handle_read_kdeth_eflags()
2793 flow->flow_state.r_next_psn = last_psn; in handle_read_kdeth_eflags()
2858 struct tid_rdma_flow *flow; in hfi1_handle_kdeth_eflags() local
2937 flow = &req->flows[req->clear_tail]; in hfi1_handle_kdeth_eflags()
2943 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow); in hfi1_handle_kdeth_eflags()
2951 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2953 flow->idx); in hfi1_handle_kdeth_eflags()
2955 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
2967 flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
2979 if (psn == full_flow_psn(flow, in hfi1_handle_kdeth_eflags()
2980 flow->flow_state.lpsn)) in hfi1_handle_kdeth_eflags()
2982 flow->flow_state.r_next_psn = in hfi1_handle_kdeth_eflags()
2985 flow->flow_state.r_next_psn; in hfi1_handle_kdeth_eflags()
3025 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn); in hfi1_handle_kdeth_eflags()
3041 struct tid_rdma_flow *flow; in hfi1_tid_rdma_restart_req() local
3049 flow = find_flow_ib(req, *bth2, &fidx); in hfi1_tid_rdma_restart_req()
3050 if (!flow) { in hfi1_tid_rdma_restart_req()
3061 flow = &req->flows[fidx]; in hfi1_tid_rdma_restart_req()
3066 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn); in hfi1_tid_rdma_restart_req()
3069 full_flow_psn(flow, in hfi1_tid_rdma_restart_req()
3070 flow->flow_state.spsn)); in hfi1_tid_rdma_restart_req()
3072 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3073 diff = delta_pkts + flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3075 flow->sent = 0; in hfi1_tid_rdma_restart_req()
3076 flow->pkt = 0; in hfi1_tid_rdma_restart_req()
3077 flow->tid_idx = 0; in hfi1_tid_rdma_restart_req()
3078 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3080 for (tididx = 0; tididx < flow->tidcnt; tididx++) { in hfi1_tid_rdma_restart_req()
3081 u32 tidentry = flow->tid_entry[tididx], tidlen, in hfi1_tid_rdma_restart_req()
3084 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3088 flow->pkt += npkts; in hfi1_tid_rdma_restart_req()
3089 flow->sent += (npkts == tidnpkts ? tidlen : in hfi1_tid_rdma_restart_req()
3091 flow->tid_offset += npkts * qp->pmtu; in hfi1_tid_rdma_restart_req()
3099 flow->sent, 0); in hfi1_tid_rdma_restart_req()
3107 flow->pkt -= flow->resync_npkts; in hfi1_tid_rdma_restart_req()
3110 if (flow->tid_offset == in hfi1_tid_rdma_restart_req()
3111 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { in hfi1_tid_rdma_restart_req()
3113 flow->tid_offset = 0; in hfi1_tid_rdma_restart_req()
3115 flow->tid_idx = tididx; in hfi1_tid_rdma_restart_req()
3122 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3831 struct tid_rdma_flow *flow = NULL; in hfi1_build_tid_rdma_write_resp() local
3840 flow = &req->flows[req->flow_idx]; in hfi1_build_tid_rdma_write_resp()
3861 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3868 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3876 flow->flow_state.resp_ib_psn = bth2; in hfi1_build_tid_rdma_write_resp()
3877 resp_addr = (void *)flow->tid_entry; in hfi1_build_tid_rdma_write_resp()
3878 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt; in hfi1_build_tid_rdma_write_resp()
3908 cpu_to_be32((flow->flow_state.generation << in hfi1_build_tid_rdma_write_resp()
3910 (flow->flow_state.spsn & in hfi1_build_tid_rdma_write_resp()
3914 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_resp()
4034 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_write_resp() local
4094 flow = &req->flows[req->setup_head]; in hfi1_rc_rcv_tid_rdma_write_resp()
4095 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4096 flow->tid_idx = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4097 flow->tid_offset = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4098 flow->sent = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4099 flow->resync_npkts = 0; in hfi1_rc_rcv_tid_rdma_write_resp()
4100 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp); in hfi1_rc_rcv_tid_rdma_write_resp()
4101 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & in hfi1_rc_rcv_tid_rdma_write_resp()
4104 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; in hfi1_rc_rcv_tid_rdma_write_resp()
4105 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_resp()
4106 flow->flow_state.resp_ib_psn = psn; in hfi1_rc_rcv_tid_rdma_write_resp()
4107 flow->length = min_t(u32, req->seg_len, in hfi1_rc_rcv_tid_rdma_write_resp()
4110 flow->npkts = rvt_div_round_up_mtu(qp, flow->length); in hfi1_rc_rcv_tid_rdma_write_resp()
4111 flow->flow_state.lpsn = flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_write_resp()
4112 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_write_resp()
4115 if (pktlen > sizeof(flow->tid_entry)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4119 memcpy(flow->tid_entry, packet->ebuf, pktlen); in hfi1_rc_rcv_tid_rdma_write_resp()
4120 flow->tidcnt = pktlen / sizeof(*flow->tid_entry); in hfi1_rc_rcv_tid_rdma_write_resp()
4121 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow); in hfi1_rc_rcv_tid_rdma_write_resp()
4129 for (i = 0; i < flow->tidcnt; i++) { in hfi1_rc_rcv_tid_rdma_write_resp()
4131 qp, i, flow->tid_entry[i]); in hfi1_rc_rcv_tid_rdma_write_resp()
4132 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) { in hfi1_rc_rcv_tid_rdma_write_resp()
4136 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN); in hfi1_rc_rcv_tid_rdma_write_resp()
4138 if (tidlen * PAGE_SIZE < flow->length) { in hfi1_rc_rcv_tid_rdma_write_resp()
4198 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_build_tid_rdma_packet() local
4202 u32 tidentry = flow->tid_entry[flow->tid_idx]; in hfi1_build_tid_rdma_packet()
4213 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_packet()
4214 flow->sent += *len; in hfi1_build_tid_rdma_packet()
4215 next_offset = flow->tid_offset + *len; in hfi1_build_tid_rdma_packet()
4216 last_pkt = (flow->tid_idx == (flow->tidcnt - 1) && in hfi1_build_tid_rdma_packet()
4217 next_offset >= tidlen) || (flow->sent >= flow->length); in hfi1_build_tid_rdma_packet()
4218 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_packet()
4219 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_packet()
4229 KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om); in hfi1_build_tid_rdma_packet()
4234 *bth1 = flow->tid_qpn; in hfi1_build_tid_rdma_packet()
4235 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & in hfi1_build_tid_rdma_packet()
4237 (flow->flow_state.generation << in hfi1_build_tid_rdma_packet()
4241 if (flow->flow_state.lpsn + 1 + in hfi1_build_tid_rdma_packet()
4249 flow->tid_offset = 0; in hfi1_build_tid_rdma_packet()
4250 flow->tid_idx++; in hfi1_build_tid_rdma_packet()
4252 flow->tid_offset = next_offset; in hfi1_build_tid_rdma_packet()
4265 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_write_data() local
4283 flow = &req->flows[req->clear_tail]; in hfi1_rc_rcv_tid_rdma_write_data()
4284 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) { in hfi1_rc_rcv_tid_rdma_write_data()
4285 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_write_data()
4287 if (cmp_psn(psn, flow->flow_state.r_next_psn)) in hfi1_rc_rcv_tid_rdma_write_data()
4290 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4312 full_flow_psn(flow, flow->flow_state.spsn)) * in hfi1_rc_rcv_tid_rdma_write_data()
4334 flow->flow_state.r_next_psn = mask_psn(psn + 1); in hfi1_rc_rcv_tid_rdma_write_data()
4337 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_data()
4393 priv->r_next_psn_kdeth = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4402 priv->s_nak_psn = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_write_data()
4421 struct tid_rdma_flow *flow = &req->flows[iflow]; in hfi1_build_tid_rdma_write_ack() local
4442 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_build_tid_rdma_write_ack()
4448 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << in hfi1_build_tid_rdma_write_ack()
4454 cpu_to_be32(flow->flow_state.resp_ib_psn); in hfi1_build_tid_rdma_write_ack()
4492 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_ack() local
4533 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4534 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4537 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 || in hfi1_rc_rcv_tid_rdma_ack()
4538 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0) in hfi1_rc_rcv_tid_rdma_ack()
4542 full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 && in hfi1_rc_rcv_tid_rdma_ack()
4547 req->r_last_acked = flow->flow_state.resp_ib_psn; in hfi1_rc_rcv_tid_rdma_ack()
4563 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4564 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4625 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4635 fpsn = full_flow_psn(flow, flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_ack()
4643 if (flow->flow_state.generation != in hfi1_rc_rcv_tid_rdma_ack()
4646 flow->resync_npkts += in hfi1_rc_rcv_tid_rdma_ack()
4663 flow = &rptr->flows[fidx]; in hfi1_rc_rcv_tid_rdma_ack()
4664 gen = flow->flow_state.generation; in hfi1_rc_rcv_tid_rdma_ack()
4666 flow->flow_state.spsn != in hfi1_rc_rcv_tid_rdma_ack()
4669 lpsn = flow->flow_state.lpsn; in hfi1_rc_rcv_tid_rdma_ack()
4670 lpsn = full_flow_psn(flow, lpsn); in hfi1_rc_rcv_tid_rdma_ack()
4671 flow->npkts = in hfi1_rc_rcv_tid_rdma_ack()
4675 flow->flow_state.generation = in hfi1_rc_rcv_tid_rdma_ack()
4677 flow->flow_state.spsn = spsn; in hfi1_rc_rcv_tid_rdma_ack()
4678 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_ack()
4679 flow->flow_state.spsn + in hfi1_rc_rcv_tid_rdma_ack()
4680 flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_ack()
4681 flow->pkt = 0; in hfi1_rc_rcv_tid_rdma_ack()
4682 spsn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4683 resync_psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_ack()
4686 flow); in hfi1_rc_rcv_tid_rdma_ack()
4711 flow = &req->flows[req->acked_tail]; in hfi1_rc_rcv_tid_rdma_ack()
4712 flpsn = full_flow_psn(flow, flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_ack()
4716 flow); in hfi1_rc_rcv_tid_rdma_ack()
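
When hfi1_rc_rcv_tid_rdma_ack() processes a RESYNC (lines 4663-4686), every flow that has not been fully acked is restamped: it takes the new generation, a fresh spsn continuing from the resync point, lpsn = spsn + npkts - 1, and its packet counter is reset so the whole segment is resent. A sketch of that renumbering, limited to what the matched lines show; new_generation and spsn stand for the values derived from the RESYNC and are illustrative names:

        /* Hedged sketch of the per-flow restamp inside the RESYNC loop. */
        flow->flow_state.generation = new_generation;
        flow->flow_state.spsn = spsn;
        flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
        flow->pkt = 0;                  /* retransmit this flow from its start */
        spsn += flow->npkts;            /* the next flow continues the numbering */
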
4838 struct tid_rdma_flow *flow = &req->flows[fidx]; in hfi1_build_tid_rdma_resync() local
4848 generation = kern_flow_generation_next(flow->flow_state.generation); in hfi1_build_tid_rdma_resync()
4866 struct tid_rdma_flow *flow; in hfi1_rc_rcv_tid_rdma_resync() local
4935 flow = &req->flows[flow_idx]; in hfi1_rc_rcv_tid_rdma_resync()
4936 lpsn = full_flow_psn(flow, in hfi1_rc_rcv_tid_rdma_resync()
4937 flow->flow_state.lpsn); in hfi1_rc_rcv_tid_rdma_resync()
4938 next = flow->flow_state.r_next_psn; in hfi1_rc_rcv_tid_rdma_resync()
4939 flow->npkts = delta_psn(lpsn, next - 1); in hfi1_rc_rcv_tid_rdma_resync()
4940 flow->flow_state.generation = fs->generation; in hfi1_rc_rcv_tid_rdma_resync()
4941 flow->flow_state.spsn = fs->psn; in hfi1_rc_rcv_tid_rdma_resync()
4942 flow->flow_state.lpsn = in hfi1_rc_rcv_tid_rdma_resync()
4943 flow->flow_state.spsn + flow->npkts - 1; in hfi1_rc_rcv_tid_rdma_resync()
4944 flow->flow_state.r_next_psn = in hfi1_rc_rcv_tid_rdma_resync()
4945 full_flow_psn(flow, in hfi1_rc_rcv_tid_rdma_resync()
4946 flow->flow_state.spsn); in hfi1_rc_rcv_tid_rdma_resync()
4947 fs->psn += flow->npkts; in hfi1_rc_rcv_tid_rdma_resync()
4949 flow); in hfi1_rc_rcv_tid_rdma_resync()
5185 u16 flow; in make_tid_rdma_ack() local
5249 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS); in make_tid_rdma_ack()
5277 full_flow_psn(&req->flows[flow], in make_tid_rdma_ack()
5278 req->flows[flow].flow_state.lpsn)) > 0))) { in make_tid_rdma_ack()
5288 flow = req->acked_tail; in make_tid_rdma_ack()
5296 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1, in make_tid_rdma_ack()
5506 struct tid_rdma_flow *flow, in update_r_next_psn_fecn() argument
5517 flow->flow_state.r_next_psn = in update_r_next_psn_fecn()
5518 read_r_next_psn(dd, rcd->ctxt, flow->idx); in update_r_next_psn_fecn()