Lines matching full:req in drivers/infiniband/hw/hfi1/user_sdma.c. Each hit shows the source line number, the matched line, and the enclosing function.

79 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
82 static void user_sdma_free_request(struct user_sdma_request *req);
83 static int check_header_template(struct user_sdma_request *req,
86 static int set_txreq_header(struct user_sdma_request *req,
88 static int set_txreq_header_ahg(struct user_sdma_request *req,
120 static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
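
Taken together, these forward declarations sketch the request pipeline: a request is parsed once, packets are built and submitted until info.npkts are out, and the completion path frees the request. Below is a minimal userspace model of that control flow; every model_* name is a hypothetical stand-in, not a kernel type, and the kernel-line references in the comments point back at hits in this listing.

/*
 * Hypothetical userspace model of the pipeline the prototypes above
 * suggest: parse once, build/submit packets until npkts are out,
 * free from the completion side. Not the kernel's types.
 */
#include <stdio.h>
#include <stdint.h>

struct model_req {
	uint16_t npkts;        /* total packets the user asked for */
	uint16_t seqsubmitted; /* packets handed to the DMA engine */
	uint16_t seqcomp;      /* highest completed packet */
};

static int model_send_pkts(struct model_req *req, uint16_t maxpkts)
{
	uint16_t left = req->npkts - req->seqsubmitted;

	if (!maxpkts || maxpkts > left)
		maxpkts = left;            /* mirrors lines 769-770 */
	req->seqsubmitted += maxpkts;
	return 0;
}

static void model_complete(struct model_req *req, uint16_t seqnum)
{
	req->seqcomp = seqnum;             /* mirrors line 1255 */
	if (req->seqcomp == req->npkts - 1)
		printf("last packet done, request can be freed\n");
}

int main(void)
{
	struct model_req req = { .npkts = 4 };

	while (req.seqsubmitted != req.npkts)  /* mirrors line 601 */
		model_send_pkts(&req, 2);
	for (uint16_t s = 0; s < req.npkts; s++)
		model_complete(&req, s);
	return 0;
}
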
357 struct user_sdma_request *req; in hfi1_user_sdma_process_request() local
364 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { in hfi1_user_sdma_process_request()
369 iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr)); in hfi1_user_sdma_process_request()
419 req = pq->reqs + info.comp_idx; in hfi1_user_sdma_process_request()
420 req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */ in hfi1_user_sdma_process_request()
421 req->data_len = 0; in hfi1_user_sdma_process_request()
422 req->pq = pq; in hfi1_user_sdma_process_request()
423 req->cq = cq; in hfi1_user_sdma_process_request()
424 req->ahg_idx = -1; in hfi1_user_sdma_process_request()
425 req->iov_idx = 0; in hfi1_user_sdma_process_request()
426 req->sent = 0; in hfi1_user_sdma_process_request()
427 req->seqnum = 0; in hfi1_user_sdma_process_request()
428 req->seqcomp = 0; in hfi1_user_sdma_process_request()
429 req->seqsubmitted = 0; in hfi1_user_sdma_process_request()
430 req->tids = NULL; in hfi1_user_sdma_process_request()
431 req->has_error = 0; in hfi1_user_sdma_process_request()
432 INIT_LIST_HEAD(&req->txps); in hfi1_user_sdma_process_request()
434 memcpy(&req->info, &info, sizeof(info)); in hfi1_user_sdma_process_request()
441 if (req->data_iovs < 2) { in hfi1_user_sdma_process_request()
442 SDMA_DBG(req, in hfi1_user_sdma_process_request()
447 req->data_iovs--; in hfi1_user_sdma_process_request()
450 if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) { in hfi1_user_sdma_process_request()
451 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs, in hfi1_user_sdma_process_request()
458 ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info), in hfi1_user_sdma_process_request()
459 sizeof(req->hdr)); in hfi1_user_sdma_process_request()
461 SDMA_DBG(req, "Failed to copy header template (%d)", ret); in hfi1_user_sdma_process_request()
468 req->hdr.pbc[2] = 0; in hfi1_user_sdma_process_request()
471 opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff; in hfi1_user_sdma_process_request()
474 SDMA_DBG(req, "Invalid opcode (%d)", opcode); in hfi1_user_sdma_process_request()
483 vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF; in hfi1_user_sdma_process_request()
484 sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) | in hfi1_user_sdma_process_request()
485 (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4)); in hfi1_user_sdma_process_request()
488 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl); in hfi1_user_sdma_process_request()
494 pkey = (u16)be32_to_cpu(req->hdr.bth[0]); in hfi1_user_sdma_process_request()
495 slid = be16_to_cpu(req->hdr.lrh[3]); in hfi1_user_sdma_process_request()
506 if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) { in hfi1_user_sdma_process_request()
507 SDMA_DBG(req, "User tried to pass in a GRH"); in hfi1_user_sdma_process_request()
512 req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]); in hfi1_user_sdma_process_request()
517 req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) * in hfi1_user_sdma_process_request()
518 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? in hfi1_user_sdma_process_request()
521 info.comp_idx, req->tidoffset); in hfi1_user_sdma_process_request()
525 for (i = 0; i < req->data_iovs; i++) { in hfi1_user_sdma_process_request()
526 req->iovs[i].offset = 0; in hfi1_user_sdma_process_request()
527 INIT_LIST_HEAD(&req->iovs[i].list); in hfi1_user_sdma_process_request()
528 memcpy(&req->iovs[i].iov, in hfi1_user_sdma_process_request()
530 sizeof(req->iovs[i].iov)); in hfi1_user_sdma_process_request()
531 if (req->iovs[i].iov.iov_len == 0) { in hfi1_user_sdma_process_request()
535 req->data_len += req->iovs[i].iov.iov_len; in hfi1_user_sdma_process_request()
538 info.comp_idx, req->data_len); in hfi1_user_sdma_process_request()
539 if (pcount > req->info.npkts) in hfi1_user_sdma_process_request()
540 pcount = req->info.npkts; in hfi1_user_sdma_process_request()
549 if (req_opcode(req->info.ctrl) == EXPECTED) { in hfi1_user_sdma_process_request()
550 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids); in hfi1_user_sdma_process_request()
565 ntids * sizeof(*req->tids)); in hfi1_user_sdma_process_request()
568 SDMA_DBG(req, "Failed to copy %d TIDs (%d)", in hfi1_user_sdma_process_request()
572 req->tids = tmp; in hfi1_user_sdma_process_request()
573 req->n_tids = ntids; in hfi1_user_sdma_process_request()
574 req->tididx = 0; in hfi1_user_sdma_process_request()
578 dlid = be16_to_cpu(req->hdr.lrh[1]); in hfi1_user_sdma_process_request()
581 req->sde = sdma_select_user_engine(dd, selector, vl); in hfi1_user_sdma_process_request()
583 if (!req->sde || !sdma_running(req->sde)) { in hfi1_user_sdma_process_request()
589 if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) in hfi1_user_sdma_process_request()
590 req->ahg_idx = sdma_ahg_alloc(req->sde); in hfi1_user_sdma_process_request()
601 while (req->seqsubmitted != req->info.npkts) { in hfi1_user_sdma_process_request()
602 ret = user_sdma_send_pkts(req, pcount); in hfi1_user_sdma_process_request()
626 if (req->seqsubmitted < req->info.npkts) { in hfi1_user_sdma_process_request()
627 if (req->seqsubmitted) in hfi1_user_sdma_process_request()
629 (req->seqcomp == req->seqsubmitted - 1)); in hfi1_user_sdma_process_request()
630 user_sdma_free_request(req); in hfi1_user_sdma_process_request()
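
The hits above walk the header template validation in hfi1_user_sdma_process_request(): the opcode comes from the top byte of bth[0], the VL from pbc[0], and the 5-bit SC is assembled from lrh[0] plus the sc4 bit in pbc[1]. A rough userspace rendition of that bit manipulation follows, with ntohl()/ntohs() standing in for be32_to_cpu()/be16_to_cpu() on an assumed little-endian host and made-up sample header words.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t bth0 = htonl(0x64000000u);  /* sample BTH word 0 */
	uint16_t lrh0 = htons(0x2000u);      /* sample LRH word 0 */
	uint16_t pbc0 = 0x1000u;             /* PBC words are LE */
	uint16_t pbc1 = 0x4000u;

	int opcode = (ntohl(bth0) >> 24) & 0xff;        /* line 471 */
	int vl = (pbc0 >> 12) & 0xF;                    /* line 483 */
	/* SC is 5 bits: low 4 from LRH, bit 4 (sc4) from PBC */
	int sc = ((ntohs(lrh0) >> 12) & 0xF) |
		 (((pbc1 >> 14) & 0x1) << 4);           /* lines 484-485 */
	int pkey = (uint16_t)ntohl(bth0);               /* line 494 */

	/* LNH bits == 0x3 mean a GRH is present; this path rejects it */
	if ((ntohs(lrh0) & 0x3) == 0x3)
		printf("GRH present: rejected\n");

	printf("opcode %#x vl %d sc %d pkey %#x\n", opcode, vl, sc, pkey);
	return 0;
}
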
637 static inline u32 compute_data_length(struct user_sdma_request *req, in compute_data_length() argument
654 if (!req->seqnum) { in compute_data_length()
655 if (req->data_len < sizeof(u32)) in compute_data_length()
656 len = req->data_len; in compute_data_length()
658 len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) - in compute_data_length()
660 } else if (req_opcode(req->info.ctrl) == EXPECTED) { in compute_data_length()
661 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) * in compute_data_length()
667 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize); in compute_data_length()
669 if (unlikely(!len) && ++req->tididx < req->n_tids && in compute_data_length()
670 req->tids[req->tididx]) { in compute_data_length()
671 tidlen = EXP_TID_GET(req->tids[req->tididx], in compute_data_length()
673 req->tidoffset = 0; in compute_data_length()
674 len = min_t(u32, tidlen, req->info.fragsize); in compute_data_length()
681 len = min(len, req->data_len - req->sent); in compute_data_length()
683 len = min(req->data_len - req->sent, (u32)req->info.fragsize); in compute_data_length()
685 trace_hfi1_sdma_user_compute_length(req->pq->dd, in compute_data_length()
686 req->pq->ctxt, in compute_data_length()
687 req->pq->subctxt, in compute_data_length()
688 req->info.comp_idx, in compute_data_length()
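
compute_data_length() picks each packet's payload size: the first packet is sized from the LRH length, expected (TID) traffic is clamped to what remains of the current TID entry, and everything else to fragsize, never exceeding the unsent remainder. A compact userspace model, with illustrative parameters in place of the req fields:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

static uint32_t model_data_length(int first_pkt, int expected,
				  uint32_t data_len, uint32_t sent,
				  uint32_t tidlen, uint32_t tidoffset,
				  uint32_t fragsize, uint32_t lrh_payload)
{
	if (first_pkt)                               /* lines 654-658 */
		return data_len < sizeof(uint32_t) ? data_len : lrh_payload;
	if (expected) {
		uint32_t len = min_u32(tidlen - tidoffset, fragsize);

		/* cap by the unsent remainder, as on line 681 */
		return min_u32(len, data_len - sent);
	}
	return min_u32(data_len - sent, fragsize);   /* line 683 */
}

int main(void)
{
	/* 10 KiB request, 4 KiB fragments, plain (non-TID) traffic */
	printf("%u\n", model_data_length(0, 0, 10240, 8192, 0, 0, 4096, 0));
	return 0;
}
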
706 static int user_sdma_txadd_ahg(struct user_sdma_request *req, in user_sdma_txadd_ahg() argument
711 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]); in user_sdma_txadd_ahg()
712 u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen)); in user_sdma_txadd_ahg()
713 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd_ahg()
723 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr)); in user_sdma_txadd_ahg()
728 ret = check_header_template(req, &tx->hdr, lrhlen, datalen); in user_sdma_txadd_ahg()
732 sizeof(tx->hdr) + datalen, req->ahg_idx, in user_sdma_txadd_ahg()
742 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) in user_sdma_send_pkts() argument
751 if (!req->pq) in user_sdma_send_pkts()
754 pq = req->pq; in user_sdma_send_pkts()
757 if (READ_ONCE(req->has_error)) in user_sdma_send_pkts()
763 if (unlikely(req->seqnum == req->info.npkts)) { in user_sdma_send_pkts()
764 if (!list_empty(&req->txps)) in user_sdma_send_pkts()
769 if (!maxpkts || maxpkts > req->info.npkts - req->seqnum) in user_sdma_send_pkts()
770 maxpkts = req->info.npkts - req->seqnum; in user_sdma_send_pkts()
780 if (READ_ONCE(req->has_error)) in user_sdma_send_pkts()
788 tx->req = req; in user_sdma_send_pkts()
795 if (req->seqnum == req->info.npkts - 1) in user_sdma_send_pkts()
804 if (req->data_len) { in user_sdma_send_pkts()
805 iovec = &req->iovs[req->iov_idx]; in user_sdma_send_pkts()
807 if (++req->iov_idx == req->data_iovs) { in user_sdma_send_pkts()
811 iovec = &req->iovs[req->iov_idx]; in user_sdma_send_pkts()
815 datalen = compute_data_length(req, tx); in user_sdma_send_pkts()
826 SDMA_DBG(req, in user_sdma_send_pkts()
835 if (req->ahg_idx >= 0) { in user_sdma_send_pkts()
836 if (!req->seqnum) { in user_sdma_send_pkts()
837 ret = user_sdma_txadd_ahg(req, tx, datalen); in user_sdma_send_pkts()
843 changes = set_txreq_header_ahg(req, tx, in user_sdma_send_pkts()
851 ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + in user_sdma_send_pkts()
861 ret = set_txreq_header(req, tx, datalen); in user_sdma_send_pkts()
866 req->koffset += datalen; in user_sdma_send_pkts()
867 if (req_opcode(req->info.ctrl) == EXPECTED) in user_sdma_send_pkts()
868 req->tidoffset += datalen; in user_sdma_send_pkts()
869 req->sent += datalen; in user_sdma_send_pkts()
871 ret = add_system_pages_to_sdma_packet(req, tx, iovec, in user_sdma_send_pkts()
875 iovec = &req->iovs[req->iov_idx]; in user_sdma_send_pkts()
877 list_add_tail(&tx->txreq.list, &req->txps); in user_sdma_send_pkts()
883 tx->seqnum = req->seqnum++; in user_sdma_send_pkts()
887 ret = sdma_send_txlist(req->sde, in user_sdma_send_pkts()
889 &req->txps, &count); in user_sdma_send_pkts()
890 req->seqsubmitted += count; in user_sdma_send_pkts()
891 if (req->seqsubmitted == req->info.npkts) { in user_sdma_send_pkts()
898 if (req->ahg_idx >= 0) in user_sdma_send_pkts()
899 sdma_ahg_free(req->sde, req->ahg_idx); in user_sdma_send_pkts()
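
A detail worth noting in user_sdma_send_pkts(): seqnum counts packets built (line 883), while seqsubmitted counts packets the engine actually accepted via the count out-parameter of sdma_send_txlist() (lines 887-890), so the two can diverge while descriptors wait on the txps list. A toy model of that accounting, with model_ring_submit() as a made-up stand-in for a ring that accepts at most three descriptors per call:

#include <stdio.h>
#include <stdint.h>

static uint16_t model_ring_submit(uint16_t pending, uint16_t ring_space)
{
	return pending < ring_space ? pending : ring_space;
}

int main(void)
{
	uint16_t seqnum = 0, seqsubmitted = 0, pending = 0, npkts = 8;

	while (seqsubmitted != npkts) {
		/* build up to 4 new packets onto the pending list */
		while (seqnum != npkts && pending < 4) {
			seqnum++;        /* tx->seqnum = req->seqnum++ */
			pending++;
		}
		/* engine accepts at most 3 descriptors per call here */
		uint16_t count = model_ring_submit(pending, 3);

		pending -= count;
		seqsubmitted += count;   /* req->seqsubmitted += count */
		printf("built %u submitted %u pending %u\n",
		       seqnum, seqsubmitted, pending);
	}
	return 0;
}
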
921 static int check_header_template(struct user_sdma_request *req, in check_header_template() argument
935 if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 || in check_header_template()
936 lrhlen > get_lrh_len(*hdr, req->info.fragsize)) in check_header_template()
939 if (req_opcode(req->info.ctrl) == EXPECTED) { in check_header_template()
946 u32 tidval = req->tids[req->tididx], in check_header_template()
954 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? in check_header_template()
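
check_header_template() begins with alignment sanity checks (lines 935-936): the fragment size must be a whole number of PIO blocks, and the LRH-derived length must be 4-byte aligned and fit within the fragment. Modeled below; PIO_BLOCK_SIZE is 64 in the hfi1 driver, and the sample values are otherwise arbitrary.

#include <stdio.h>
#include <stdint.h>

#define PIO_BLOCK_SIZE 64u

static int model_check_header(uint32_t fragsize, uint32_t lrhlen,
			      uint32_t max_lrhlen)
{
	if (fragsize % PIO_BLOCK_SIZE || (lrhlen & 0x3) ||
	    lrhlen > max_lrhlen)
		return -1;   /* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("%d\n", model_check_header(4096, 128, 4096)); /* ok: 0 */
	printf("%d\n", model_check_header(4000, 128, 4096)); /* bad frag */
	return 0;
}
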
991 static int set_txreq_header(struct user_sdma_request *req, in set_txreq_header() argument
994 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header()
1002 memcpy(hdr, &req->hdr, sizeof(*hdr)); in set_txreq_header()
1019 if (unlikely(req->seqnum == 2)) { in set_txreq_header()
1027 req->hdr.pbc[0] = hdr->pbc[0]; in set_txreq_header()
1028 req->hdr.lrh[2] = hdr->lrh[2]; in set_txreq_header()
1036 if (unlikely(!req->seqnum)) { in set_txreq_header()
1037 ret = check_header_template(req, hdr, lrhlen, datalen); in set_txreq_header()
1045 (req_opcode(req->info.ctrl) == EXPECTED), in set_txreq_header()
1046 req->seqnum)); in set_txreq_header()
1053 hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset); in set_txreq_header()
1055 if (req_opcode(req->info.ctrl) == EXPECTED) { in set_txreq_header()
1056 tidval = req->tids[req->tididx]; in set_txreq_header()
1061 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * in set_txreq_header()
1063 req->tidoffset = 0; in set_txreq_header()
1068 if (++req->tididx > req->n_tids - 1 || in set_txreq_header()
1069 !req->tids[req->tididx]) { in set_txreq_header()
1072 tidval = req->tids[req->tididx]; in set_txreq_header()
1091 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, in set_txreq_header()
1092 req->tidoffset, req->tidoffset >> omfactor, in set_txreq_header()
1095 req->tidoffset >> omfactor); in set_txreq_header()
1101 req->info.comp_idx, hdr, tidval); in set_txreq_header()
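
The TID bookkeeping at lines 1055-1072 is the core of the expected-receive path: once tidoffset has consumed the current TID entry (whose LEN field counts pages), the offset resets to zero and tididx advances; running out of non-zero TIDs mid-request is an error. A small model, assuming only the pages-encoded LEN field:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

static int model_advance_tid(uint32_t *tidoffset, uint16_t *tididx,
			     const uint32_t *tid_len_pages, uint16_t n_tids)
{
	if (*tidoffset == tid_len_pages[*tididx] * PAGE_SIZE) {
		*tidoffset = 0;
		/* running out of TIDs mid-request is an error */
		if (++(*tididx) > n_tids - 1 || !tid_len_pages[*tididx])
			return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t lens[] = { 2, 4 };   /* two TIDs: 8 KiB and 16 KiB */
	uint32_t off = 8192;
	uint16_t idx = 0;

	model_advance_tid(&off, &idx, lens, 2);
	printf("tididx %u tidoffset %u\n", (unsigned)idx, off);  /* 1, 0 */
	return 0;
}
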
1105 static int set_txreq_header_ahg(struct user_sdma_request *req, in set_txreq_header_ahg() argument
1111 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header_ahg()
1112 struct hfi1_pkt_header *hdr = &req->hdr; in set_txreq_header_ahg()
1134 val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) & in set_txreq_header_ahg()
1148 (__force u16)cpu_to_le16(req->koffset & 0xffff)); in set_txreq_header_ahg()
1152 (__force u16)cpu_to_le16(req->koffset >> 16)); in set_txreq_header_ahg()
1155 if (req_opcode(req->info.ctrl) == EXPECTED) { in set_txreq_header_ahg()
1158 tidval = req->tids[req->tididx]; in set_txreq_header_ahg()
1164 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * in set_txreq_header_ahg()
1166 req->tidoffset = 0; in set_txreq_header_ahg()
1171 if (++req->tididx > req->n_tids - 1 || in set_txreq_header_ahg()
1172 !req->tids[req->tididx]) in set_txreq_header_ahg()
1174 tidval = req->tids[req->tididx]; in set_txreq_header_ahg()
1184 ((req->tidoffset >> omfactor) in set_txreq_header_ahg()
1211 req->info.comp_idx, req->sde->this_idx, in set_txreq_header_ahg()
1212 req->ahg_idx, ahg, idx, tidval); in set_txreq_header_ahg()
1215 datalen, req->ahg_idx, idx, in set_txreq_header_ahg()
1216 ahg, sizeof(req->hdr), in set_txreq_header_ahg()
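
set_txreq_header_ahg() programs header deltas instead of re-copying the whole header per packet. Two of the deltas visible above, redone in userspace: the 24-bit BTH PSN advanced by seqnum (line 1134) and the 32-bit koffset split across two 16-bit KDETH words (lines 1148 and 1152). Sample values are fabricated.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bth2 = 0xfffffe;      /* sample starting PSN */
	uint16_t seqnum = 5;
	uint32_t koffset = 0x12345678;

	/* PSN is 24 bits wide, so the add wraps at 2^24 */
	uint32_t psn = (bth2 + seqnum) & 0xffffff;
	uint16_t ko_lo = koffset & 0xffff;   /* low KDETH swdata half */
	uint16_t ko_hi = koffset >> 16;      /* high half */

	printf("psn %#x koffset halves %#x %#x\n", psn, ko_lo, ko_hi);
	return 0;
}
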
1236 struct user_sdma_request *req; in user_sdma_txreq_cb() local
1241 if (!tx->req) in user_sdma_txreq_cb()
1244 req = tx->req; in user_sdma_txreq_cb()
1245 pq = req->pq; in user_sdma_txreq_cb()
1246 cq = req->cq; in user_sdma_txreq_cb()
1249 SDMA_DBG(req, "SDMA completion with error %d", in user_sdma_txreq_cb()
1251 WRITE_ONCE(req->has_error, 1); in user_sdma_txreq_cb()
1255 req->seqcomp = tx->seqnum; in user_sdma_txreq_cb()
1259 if (req->seqcomp != req->info.npkts - 1) in user_sdma_txreq_cb()
1262 user_sdma_free_request(req); in user_sdma_txreq_cb()
1263 set_comp_state(pq, cq, req->info.comp_idx, state, status); in user_sdma_txreq_cb()
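
user_sdma_txreq_cb() runs once per completed packet: an error latches has_error for the submit side, every completion records its sequence number in seqcomp, and only the final packet's completion frees the request and posts the completion-queue state (lines 1259-1263). Modeled below with hypothetical model_* names:

#include <stdio.h>
#include <stdint.h>

struct model_req {
	uint16_t npkts, seqcomp;
	int has_error;
};

static void model_txreq_cb(struct model_req *req, uint16_t seqnum,
			   int status_ok)
{
	if (!status_ok)
		req->has_error = 1;      /* WRITE_ONCE in the kernel */
	req->seqcomp = seqnum;
	if (req->seqcomp != req->npkts - 1)
		return;                  /* not the last packet yet */
	printf("free request, post completion (error=%d)\n",
	       req->has_error);
}

int main(void)
{
	struct model_req req = { .npkts = 3 };

	for (uint16_t s = 0; s < req.npkts; s++)
		model_txreq_cb(&req, s, 1);
	return 0;
}
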
1273 static void user_sdma_free_request(struct user_sdma_request *req) in user_sdma_free_request() argument
1275 if (!list_empty(&req->txps)) { in user_sdma_free_request()
1278 list_for_each_entry_safe(t, p, &req->txps, list) { in user_sdma_free_request()
1282 sdma_txclean(req->pq->dd, t); in user_sdma_free_request()
1283 kmem_cache_free(req->pq->txreq_cache, tx); in user_sdma_free_request()
1287 kfree(req->tids); in user_sdma_free_request()
1288 clear_bit(req->info.comp_idx, req->pq->req_in_use); in user_sdma_free_request()
1355 static int pin_system_pages(struct user_sdma_request *req, in pin_system_pages() argument
1359 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_system_pages()
1370 SDMA_DBG(req, "Evicting: nlocked %u npages %u", in pin_system_pages()
1377 SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u", in pin_system_pages()
1384 SDMA_DBG(req, "pinned %d", pinned); in pin_system_pages()
1389 SDMA_DBG(req, "npages %u pinned %d", npages, pinned); in pin_system_pages()
1397 SDMA_DBG(req, "done. pinned %d", pinned); in pin_system_pages()
1401 static int add_system_pinning(struct user_sdma_request *req, in add_system_pinning() argument
1406 struct hfi1_user_sdma_pkt_q *pq = req->pq; in add_system_pinning()
1415 ret = pin_system_pages(req, start, len, node, PFN_DOWN(len)); in add_system_pinning()
1430 static int get_system_cache_entry(struct user_sdma_request *req, in get_system_cache_entry() argument
1434 struct hfi1_user_sdma_pkt_q *pq = req->pq; in get_system_cache_entry()
1441 SDMA_DBG(req, in get_system_cache_entry()
1447 SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len); in get_system_cache_entry()
1454 SDMA_DBG(req, "node %p start %llx end %llu", node, start, end); in get_system_cache_entry()
1456 ret = add_system_pinning(req, node_p, start, in get_system_cache_entry()
1477 SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d", in get_system_cache_entry()
1488 ret = add_system_pinning(req, node_p, start, prepend_len); in get_system_cache_entry()
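
The pinning path converts byte ranges into whole pages before calling into the pin machinery (PFN_DOWN at line 1415), and a cached node that begins after the requested start is grown by pinning a "prepend" range in front of it (lines 1477-1488). A sketch of just that address arithmetic, assuming page-aligned inputs and fabricated addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long req_start = 0x7f0000001000UL;
	unsigned long req_len = 3 * PAGE_SIZE;
	unsigned long node_start = 0x7f0000003000UL; /* cached node */

	/* pages to pin for a fresh, page-aligned range */
	printf("npages %lu\n", PFN_DOWN(req_len));

	/* node starts after the request: pin the gap in front of it */
	if (node_start > req_start)
		printf("prepend_len %lu bytes\n", node_start - req_start);
	return 0;
}
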
1497 static int add_mapping_to_sdma_packet(struct user_sdma_request *req, in add_mapping_to_sdma_packet() argument
1503 struct hfi1_user_sdma_pkt_q *pq = req->pq; in add_mapping_to_sdma_packet()
1519 SDMA_DBG(req, in add_mapping_to_sdma_packet()
1548 SDMA_DBG(req, in add_mapping_to_sdma_packet()
1559 static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req, in add_system_iovec_to_sdma_packet() argument
1564 struct mmu_rb_handler *handler = req->pq->handler; in add_system_iovec_to_sdma_packet()
1573 ret = get_system_cache_entry(req, &cache_entry, start, in add_system_iovec_to_sdma_packet()
1576 SDMA_DBG(req, "pin system segment failed %d", ret); in add_system_iovec_to_sdma_packet()
1584 ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start, in add_system_iovec_to_sdma_packet()
1595 SDMA_DBG(req, "add system segment failed %d", ret); in add_system_iovec_to_sdma_packet()
1606 static int add_system_pages_to_sdma_packet(struct user_sdma_request *req, in add_system_pages_to_sdma_packet() argument
1629 req->iov_idx++; in add_system_pages_to_sdma_packet()
1633 ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec, in add_system_pages_to_sdma_packet()
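
Finally, add_system_iovec_to_sdma_packet() (lines 1559-1595) walks an iovec in cache-entry-sized steps: look up or create the entry covering the current address, map as much of it as the packet still needs, and advance. The loop shape, under the simplifying assumption that every entry spans a fixed 8 KiB:

#include <stdio.h>
#include <stdint.h>

#define ENTRY_SPAN 8192u   /* pretend every cache entry spans 8 KiB */

int main(void)
{
	uint32_t start = 0, remaining = 20000;

	while (remaining) {
		uint32_t from_this_entry = ENTRY_SPAN - (start % ENTRY_SPAN);

		if (from_this_entry > remaining)
			from_this_entry = remaining;
		printf("map %u bytes at offset %u\n", from_this_entry, start);
		start += from_this_entry;
		remaining -= from_this_entry;
	}
	return 0;
}
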