
Searched refs:sg (Results 1 – 25 of 45) sorted by relevance


/net/core/
skmsg.c
14 if (msg->sg.end > msg->sg.start && in sk_msg_try_coalesce_ok()
15 elem_first_coalesce < msg->sg.end) in sk_msg_try_coalesce_ok()
18 if (msg->sg.end < msg->sg.start && in sk_msg_try_coalesce_ok()
19 (elem_first_coalesce > msg->sg.start || in sk_msg_try_coalesce_ok()
20 elem_first_coalesce < msg->sg.end)) in sk_msg_try_coalesce_ok()
30 u32 osize = msg->sg.size; in sk_msg_alloc()
33 len -= msg->sg.size; in sk_msg_alloc()
51 i = msg->sg.end; in sk_msg_alloc()
53 sge = &msg->sg.data[i]; in sk_msg_alloc()
65 sge = &msg->sg.data[msg->sg.end]; in sk_msg_alloc()
[all …]
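
The skmsg.c hits treat msg->sg as a circular ring of scatterlist entries: end > start means the live span is contiguous, end < start means it has wrapped past the end of the array. A minimal sketch of that containment test, with illustrative names that are not the kernel's:

#include <stdbool.h>

/* Sketch: does 'idx' fall inside the live span [start, end) of a
 * circular ring? Mirrors the wrap-around checks visible in
 * sk_msg_try_coalesce_ok() above. */
static bool ring_contains(unsigned int start, unsigned int end,
			  unsigned int idx)
{
	if (end > start)		/* not wrapped: one contiguous span */
		return idx >= start && idx < end;
	if (end < start)		/* wrapped past the array end */
		return idx >= start || idx < end;
	return false;			/* start == end: ring is empty */
}
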
/net/mac80211/
aead_api.c
22 struct scatterlist sg[3]; in aead_encrypt() local
35 sg_init_table(sg, 3); in aead_encrypt()
36 sg_set_buf(&sg[0], __aad, aad_len); in aead_encrypt()
37 sg_set_buf(&sg[1], data, data_len); in aead_encrypt()
38 sg_set_buf(&sg[2], mic, mic_len); in aead_encrypt()
41 aead_request_set_crypt(aead_req, sg, sg, data_len, b_0); in aead_encrypt()
42 aead_request_set_ad(aead_req, sg[0].length); in aead_encrypt()
54 struct scatterlist sg[3]; in aead_decrypt() local
70 sg_init_table(sg, 3); in aead_decrypt()
71 sg_set_buf(&sg[0], __aad, aad_len); in aead_decrypt()
[all …]
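
aead_encrypt() above shows the standard one-shot AEAD pattern: AAD, payload, and MIC sit in three consecutive sg entries, the same list is passed as source and destination for in-place crypto, and the AAD length is declared with aead_request_set_ad(). A hedged sketch of that pattern (buffer names are placeholders, error handling trimmed, a synchronous tfm assumed):

#include <crypto/aead.h>
#include <linux/scatterlist.h>

/* Sketch: in-place AEAD encryption over aad | data | mic, following
 * the aead_api.c pattern above. The buffers must not live on a
 * vmapped stack, since they are mapped through the scatterlist. */
static int aead_encrypt_sketch(struct crypto_aead *tfm, u8 *iv,
			       u8 *aad, size_t aad_len,
			       u8 *data, size_t data_len,
			       u8 *mic, size_t mic_len)
{
	struct scatterlist sg[3];
	struct aead_request *req;
	int err;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	aead_request_set_callback(req, 0, NULL, NULL);

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], aad, aad_len);	/* associated data */
	sg_set_buf(&sg[1], data, data_len);	/* encrypted in place */
	sg_set_buf(&sg[2], mic, mic_len);	/* tag written on encrypt */

	aead_request_set_crypt(req, sg, sg, data_len, iv);
	aead_request_set_ad(req, aad_len);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
	return err;
}
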
aes_gmac.c
20 struct scatterlist sg[5]; in ieee80211_aes_gmac() local
41 sg_init_table(sg, 5); in ieee80211_aes_gmac()
42 sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); in ieee80211_aes_gmac()
43 sg_set_buf(&sg[1], zero, 8); in ieee80211_aes_gmac()
44 sg_set_buf(&sg[2], data + 8, data_len - 8 - GMAC_MIC_LEN); in ieee80211_aes_gmac()
45 sg_set_buf(&sg[3], zero, GMAC_MIC_LEN); in ieee80211_aes_gmac()
46 sg_set_buf(&sg[4], mic, GMAC_MIC_LEN); in ieee80211_aes_gmac()
48 sg_init_table(sg, 4); in ieee80211_aes_gmac()
49 sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); in ieee80211_aes_gmac()
50 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); in ieee80211_aes_gmac()
[all …]
/net/sunrpc/auth_gss/
gss_krb5_crypto.c
63 struct scatterlist sg[1]; in krb5_encrypt() local
80 sg_init_one(sg, out, length); in krb5_encrypt()
84 skcipher_request_set_crypt(req, sg, sg, length, local_iv); in krb5_encrypt()
102 struct scatterlist sg[1]; in krb5_decrypt() local
118 sg_init_one(sg, out, length); in krb5_decrypt()
122 skcipher_request_set_crypt(req, sg, sg, length, local_iv); in krb5_decrypt()
132 checksummer(struct scatterlist *sg, void *data) in checksummer() argument
136 ahash_request_set_crypt(req, sg, NULL, sg->length); in checksummer()
153 struct scatterlist sg[1]; in make_checksum() local
190 sg_init_one(sg, header, hdrlen); in make_checksum()
[all …]
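
krb5_encrypt() and krb5_decrypt() show the single-buffer form: sg_init_one() wraps one contiguous region, and the same entry serves as both source and destination. A hedged sketch of that synchronous skcipher pattern:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Sketch: in-place encryption of one linear buffer, following the
 * gss_krb5_crypto.c pattern above. */
static int krb5_encrypt_sketch(struct crypto_sync_skcipher *tfm,
			       void *buf, unsigned int length, u8 *iv)
{
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	sg_init_one(sg, buf, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, iv);

	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe key material from the stack */
	return err;
}
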
/net/rxrpc/
rxkad.c
188 struct scatterlist sg; in rxkad_prime_packet_security() local
216 sg_init_one(&sg, tmpbuf, tmpsize); in rxkad_prime_packet_security()
219 skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); in rxkad_prime_packet_security()
268 struct scatterlist sg; in rxkad_secure_packet_auth() local
289 sg_init_one(&sg, skb->head, 8); in rxkad_secure_packet_auth()
292 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); in rxkad_secure_packet_auth()
312 struct scatterlist sg[16]; in rxkad_secure_packet_encrypt() local
338 sg_init_one(&sg[0], skb->head, sizeof(rxkhdr)); in rxkad_secure_packet_encrypt()
341 skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x); in rxkad_secure_packet_encrypt()
351 sg_init_table(sg, ARRAY_SIZE(sg)); in rxkad_secure_packet_encrypt()
[all …]
/net/tls/
tls_sw.c
174 struct scatterlist *sg; in tls_decrypt_done() local
215 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { in tls_decrypt_done()
216 if (!sg) in tls_decrypt_done()
218 put_page(sg_page(sg)); in tls_decrypt_done()
321 len = required - msg_pl->sg.size; in tls_clone_plaintext_msg()
326 skip = prot->prepend_size + msg_pl->sg.size; in tls_clone_plaintext_msg()
421 &msg_en->sg.data[msg_en->sg.curr], in tls_tx_records()
457 sge = sk_msg_elem(msg_en, msg_en->sg.curr); in tls_encrypt_done()
528 msg_en->sg.curr = start; in tls_do_encryption()
570 u32 orig_size = msg_opl->sg.size; in tls_split_open_record()
[all …]
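
The tls_decrypt_done() hit walks the decrypt destination list and drops the page reference taken for each fragment when the list was built; esp_ssg_unref() further down does the same for ESP. A minimal sketch of that teardown, assuming the builder took one get_page() per entry:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch: release one page reference per scatterlist fragment. */
static void sg_put_pages(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg))
		put_page(sg_page(sg));
}
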
tls_main.c
105 struct scatterlist *sg, in tls_push_sg() argument
115 size = sg->length - offset; in tls_push_sg()
116 offset += sg->offset; in tls_push_sg()
120 if (sg_is_last(sg)) in tls_push_sg()
125 p = sg_page(sg); in tls_push_sg()
136 offset -= sg->offset; in tls_push_sg()
138 ctx->partially_sent_record = (void *)sg; in tls_push_sg()
144 sk_mem_uncharge(sk, sg->length); in tls_push_sg()
145 sg = sg_next(sg); in tls_push_sg()
146 if (!sg) in tls_push_sg()
[all …]
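
tls_push_sg() advances through a (possibly chained) scatterlist fragment by fragment: sg_page(), sg->offset, and sg->length describe each piece, sg_next() follows chain links, and sg_is_last() marks the tail. A hedged sketch of the traversal shape:

#include <linux/scatterlist.h>

/* Sketch: visit every fragment of a chained scatterlist, in the
 * shape tls_push_sg() uses when pushing a record to the socket. */
static size_t sg_total_bytes(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	size_t total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		/* fragment: sg_page(sg) + sg->offset, sg->length bytes */
		total += sg->length;
		if (sg_is_last(sg))
			break;
	}
	return total;
}
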
/net/rds/
tcp_send.c
71 unsigned int hdr_off, unsigned int sg, unsigned int off) in rds_tcp_xmit() argument
115 while (sg < rm->data.op_nents) { in rds_tcp_xmit()
119 sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
120 rm->data.op_sg[sg].offset + off, in rds_tcp_xmit()
121 rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
123 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
124 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
131 if (off == rm->data.op_sg[sg].length) { in rds_tcp_xmit()
133 sg++; in rds_tcp_xmit()
135 if (sg == rm->data.op_nents - 1) in rds_tcp_xmit()
message.c
366 struct scatterlist *sg; in rds_message_zcopy_from_user() local
377 sg = rm->data.op_sg; in rds_message_zcopy_from_user()
410 sg_set_page(sg, pages, copied, start); in rds_message_zcopy_from_user()
412 sg++; in rds_message_zcopy_from_user()
427 struct scatterlist *sg; in rds_message_copy_from_user() local
433 sg = rm->data.op_sg; in rds_message_copy_from_user()
440 if (!sg_page(sg)) { in rds_message_copy_from_user()
441 ret = rds_page_remainder_alloc(sg, iov_iter_count(from), in rds_message_copy_from_user()
450 sg->length - sg_off); in rds_message_copy_from_user()
453 nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off, in rds_message_copy_from_user()
[all …]
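
rds_message_copy_from_user() fills the pages behind an sg list from an iov_iter one fragment at a time with copy_page_from_iter(). A hedged sketch of the inner copy loop (allocation of the backing pages assumed done; the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/uio.h>

/* Sketch: copy user data from an iov_iter into the pages behind a
 * scatterlist, in the style of rds_message_copy_from_user(). */
static int copy_iter_to_sg(struct iov_iter *from, struct scatterlist *sg)
{
	unsigned int sg_off = 0;

	while (iov_iter_count(from)) {
		size_t n = min_t(size_t, iov_iter_count(from),
				 sg->length - sg_off);

		n = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					n, from);
		if (n == 0)
			return -EFAULT;

		sg_off += n;
		if (sg_off == sg->length) {	/* fragment full, advance */
			sg = sg_next(sg);
			if (!sg)
				break;
			sg_off = 0;
		}
	}
	return 0;
}
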
rdma.c
177 struct scatterlist *sg = NULL; in __rds_rdma_map() local
272 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL); in __rds_rdma_map()
273 if (!sg) { in __rds_rdma_map()
278 sg_init_table(sg, nents); in __rds_rdma_map()
282 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); in __rds_rdma_map()
291 sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL, in __rds_rdma_map()
301 kfree(sg); in __rds_rdma_map()
328 kfree(sg); in __rds_rdma_map()
765 struct scatterlist *sg; in rds_cmsg_rdma_args() local
767 sg = &op->op_sg[op->op_nents + j]; in rds_cmsg_rdma_args()
[all …]
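
__rds_rdma_map() shows the allocate-and-populate idiom for wrapping pinned pages: kmalloc_array() for the entries, sg_init_table() to zero them and set the end marker, then sg_set_page() per page. A hedged sketch, with full-page fragments assumed as in the original:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: wrap an array of pages in a freshly allocated scatterlist,
 * following the __rds_rdma_map() pattern. Caller frees with kfree(). */
static struct scatterlist *pages_to_sg(struct page **pages,
				       unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;

	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, nents);   /* zeroes entries, marks sg[nents-1] as end */
	for (i = 0; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	return sg;
}
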
ib_frmr.c
134 ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, in rds_ib_post_reg_frmr()
190 struct scatterlist *sg, unsigned int sg_len) in rds_ib_map_frmr() argument
203 ibmr->sg = sg; in rds_ib_map_frmr()
208 ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len, in rds_ib_map_frmr()
221 unsigned int dma_len = sg_dma_len(&ibmr->sg[i]); in rds_ib_map_frmr()
222 u64 dma_addr = sg_dma_address(&ibmr->sg[i]); in rds_ib_map_frmr()
260 ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len, in rds_ib_map_frmr()
403 struct scatterlist *sg, in rds_ib_reg_frmr() argument
426 ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents); in rds_ib_reg_frmr()
ib.h
330 struct scatterlist *sg; in rds_ib_dma_sync_sg_for_cpu() local
333 for_each_sg(sglist, sg, sg_dma_len, i) { in rds_ib_dma_sync_sg_for_cpu()
334 ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg), in rds_ib_dma_sync_sg_for_cpu()
335 sg_dma_len(sg), direction); in rds_ib_dma_sync_sg_for_cpu()
345 struct scatterlist *sg; in rds_ib_dma_sync_sg_for_device() local
348 for_each_sg(sglist, sg, sg_dma_len, i) { in rds_ib_dma_sync_sg_for_device()
349 ib_dma_sync_single_for_device(dev, sg_dma_address(sg), in rds_ib_dma_sync_sg_for_device()
350 sg_dma_len(sg), direction); in rds_ib_dma_sync_sg_for_device()
429 unsigned int hdr_off, unsigned int sg, unsigned int off);
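
Both ib.h helpers iterate the mapped list with for_each_sg() and sync each element individually for CPU or device access. A sketch of the same loop using the generic DMA API in place of the ib_dma_* wrappers (a deliberate substitution; the originals go through struct ib_device):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: sync every mapped element of a scatterlist for CPU access,
 * mirroring rds_ib_dma_sync_sg_for_cpu() above. */
static void sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
			    unsigned int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, nents, i)
		dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					sg_dma_len(sg), dir);
}
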
ib_mr.h
77 struct scatterlist *sg; member
120 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
138 struct scatterlist *sg,
ib_rdma.c
228 ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
232 ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg, in rds_ib_sync_mr()
244 ibmr->sg, ibmr->sg_len, in __rds_ib_teardown_mr()
254 struct page *page = sg_page(&ibmr->sg[i]); in __rds_ib_teardown_mr()
262 kfree(ibmr->sg); in __rds_ib_teardown_mr()
264 ibmr->sg = NULL; in __rds_ib_teardown_mr()
550 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, in rds_ib_get_mr() argument
619 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); in rds_ib_get_mr()
/net/ipv4/
tcp_bpf.c
28 tmp->sg.start = msg->sg.start; in bpf_tcp_ingress()
29 i = msg->sg.start; in bpf_tcp_ingress()
46 tmp->sg.end = i; in bpf_tcp_ingress()
55 } while (i != msg->sg.end); in bpf_tcp_ingress()
58 msg->sg.start = i; in bpf_tcp_ingress()
82 sge = sk_msg_elem(msg, msg->sg.start); in tcp_bpf_push()
103 msg->sg.size -= ret; in tcp_bpf_push()
117 if (msg->sg.start == msg->sg.end) in tcp_bpf_push()
302 delta = msg->sg.size; in tcp_bpf_send_verdict()
304 delta -= msg->sg.size; in tcp_bpf_send_verdict()
[all …]
ah4.c
156 struct scatterlist *sg; in ah_output() local
187 sg = ah_req_sg(ahash, req); in ah_output()
188 seqhisg = sg + nfrags; in ah_output()
224 sg_init_table(sg, nfrags + sglists); in ah_output()
225 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); in ah_output()
234 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); in ah_output()
314 struct scatterlist *sg; in ah_input() local
379 sg = ah_req_sg(ahash, req); in ah_input()
380 seqhisg = sg + nfrags; in ah_input()
399 sg_init_table(sg, nfrags + sglists); in ah_input()
[all …]
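
ah_output() builds a scatterlist over the skb's fragments with skb_to_sgvec_nomark() and feeds it to an ahash request whose digest lands in the ICV. A hedged sketch of the core sequence, with the AH-specific request layout and seqhi bookkeeping left out; the helper name and allocation flags are illustrative, and a synchronous tfm is assumed:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Sketch: digest an skb's data through a scatterlist, in the shape
 * of ah_output() above. */
static int hash_skb_sketch(struct crypto_ahash *ahash,
			   struct sk_buff *skb, u8 *icv)
{
	struct sk_buff *trailer;
	struct ahash_request *req;
	struct scatterlist *sg;
	int nfrags, err;

	nfrags = skb_cow_data(skb, 0, &trailer);   /* count data buffers */
	if (nfrags < 0)
		return nfrags;

	req = ahash_request_alloc(ahash, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
	if (!sg) {
		ahash_request_free(req);
		return -ENOMEM;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (err < 0)
		goto out;

	ahash_request_set_crypt(req, sg, icv, skb->len);
	err = crypto_ahash_digest(req);
out:
	kfree(sg);
	ahash_request_free(req);
	return err;
}
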
esp4.c
104 struct scatterlist *sg; in esp_ssg_unref() local
116 for (sg = sg_next(req->src); sg; sg = sg_next(sg)) in esp_ssg_unref()
117 put_page(sg_page(sg)); in esp_ssg_unref()
545 struct scatterlist *sg, *dsg; in esp_output_tail() local
568 sg = esp_req_sg(aead, req); in esp_output_tail()
571 dsg = sg; in esp_output_tail()
573 dsg = &sg[esp->nfrags]; in esp_output_tail()
578 sg_init_table(sg, esp->nfrags); in esp_output_tail()
579 err = skb_to_sgvec(skb, sg, in esp_output_tail()
619 aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv); in esp_output_tail()
[all …]
/net/9p/
trans_virtio.c
80 struct scatterlist sg[VIRTQUEUE_NUM]; member
171 static int pack_sg_list(struct scatterlist *sg, int start, in pack_sg_list() argument
183 sg_unmark_end(&sg[index]); in pack_sg_list()
184 sg_set_buf(&sg[index++], data, s); in pack_sg_list()
189 sg_mark_end(&sg[index - 1]); in pack_sg_list()
218 pack_sg_list_p(struct scatterlist *sg, int start, int limit, in pack_sg_list_p() argument
236 sg_unmark_end(&sg[index]); in pack_sg_list_p()
237 sg_set_page(&sg[index++], pdata[i++], s, data_off); in pack_sg_list_p()
244 sg_mark_end(&sg[index - 1]); in pack_sg_list_p()
272 out = pack_sg_list(chan->sg, 0, in p9_virtio_request()
[all …]
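
pack_sg_list() appends a linear buffer to the channel's preallocated sg array, clearing any stale end marker with sg_unmark_end() before writing each entry and re-terminating the list with sg_mark_end(). A hedged sketch of that bookkeeping (CHUNK is a placeholder for the chunking limit, not 9p's constant):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

#define CHUNK 4096	/* placeholder chunk size */

/* Sketch: append one buffer to sg[] from 'start', using at most
 * 'limit' entries, in the style of pack_sg_list(). Returns the
 * number of entries written. */
static int pack_buf(struct scatterlist *sg, int start, int limit,
		    char *data, int count)
{
	int index = start;

	while (count) {
		int s = min_t(int, count, CHUNK);

		BUG_ON(index >= limit);
		sg_unmark_end(&sg[index]);	/* may have been the end before */
		sg_set_buf(&sg[index++], data, s);
		data += s;
		count -= s;
	}
	if (index - start)
		sg_mark_end(&sg[index - 1]);	/* terminate the list here */
	return index - start;
}
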
/net/xfrm/
espintcp.c
210 struct scatterlist *sg; in espintcp_sendskmsg_locked() local
215 sg = &skmsg->sg.data[skmsg->sg.start]; in espintcp_sendskmsg_locked()
217 size_t size = sg->length - emsg->offset; in espintcp_sendskmsg_locked()
218 int offset = sg->offset + emsg->offset; in espintcp_sendskmsg_locked()
223 if (sg_is_last(sg)) in espintcp_sendskmsg_locked()
226 p = sg_page(sg); in espintcp_sendskmsg_locked()
230 emsg->offset = offset - sg->offset; in espintcp_sendskmsg_locked()
231 skmsg->sg.start += done; in espintcp_sendskmsg_locked()
243 sk_mem_uncharge(sk, sg->length); in espintcp_sendskmsg_locked()
244 sg = sg_next(sg); in espintcp_sendskmsg_locked()
[all …]
/net/wireless/
lib80211_crypt_ccmp.c
195 struct scatterlist sg[2]; in lib80211_ccmp_encrypt() local
218 sg_init_table(sg, 2); in lib80211_ccmp_encrypt()
219 sg_set_buf(&sg[0], aad, aad_len); in lib80211_ccmp_encrypt()
220 sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN, in lib80211_ccmp_encrypt()
225 aead_request_set_crypt(req, sg, sg, data_len, iv); in lib80211_ccmp_encrypt()
260 struct scatterlist sg[2]; in lib80211_ccmp_decrypt() local
319 sg_init_table(sg, 2); in lib80211_ccmp_decrypt()
320 sg_set_buf(&sg[0], aad, aad_len); in lib80211_ccmp_decrypt()
321 sg_set_buf(&sg[1], pos, data_len); in lib80211_ccmp_decrypt()
325 aead_request_set_crypt(req, sg, sg, data_len, iv); in lib80211_ccmp_decrypt()
/net/ipv6/
esp6.c
122 struct scatterlist *sg; in esp_ssg_unref() local
134 for (sg = sg_next(req->src); sg; sg = sg_next(sg)) in esp_ssg_unref()
135 put_page(sg_page(sg)); in esp_ssg_unref()
581 struct scatterlist *sg, *dsg; in esp6_output_tail() local
604 sg = esp_req_sg(aead, req); in esp6_output_tail()
607 dsg = sg; in esp6_output_tail()
609 dsg = &sg[esp->nfrags]; in esp6_output_tail()
614 sg_init_table(sg, esp->nfrags); in esp6_output_tail()
615 err = skb_to_sgvec(skb, sg, in esp6_output_tail()
655 aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv); in esp6_output_tail()
[all …]
ah6.c
333 struct scatterlist *sg; in ah6_output() local
370 sg = ah_req_sg(ahash, req); in ah6_output()
371 seqhisg = sg + nfrags; in ah6_output()
415 sg_init_table(sg, nfrags + sglists); in ah6_output()
416 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); in ah6_output()
425 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); in ah6_output()
519 struct scatterlist *sg; in ah6_input() local
584 sg = ah_req_sg(ahash, req); in ah6_input()
585 seqhisg = sg + nfrags; in ah6_input()
601 sg_init_table(sg, nfrags + sglists); in ah6_input()
[all …]
/net/sunrpc/xprtrdma/
svc_rdma_rw.c
398 struct scatterlist *sg = ctxt->rw_sg_table.sgl; in svc_rdma_vec_to_sg() local
400 sg_set_buf(&sg[0], info->wi_base, len); in svc_rdma_vec_to_sg()
414 struct scatterlist *sg; in svc_rdma_pagelist_to_sg() local
422 sg = ctxt->rw_sg_table.sgl; in svc_rdma_pagelist_to_sg()
427 sg_set_page(sg, *page, sge_bytes, page_off); in svc_rdma_pagelist_to_sg()
430 sg = sg_next(sg); in svc_rdma_pagelist_to_sg()
687 struct scatterlist *sg; in svc_rdma_build_read_segment() local
697 sg = ctxt->rw_sg_table.sgl; in svc_rdma_build_read_segment()
705 sg_set_page(sg, rqstp->rq_pages[info->ri_pageno], in svc_rdma_build_read_segment()
707 sg = sg_next(sg); in svc_rdma_build_read_segment()
frwr_ops.c
129 struct scatterlist *sg; in frwr_mr_init() local
137 sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS); in frwr_mr_init()
138 if (!sg) in frwr_mr_init()
148 sg_init_table(sg, depth); in frwr_mr_init()
149 mr->mr_sg = sg; in frwr_mr_init()
/net/smc/
smc_ib.c
602 struct scatterlist *sg; in smc_ib_sync_sg_for_cpu() local
606 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, in smc_ib_sync_sg_for_cpu()
608 if (!sg_dma_len(sg)) in smc_ib_sync_sg_for_cpu()
611 sg_dma_address(sg), in smc_ib_sync_sg_for_cpu()
612 sg_dma_len(sg), in smc_ib_sync_sg_for_cpu()
622 struct scatterlist *sg; in smc_ib_sync_sg_for_device() local
626 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, in smc_ib_sync_sg_for_device()
628 if (!sg_dma_len(sg)) in smc_ib_sync_sg_for_device()
631 sg_dma_address(sg), in smc_ib_sync_sg_for_device()
632 sg_dma_len(sg), in smc_ib_sync_sg_for_device()
