
Searched refs:pages (Results 1 – 25 of 27) sorted by relevance

/net/ceph/
pagevec.c
18 struct page **pages; in ceph_get_direct_page_vector() local
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); in ceph_get_direct_page_vector()
23 if (!pages) in ceph_get_direct_page_vector()
29 num_pages - got, pages + got, write_page ? FOLL_WRITE : 0); in ceph_get_direct_page_vector()
37 return pages; in ceph_get_direct_page_vector()
40 ceph_put_page_vector(pages, got, false); in ceph_get_direct_page_vector()
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
51 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
52 put_page(pages[i]); in ceph_put_page_vector()
54 kvfree(pages); in ceph_put_page_vector()
[all …]
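
The pagevec.c hits show ceph's pin/release pattern for direct-I/O page vectors: allocate the array, pin user pages in a loop until the whole range is covered, and on failure drop every reference taken so far. A minimal sketch of that pattern, assuming the modern gup_flags form of get_user_pages_fast() (older kernels, as in the rds hits below, took an int write flag instead):

    /* Sketch of the pin/release pattern in pagevec.c (simplified). */
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    static struct page **pin_page_vector(unsigned long data, int num_pages,
                                         bool write_page)
    {
            struct page **pages;
            int got = 0;
            int rc;

            pages = kmalloc_array(num_pages, sizeof(*pages), GFP_NOFS);
            if (!pages)
                    return ERR_PTR(-ENOMEM);

            while (got < num_pages) {
                    rc = get_user_pages_fast(data + (got * PAGE_SIZE),
                                             num_pages - got,
                                             write_page ? FOLL_WRITE : 0,
                                             pages + got);
                    if (rc <= 0) {          /* 0 pages pinned is a failure too */
                            rc = rc ? rc : -EFAULT;
                            goto fail;
                    }
                    got += rc;
            }
            return pages;

    fail:
            while (got--)
                    put_page(pages[got]);   /* drop the partial pins */
            kvfree(pages);
            return ERR_PTR(rc);
    }
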
osd_client.c
95 struct page **pages, u64 length, u32 alignment, in ceph_osd_data_pages_init() argument
99 osd_data->pages = pages; in ceph_osd_data_pages_init()
156 unsigned int which, struct page **pages, in osd_req_op_raw_data_in_pages() argument
163 ceph_osd_data_pages_init(osd_data, pages, length, alignment, in osd_req_op_raw_data_in_pages()
169 unsigned int which, struct page **pages, in osd_req_op_extent_osd_data_pages() argument
176 ceph_osd_data_pages_init(osd_data, pages, length, alignment, in osd_req_op_extent_osd_data_pages()
225 unsigned int which, struct page **pages, u64 length, in osd_req_op_cls_request_data_pages() argument
231 ceph_osd_data_pages_init(osd_data, pages, length, alignment, in osd_req_op_cls_request_data_pages()
237 unsigned int which, struct page **pages, u64 length, in osd_req_op_cls_response_data_pages() argument
243 ceph_osd_data_pages_init(osd_data, pages, length, alignment, in osd_req_op_cls_response_data_pages()
[all …]
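
Every osd_client.c hit above funnels into the same helper, which merely records a caller-owned page array plus its extent in the request's data descriptor; the ownership and pool flags decide who frees the pages later. A hedged sketch, with the struct trimmed to the fields visible in the hits:

    /* Sketch: recording a page array in an OSD data descriptor. The struct
     * is an assumption trimmed to what the search hits show. */
    #include <linux/types.h>

    struct osd_data_sketch {
            struct page **pages;
            u64 length;
            u32 alignment;
            bool pages_from_pool;
            bool own_pages;
    };

    static void osd_data_pages_init(struct osd_data_sketch *osd_data,
                                    struct page **pages, u64 length,
                                    u32 alignment, bool pages_from_pool,
                                    bool own_pages)
    {
            osd_data->pages = pages;
            osd_data->length = length;
            osd_data->alignment = alignment;
            osd_data->pages_from_pool = pages_from_pool;
            osd_data->own_pages = own_pages;
    }
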
messenger.c
935 BUG_ON(!data->pages); in ceph_msg_data_pages_cursor_init()
965 return data->pages[cursor->page_index]; in ceph_msg_data_pages_next()
3241 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, in ceph_msg_data_add_pages() argument
3246 BUG_ON(!pages); in ceph_msg_data_add_pages()
3251 data->pages = pages; in ceph_msg_data_add_pages()
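
The messenger.c hits sketch a cursor over a page array: initialization asserts the array exists, and each step hands back pages[page_index] plus the offset into it. A minimal sketch, with the cursor fields assumed from the hits:

    /* Sketch of the page cursor in messenger.c; field names are assumed. */
    #include <linux/mm.h>
    #include <linux/bug.h>

    struct page_cursor_sketch {
            unsigned short page_index;    /* index into the pages[] array */
            unsigned short page_offset;   /* byte offset within that page */
    };

    static void cursor_init(struct page **pages, size_t alignment,
                            struct page_cursor_sketch *cursor)
    {
            BUG_ON(!pages);
            cursor->page_index = 0;
            cursor->page_offset = alignment & ~PAGE_MASK;
    }

    static struct page *cursor_next(struct page **pages,
                                    struct page_cursor_sketch *cursor,
                                    size_t *page_offset)
    {
            *page_offset = cursor->page_offset;
            return pages[cursor->page_index];
    }
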
/net/rds/
info.c
65 struct page **pages; member
122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy()
140 iter->pages++; in rds_info_copy()
167 struct page **pages = NULL; in rds_info_getsockopt() local
191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
192 if (!pages) { in rds_info_getsockopt()
196 ret = get_user_pages_fast(start, nr_pages, 1, pages); in rds_info_getsockopt()
215 iter.pages = pages; in rds_info_getsockopt()
238 for (i = 0; pages && i < nr_pages; i++) in rds_info_getsockopt()
[all …]
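
The info.c hits combine get_user_pages_fast() with a kmap_atomic() copy loop: the iterator maps the current pinned user page, copies into it, and moves to the next page once it fills. A simplified sketch of that loop (the real rds_info_copy() also handles a partial first page via the iterator offset):

    /* Sketch of the rds_info_copy() loop: map, fill, advance. */
    #include <linux/highmem.h>
    #include <linux/string.h>

    struct info_iter_sketch {
            struct page **pages;
            void *addr;              /* kmap of the current page, or NULL */
            unsigned long offset;    /* fill offset within the current page */
    };

    static void info_copy(struct info_iter_sketch *iter, void *data,
                          unsigned long bytes)
    {
            unsigned long this;

            while (bytes) {
                    if (!iter->addr)
                            iter->addr = kmap_atomic(*iter->pages);

                    this = min(bytes, PAGE_SIZE - iter->offset);
                    memcpy(iter->addr + iter->offset, data, this);

                    data += this;
                    bytes -= this;
                    iter->offset += this;

                    if (iter->offset == PAGE_SIZE) {
                            kunmap_atomic(iter->addr);
                            iter->addr = NULL;
                            iter->offset = 0;
                            iter->pages++;   /* on to the next pinned page */
                    }
            }
    }
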
rdma.c
158 struct page **pages, int write) in rds_pin_pages() argument
162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages); in rds_pin_pages()
166 put_page(pages[ret]); in rds_pin_pages()
178 struct page **pages = NULL; in __rds_rdma_map() local
207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in __rds_rdma_map()
208 if (!pages) { in __rds_rdma_map()
241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); in __rds_rdma_map()
256 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); in __rds_rdma_map()
309 kfree(pages); in __rds_rdma_map()
557 struct page **pages = NULL; in rds_cmsg_rdma_args() local
[all …]
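
rdma.c chains the two steps together: pin the user range, then point one scatterlist entry at each pinned page so the device can DMA straight into user memory. A hedged sketch (the hits use the older int write argument to get_user_pages_fast(); the partial-pin cleanup mirrors rds_pin_pages()):

    /* Sketch of the pin-then-scatterlist pattern in rdma.c. */
    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    static int pin_to_sg(unsigned long user_addr, int nr_pages,
                         struct scatterlist *sg, struct page **pages)
    {
            int ret, i;

            ret = get_user_pages_fast(user_addr, nr_pages, FOLL_WRITE, pages);
            if (ret >= 0 && ret < nr_pages) {
                    while (ret--)           /* partial pin: release and fail */
                            put_page(pages[ret]);
                    return -EFAULT;
            }
            if (ret < 0)
                    return ret;

            sg_init_table(sg, nr_pages);
            for (i = 0; i < nr_pages; i++)
                    sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
            return 0;
    }
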
/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
68 page = ctxt->pages[0]; in rdma_build_arg_xdr()
89 rqstp->rq_arg.pages = &rqstp->rq_pages[0]; in rdma_build_arg_xdr()
91 rqstp->rq_arg.pages = &rqstp->rq_pages[1]; in rdma_build_arg_xdr()
95 page = ctxt->pages[sge_no]; in rdma_build_arg_xdr()
108 page = ctxt->pages[sge_no++]; in rdma_build_arg_xdr()
145 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_lcl()
150 rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1]; in rdma_read_chunk_lcl()
154 head->arg.pages[pg_no], pg_off, in rdma_read_chunk_lcl()
248 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; in rdma_read_chunk_frmr()
254 sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no], in rdma_read_chunk_frmr()
[all …]
svc_rdma_sendto.c
83 page_address(xdr->pages[page_no]) + page_off; in map_xdr()
124 page = xdr->pages[xdr_off >> PAGE_SHIFT]; in dma_map_xdr()
464 int pages; in send_reply() local
479 ctxt->pages[0] = page; in send_reply()
519 pages = rqstp->rq_next_page - rqstp->rq_respages; in send_reply()
520 for (page_no = 0; page_no < pages; page_no++) { in send_reply()
521 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; in send_reply()
rpc_rdma.c
161 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); in rpcrdma_convert_iovs()
397 ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT); in rpcrdma_inline_pullup()
643 ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT); in rpcrdma_inline_fixup()
svc_rdma_transport.c
199 put_page(ctxt->pages[i]); in svc_rdma_put_context()
565 ctxt->pages[sge_no] = page; in svc_rdma_post_recv()
1333 ctxt->pages[0] = p; in svc_rdma_send_error()
/net/sunrpc/
xdr.c
125 kaddr = kmap_atomic(buf->pages[0]); in xdr_terminate_string()
133 struct page **pages, unsigned int base, unsigned int len) in xdr_inline_pages() argument
142 xdr->pages = pages; in xdr_inline_pages()
172 _shift_data_right_pages(struct page **pages, size_t pgto_base, in _shift_data_right_pages() argument
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); in _shift_data_right_pages()
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) in _copy_to_pages() argument
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_to_pages()
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) in _copy_from_pages() argument
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); in _copy_from_pages()
[all …]
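
All of these xdr.c helpers lean on the same index arithmetic: a byte offset into a page array splits into a page index (offset >> PAGE_SHIFT) and an offset within that page (offset & ~PAGE_MASK). PAGE_CACHE_SHIFT in the hits is the since-removed alias for PAGE_SHIFT. A sketch of the split, assuming lowmem pages (the real helpers kmap_atomic() each page rather than using page_address()):

    /* Sketch of the offset split behind _copy_to_pages() and friends. */
    #include <linux/mm.h>

    static char *page_array_ptr(struct page **pages, size_t base)
    {
            struct page **pg = pages + (base >> PAGE_SHIFT);
            size_t off = base & ~PAGE_MASK;

            /* assumes lowmem; highmem pages would need kmap_atomic() */
            return (char *)page_address(*pg) + off;
    }
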
svc.c
549 unsigned int pages, arghi; in svc_init_buffer() local
555 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. in svc_init_buffer()
559 WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); in svc_init_buffer()
560 if (pages > RPCSVC_MAXPAGES) in svc_init_buffer()
561 pages = RPCSVC_MAXPAGES; in svc_init_buffer()
562 while (pages) { in svc_init_buffer()
567 pages--; in svc_init_buffer()
569 return pages == 0; in svc_init_buffer()
1319 rqstp->rq_res.pages = rqstp->rq_respages + 1; in svc_process()
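
The svc.c hits size the per-thread buffer in whole pages, with one extra page because the buffer must hold both request and reply, clamped to RPCSVC_MAXPAGES. For example, a 1 MiB maximum message on 4 KiB pages yields 256 + 1 = 257 pages. A sketch of that allocation, assuming sunrpc's struct svc_rqst and its rq_pages[] array:

    /* Sketch of svc_init_buffer(): size in pages, clamp, then allocate. */
    #include <linux/gfp.h>
    #include <linux/sunrpc/svc.h>

    static int init_buffer_sketch(struct svc_rqst *rqstp, unsigned int size,
                                  int node)
    {
            unsigned int pages, arghi = 0;

            pages = size / PAGE_SIZE + 1;   /* +1: request and reply share it */
            if (pages > RPCSVC_MAXPAGES)
                    pages = RPCSVC_MAXPAGES;

            while (pages--) {
                    struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);

                    if (!p)
                            return -ENOMEM;
                    rqstp->rq_pages[arghi++] = p;
            }
            return 0;
    }
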
svc_xprt.c
614 int pages; in svc_alloc_arg() local
618 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; in svc_alloc_arg()
619 WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES); in svc_alloc_arg()
620 if (pages >= RPCSVC_MAXPAGES) in svc_alloc_arg()
622 pages = RPCSVC_MAXPAGES - 1; in svc_alloc_arg()
623 for (i = 0; i < pages ; i++) in svc_alloc_arg()
643 arg->pages = rqstp->rq_pages + 1; in svc_alloc_arg()
646 arg->page_len = (pages-2)*PAGE_SIZE; in svc_alloc_arg()
647 arg->len = (pages-1)*PAGE_SIZE; in svc_alloc_arg()
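
The arithmetic at the end of the svc_xprt.c hits describes the receive-buffer layout: page 0 backs the head iovec, the final page is reserved for the tail, and the pages in between carry the body, hence page_len = (pages-2)*PAGE_SIZE and len = (pages-1)*PAGE_SIZE. A sketch of that layout step, assuming rq_pages[] has already been refilled:

    /* Sketch of the layout step in svc_alloc_arg(). */
    #include <linux/sunrpc/svc.h>

    static void layout_arg_sketch(struct svc_rqst *rqstp, unsigned int pages)
    {
            struct xdr_buf *arg = &rqstp->rq_arg;

            arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
            arg->head[0].iov_len = PAGE_SIZE;
            arg->pages = rqstp->rq_pages + 1;        /* body starts at page 1 */
            arg->page_base = 0;
            arg->page_len = (pages - 2) * PAGE_SIZE; /* body: all but head+tail */
            arg->len = (pages - 1) * PAGE_SIZE;      /* head + body */
            arg->tail[0].iov_len = 0;
    }
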
socklib.c
74 struct page **ppage = xdr->pages; in xdr_partial_copy_from_skb()
svcsock.c
181 struct page **ppage = xdr->pages; in svc_send_common()
1080 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) in copy_pages_to_kvecs() argument
1086 vec[i].iov_base = page_address(pages[i]); in copy_pages_to_kvecs()
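
copy_pages_to_kvecs() in the svcsock.c hits is the page-array-to-iovec shim: one kvec per page, each pointing at the page's kernel mapping. A simplified sketch that also trims the final kvec to the residual length:

    /* Sketch of copy_pages_to_kvecs(); returns the number of kvecs used. */
    #include <linux/mm.h>
    #include <linux/uio.h>

    static int pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
    {
            int i = 0;

            while (len > 0) {
                    vec[i].iov_base = page_address(pages[i]);
                    vec[i].iov_len = min_t(int, len, PAGE_SIZE);
                    len -= PAGE_SIZE;
                    i++;
            }
            return i;
    }
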
/net/sunrpc/auth_gss/
gss_krb5_wrap.c
85 ptr = kmap_atomic(buf->pages[last]); in gss_krb5_remove_padding()
159 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v1() argument
222 tmp_pages = buf->pages; in gss_wrap_kerberos_v1()
223 buf->pages = pages; in gss_wrap_kerberos_v1()
227 buf->pages = tmp_pages; in gss_wrap_kerberos_v1()
252 offset + headlen - conflen, pages); in gss_wrap_kerberos_v1()
258 offset + headlen - conflen, pages)) in gss_wrap_kerberos_v1()
441 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos_v2() argument
486 err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); in gss_wrap_kerberos_v2()
592 struct xdr_buf *buf, struct page **pages) in gss_wrap_kerberos() argument
[all …]
gss_krb5_crypto.c
393 struct page **pages; member
418 in_page = desc->pages[i]; in encryptor()
463 int offset, struct page **pages) in gss_encrypt_xdr_buf() argument
476 desc.pages = pages; in gss_encrypt_xdr_buf()
598 u32 offset, u8 *iv, struct page **pages, int encrypt) in gss_krb5_cts_crypt() argument
617 save_pages = buf->pages; in gss_krb5_cts_crypt()
619 buf->pages = pages; in gss_krb5_cts_crypt()
622 buf->pages = save_pages; in gss_krb5_cts_crypt()
644 struct xdr_buf *buf, struct page **pages) in gss_krb5_aes_encrypt() argument
703 save_pages = buf->pages; in gss_krb5_aes_encrypt()
[all …]
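
A pattern repeats across the gss_krb5 hits: save buf->pages, swap in a scratch array so the cipher writes its output there, run the crypto, then restore the original array. A minimal sketch of that save/swap/restore shape; do_crypt() is a hypothetical stand-in for the actual encrypt call:

    /* Sketch of the save/swap/restore pattern in gss_krb5_crypto.c.
     * do_crypt() is hypothetical; it stands in for the cipher call. */
    #include <linux/sunrpc/xdr.h>

    static int do_crypt(struct xdr_buf *buf);   /* hypothetical */

    static int crypt_with_scratch(struct xdr_buf *buf,
                                  struct page **scratch_pages)
    {
            struct page **save_pages = buf->pages;
            int err;

            buf->pages = scratch_pages;     /* cipher output lands here */
            err = do_crypt(buf);
            buf->pages = save_pages;        /* restore the caller's array */

            return err;
    }
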
gss_rpc_upcall.c
218 for (i = 0; i < arg->npages && arg->pages[i]; i++) in gssp_free_receive_pages()
219 __free_page(arg->pages[i]); in gssp_free_receive_pages()
221 kfree(arg->pages); in gssp_free_receive_pages()
227 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL); in gssp_alloc_receive_pages()
232 if (!arg->pages) in gssp_alloc_receive_pages()
gss_rpc_xdr.h
137 struct page **pages; /* Array of contiguous pages */ member
150 struct page **pages; member
gss_rpc_xdr.c
69 xdr_write_pages(xdr, in->pages, in->page_base, in->page_len); in gssx_enc_in_token()
785 arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); in gssx_enc_accept_sec_context()
svcauth_gss.c
1069 in_token->pages = rqstp->rq_pages; in gss_read_proxy_verf()
1648 inpages = resbuf->pages; in svcauth_gss_wrap_resp_priv()
/net/9p/
trans_common.c
22 void p9_release_pages(struct page **pages, int nr_pages) in p9_release_pages() argument
27 if (pages[i]) in p9_release_pages()
28 put_page(pages[i]); in p9_release_pages()
trans_virtio.c
316 struct page ***pages, in p9_get_mapped_pages() argument
340 n = iov_iter_get_pages_alloc(data, pages, count, offs); in p9_get_mapped_pages()
368 *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); in p9_get_mapped_pages()
369 if (!*pages) in p9_get_mapped_pages()
376 (*pages)[index] = vmalloc_to_page(p); in p9_get_mapped_pages()
378 (*pages)[index] = kmap_to_page(p); in p9_get_mapped_pages()
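
The trans_virtio.c hits distinguish two kinds of kernel buffers when building a page vector: vmalloc'ed memory needs vmalloc_to_page() to find each backing page, while other mapped addresses go through kmap_to_page(). A sketch of that branch, following the hits:

    /* Sketch of the kernel-buffer branch in p9_get_mapped_pages(). */
    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <linux/highmem.h>

    static void fill_kernel_pages(struct page **pages, char *p, int nr_pages)
    {
            int index;

            for (index = 0; index < nr_pages; index++, p += PAGE_SIZE) {
                    if (is_vmalloc_addr(p))
                            pages[index] = vmalloc_to_page(p);
                    else
                            pages[index] = kmap_to_page(p);
            }
    }
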
/net/ieee802154/
nl-phy.c
40 int i, pages = 0; in ieee802154_nl_fill_phy() local
60 buf[pages++] = phy->supported.channels[i] | (i << 27); in ieee802154_nl_fill_phy()
62 if (pages && in ieee802154_nl_fill_phy()
64 pages * sizeof(uint32_t), buf)) in ieee802154_nl_fill_phy()
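
Here "pages" are 802.15.4 channel pages, not memory pages: the hit packs each supported-channel bitmap together with its channel page number, page in the top five bits (i << 27), the 27 channel bits below. A small sketch of the pack/unpack arithmetic:

    /* Sketch of the channel-page word format in ieee802154_nl_fill_phy():
     * bits 31..27 = page number, bits 26..0 = channel bitmap. */
    #include <linux/types.h>

    static u32 pack_channel_page(int page, u32 channels)
    {
            return channels | ((u32)page << 27);
    }

    static int unpack_channel_page(u32 word)
    {
            return word >> 27;
    }
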
/net/core/
datagram.c
536 struct page *pages[MAX_SKB_FRAGS]; in zerocopy_sg_from_iter() local
545 copied = iov_iter_get_pages(from, pages, ~0U, in zerocopy_sg_from_iter()
559 skb_fill_page_desc(skb, frag++, pages[n], start, size); in zerocopy_sg_from_iter()
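
The datagram.c hits show the zerocopy path: grab page references directly from the iov_iter and hang them off skb fragments, so the payload bytes are never copied. A simplified sketch (iov_iter_get_pages() as in the hits; newer kernels renamed it iov_iter_get_pages2(), and the real function also updates skb length and truesize):

    /* Sketch of zerocopy_sg_from_iter(): pin iterator pages into skb frags. */
    #include <linux/skbuff.h>
    #include <linux/uio.h>

    static int fill_skb_from_iter(struct sk_buff *skb, struct iov_iter *from)
    {
            struct page *pages[MAX_SKB_FRAGS];
            int frag = skb_shinfo(skb)->nr_frags;
            size_t start, size;
            ssize_t copied;
            int n = 0;

            copied = iov_iter_get_pages(from, pages, ~0U,
                                        MAX_SKB_FRAGS - frag, &start);
            if (copied < 0)
                    return -EFAULT;
            iov_iter_advance(from, copied);

            while (copied) {
                    size = min_t(size_t, copied, PAGE_SIZE - start);
                    skb_fill_page_desc(skb, frag++, pages[n++], start, size);
                    start = 0;
                    copied -= size;
            }
            return 0;
    }
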
skbuff.c
1776 put_page(spd->pages[i]); in sock_spd_release()
1803 spd->pages[spd->nr_pages - 1] == page && in spd_can_coalesce()
1830 spd->pages[spd->nr_pages] = page; in spd_fill_page()
1946 struct page *pages[MAX_SKB_FRAGS]; in skb_splice_bits() local
1948 .pages = pages, in skb_splice_bits()
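
Finally, the skbuff.c splice hits include a coalescing test: a new chunk can be merged into the previous splice slot only when it sits on the same page, directly after the previous slot's data. A sketch of that check, with the partial[] offsets assumed from the hit context:

    /* Sketch of spd_can_coalesce(): merge only if the chunk continues the
     * last slot on the same page. */
    #include <linux/splice.h>

    static bool can_coalesce(const struct splice_pipe_desc *spd,
                             struct page *page, unsigned int offset)
    {
            return spd->nr_pages &&
                   spd->pages[spd->nr_pages - 1] == page &&
                   spd->partial[spd->nr_pages - 1].offset +
                   spd->partial[spd->nr_pages - 1].len == offset;
    }
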
