
Searched refs:page (Results 1 – 25 of 66) sorted by relevance

/net/ceph/
pagelist.c
    11  struct page *page = list_entry(pl->head.prev, struct page, lru);  [in ceph_pagelist_unmap_tail(), local]
    12  kunmap(page);  [in ceph_pagelist_unmap_tail()]
    23  struct page *page = list_first_entry(&pl->head, struct page,  [in ceph_pagelist_release(), local]
    25  list_del(&page->lru);  [in ceph_pagelist_release()]
    26  __free_page(page);  [in ceph_pagelist_release()]
    35  struct page *page;  [in ceph_pagelist_addpage(), local]
    38  page = __page_cache_alloc(GFP_NOFS);  [in ceph_pagelist_addpage()]
    40  page = list_first_entry(&pl->free_list, struct page, lru);  [in ceph_pagelist_addpage()]
    41  list_del(&page->lru);  [in ceph_pagelist_addpage()]
    44  if (!page)  [in ceph_pagelist_addpage()]
    [all …]
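
The pagelist.c hits above outline ceph's bookkeeping: spare pages sit on a list threaded through page->lru, are pulled off with list_first_entry(), and are returned with __free_page(). A minimal sketch of that release loop (hypothetical helper name, not the ceph code itself):

#include <linux/list.h>
#include <linux/mm.h>

/* Free every page on a list linked through page->lru, the same
 * walk ceph_pagelist_release() performs above. */
static void free_lru_page_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct page *page = list_first_entry(head, struct page, lru);

                list_del(&page->lru);
                __free_page(page);
        }
}
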
pagevec.c
    15  struct page **ceph_get_direct_page_vector(const void __user *data,  [in ceph_get_direct_page_vector()]
    18  struct page **pages;  [in ceph_get_direct_page_vector()]
    47  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)  [in ceph_put_page_vector()]
    63  void ceph_release_page_vector(struct page **pages, int num_pages)  [in ceph_release_page_vector()]
    76  struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)  [in ceph_alloc_page_vector()]
    78  struct page **pages;  [in ceph_alloc_page_vector()]
    98  int ceph_copy_user_to_page_vector(struct page **pages,  [in ceph_copy_user_to_page_vector()]
    124  void ceph_copy_to_page_vector(struct page **pages,  [in ceph_copy_to_page_vector()]
    147  void ceph_copy_from_page_vector(struct page **pages,  [in ceph_copy_from_page_vector()]
    174  void ceph_zero_page_vector_range(int off, int len, struct page **pages)  [in ceph_zero_page_vector_range()]
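
ceph_alloc_page_vector() and ceph_release_page_vector() pair up as allocate/free for a whole array of pages. A sketch of typical use, assuming the usual ERR_PTR() convention on allocation failure:

#include <linux/ceph/libceph.h>
#include <linux/err.h>
#include <linux/gfp.h>

static int use_page_vector(void)
{
        int num_pages = 4;      /* arbitrary size for the example */
        struct page **pages;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* ... fill the pages or hand them to a request ... */

        ceph_release_page_vector(pages, num_pages);
        return 0;
}
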
messenger.c
    193  static struct page *zero_page;  /* used in certain error cases */
    529  static int ceph_tcp_recvpage(struct socket *sock, struct page *page,  [in ceph_tcp_recvpage(), argument]
    537  kaddr = kmap(page);  [in ceph_tcp_recvpage()]
    540  kunmap(page);  [in ceph_tcp_recvpage()]
    566  static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,  [in __ceph_tcp_sendpage(), argument]
    572  ret = kernel_sendpage(sock, page, offset, size, flags);  [in __ceph_tcp_sendpage()]
    579  static int ceph_tcp_sendpage(struct socket *sock, struct page *page,  [in ceph_tcp_sendpage(), argument]
    587  if (page_count(page) >= 1)  [in ceph_tcp_sendpage()]
    588  return __ceph_tcp_sendpage(sock, page, offset, size, more);  [in ceph_tcp_sendpage()]
    590  iov.iov_base = kmap(page) + offset;  [in ceph_tcp_sendpage()]
    [all …]
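
ceph_tcp_sendpage() above falls back from kernel_sendpage() to a kmap'd kernel_sendmsg() when the page is not refcounted (slab memory cannot go through sendpage). A sketch of that fallback, assuming a kernel of this vintage where kernel_sendpage() still exists:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static int send_one_page(struct socket *sock, struct page *page,
                         int offset, size_t size, bool more)
{
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : 0);
        struct msghdr msg = { .msg_flags = flags };
        struct kvec iov;
        int ret;

        /* refcounted pages can be handed to the socket directly */
        if (page_count(page) >= 1)
                return kernel_sendpage(sock, page, offset, size, flags);

        /* otherwise copy through a temporary kernel mapping */
        iov.iov_base = kmap(page) + offset;
        iov.iov_len = size;
        ret = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return ret;
}
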
/net/rds/
page.c
    41  struct page *r_page;
    56  int rds_page_copy_user(struct page *page, unsigned long offset,  [in rds_page_copy_user(), argument]
    63  addr = kmap(page);  [in rds_page_copy_user()]
    71  kunmap(page);  [in rds_page_copy_user()]
    102  struct page *page;  [in rds_page_remainder_alloc(), local]
    109  page = alloc_page(gfp);  [in rds_page_remainder_alloc()]
    110  if (!page) {  [in rds_page_remainder_alloc()]
    113  sg_set_page(scat, page, PAGE_SIZE, 0);  [in rds_page_remainder_alloc()]
    151  page = alloc_page(gfp);  [in rds_page_remainder_alloc()]
    156  if (!page) {  [in rds_page_remainder_alloc()]
    [all …]
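
rds_page_copy_user() above is the classic kmap-then-copy pattern for moving page contents to or from userspace. A minimal sketch of the to-user direction (hypothetical helper name):

#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static int copy_page_to_user(struct page *page, unsigned long offset,
                             void __user *ptr, unsigned long bytes)
{
        void *addr = kmap(page);        /* works for highmem pages too */
        int ret = 0;

        if (copy_to_user(ptr, addr + offset, bytes))
                ret = -EFAULT;
        kunmap(page);
        return ret;
}
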
rdma.c
    158  struct page **pages, int write)  [in rds_pin_pages()]
    178  struct page **pages = NULL;  [in __rds_rdma_map()]
    207  pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);  [in __rds_rdma_map()]
    448  struct page *page = sg_page(&ro->op_sg[i]);  [in rds_rdma_free_op(), local]
    455  set_page_dirty(page);  [in rds_rdma_free_op()]
    457  put_page(page);  [in rds_rdma_free_op()]
    467  struct page *page = sg_page(ao->op_sg);  [in rds_atomic_free_op(), local]
    472  set_page_dirty(page);  [in rds_atomic_free_op()]
    473  put_page(page);  [in rds_atomic_free_op()]
    556  struct page **pages = NULL;  [in rds_cmsg_rdma_args()]
    [all …]
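
rds_rdma_free_op() and rds_atomic_free_op() above show the teardown for pinned user pages: a page the hardware wrote to is marked dirty before its reference is dropped. A sketch of that loop (hypothetical helper name):

#include <linux/mm.h>

static void release_pinned_pages(struct page **pages, int nr, bool dirty)
{
        int i;

        for (i = 0; i < nr; i++) {
                /* tell reclaim that the DMA changed the page contents */
                if (dirty)
                        set_page_dirty(pages[i]);
                put_page(pages[i]);
        }
}
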
info.c
    65  struct page **pages;
    167  struct page **pages = NULL;  [in rds_info_getsockopt()]
    191  pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);  [in rds_info_getsockopt()]
/net/mac802154/
tx.c
    42  u8 page;  [member]
    53  xw->priv->phy->current_page != xw->page) {  [in mac802154_xmit_worker()]
    55  xw->page,  [in mac802154_xmit_worker()]
    63  xw->priv->phy->current_page = xw->page;  [in mac802154_xmit_worker()]
    85  u8 page, u8 chan)  [in mac802154_tx(), argument]
    90  if (!(priv->phy->channels_supported[page] & (1 << chan))) {  [in mac802154_tx()]
    123  work->page = page;  [in mac802154_tx()]
monitor.c
    41  u8 chan, page;  [in mac802154_monitor_xmit(), local]
    47  page = priv->hw->phy->current_page;  [in mac802154_monitor_xmit()]
    52  if (WARN_ON(page >= WPAN_NUM_PAGES) ||  [in mac802154_monitor_xmit()]
    60  return mac802154_tx(priv->hw, skb, page, chan);  [in mac802154_monitor_xmit()]
    116  priv->page = 0;  [in mac802154_monitor_setup()]
mib.c
    177  res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);  [in phy_chan_notify()]
    182  priv->hw->phy->current_page = priv->page;  [in phy_chan_notify()]
    189  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)  [in mac802154_dev_set_page_channel(), argument]
    197  priv->page = page;  [in mac802154_dev_set_page_channel()]
    203  priv->hw->phy->current_page != priv->page) {  [in mac802154_dev_set_page_channel()]
mac802154.h
    89  u8 page;  [member]
    123  u8 page, u8 chan);
    131  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
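
mac802154_tx() above rejects a transmit when the page/channel pair is not set in the phy's channels_supported bitmap, and mac802154_monitor_xmit() bounds-checks the page against WPAN_NUM_PAGES. A sketch of that validation; the header name and the one-u32-bitmap-per-page layout are assumptions based on kernels of this era:

#include <net/wpan-phy.h>

static bool page_chan_supported(struct wpan_phy *phy, u8 page, u8 chan)
{
        /* one 32-bit bitmap of supported channels per channel page */
        if (page >= WPAN_NUM_PAGES || chan >= 32)
                return false;
        return phy->channels_supported[page] & (1 << chan);
}
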
/net/core/
datagram.c
    387  struct page *page = skb_frag_page(frag);  [in skb_copy_datagram_iovec(), local]
    391  vaddr = kmap(page);  [in skb_copy_datagram_iovec()]
    394  kunmap(page);  [in skb_copy_datagram_iovec()]
    473  struct page *page = skb_frag_page(frag);  [in skb_copy_datagram_const_iovec(), local]
    477  vaddr = kmap(page);  [in skb_copy_datagram_const_iovec()]
    480  kunmap(page);  [in skb_copy_datagram_const_iovec()]
    563  struct page *page = skb_frag_page(frag);  [in skb_copy_datagram_from_iovec(), local]
    567  vaddr = kmap(page);  [in skb_copy_datagram_from_iovec()]
    571  kunmap(page);  [in skb_copy_datagram_from_iovec()]
    643  struct page *page[MAX_SKB_FRAGS];  [in zerocopy_sg_from_iovec(), local]
    [all …]
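
Every datagram.c hit repeats one idiom per copy direction: fetch the frag's page with skb_frag_page(), kmap it, copy, kunmap. A sketch of that walk over an skb's fragments (frag->page_offset is the field name of this era; newer kernels spell it skb_frag_off()):

#include <linux/skbuff.h>
#include <linux/highmem.h>

static void for_each_frag_mapped(struct sk_buff *skb,
                                 void (*fn)(void *vaddr, unsigned int len))
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                struct page *page = skb_frag_page(frag);
                void *vaddr = kmap(page);

                fn(vaddr + frag->page_offset, skb_frag_size(frag));
                kunmap(page);
        }
}
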
skbuff.c
    368  if (unlikely(!nc->frag.page)) {  [in __netdev_alloc_frag()]
    376  nc->frag.page = alloc_pages(gfp, order);  [in __netdev_alloc_frag()]
    377  if (likely(nc->frag.page))  [in __netdev_alloc_frag()]
    387  &nc->frag.page->_count);  [in __netdev_alloc_frag()]
    393  if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {  [in __netdev_alloc_frag()]
    395  &nc->frag.page->_count))  [in __netdev_alloc_frag()]
    398  atomic_set(&nc->frag.page->_count,  [in __netdev_alloc_frag()]
    402  &nc->frag.page->_count);  [in __netdev_alloc_frag()]
    408  data = page_address(nc->frag.page) + nc->frag.offset;  [in __netdev_alloc_frag()]
    474  void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,  [in skb_add_rx_frag(), argument]
    [all …]
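
The declaration of skb_add_rx_frag() is visible above; a sketch of attaching a freshly received page as an skb's next fragment (the trailing truesize argument is assumed from kernels of this vintage):

#include <linux/skbuff.h>

static void rx_attach_page(struct sk_buff *skb, struct page *page,
                           int off, int size)
{
        /* append after the existing frags; account a whole page of truesize */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off,
                        size, PAGE_SIZE);
}
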
/net/atm/
mpoa_proc.c
    209  char *page, *p;  [in proc_mpc_write(), local]
    218  page = (char *)__get_free_page(GFP_KERNEL);  [in proc_mpc_write()]
    219  if (!page)  [in proc_mpc_write()]
    222  for (p = page, len = 0; len < nbytes; p++, len++) {  [in proc_mpc_write()]
    224  free_page((unsigned long)page);  [in proc_mpc_write()]
    233  if (!parse_qos(page))  [in proc_mpc_write()]
    234  printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);  [in proc_mpc_write()]
    236  free_page((unsigned long)page);  [in proc_mpc_write()]
proc.c
    380  unsigned long page;  [in proc_dev_atm_read(), local]
    385  page = get_zeroed_page(GFP_KERNEL);  [in proc_dev_atm_read()]
    386  if (!page)  [in proc_dev_atm_read()]
    392  length = dev->ops->proc_read(dev, pos, (char *)page);  [in proc_dev_atm_read()]
    397  if (copy_to_user(buf, (char *)page, length))  [in proc_dev_atm_read()]
    401  free_page(page);  [in proc_dev_atm_read()]
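
Both atm files use a single free page as a bounce buffer between the kernel and the user's buffer. A sketch of the read side, following proc_dev_atm_read() above (the fill callback is hypothetical):

#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>

static ssize_t read_via_page(char __user *buf, size_t count,
                             int (*fill)(char *dst))
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);
        ssize_t len;

        if (!page)
                return -ENOMEM;
        len = fill((char *)page);       /* fills at most PAGE_SIZE bytes */
        if (len > 0) {
                len = min_t(ssize_t, len, count);
                if (copy_to_user(buf, (char *)page, len))
                        len = -EFAULT;
        }
        free_page(page);
        return len;
}
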
/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
    110  struct page *page;  [in dma_map_xdr(), local]
    115  page = virt_to_page(xdr->head[0].iov_base);  [in dma_map_xdr()]
    121  page = xdr->pages[xdr_off >> PAGE_SHIFT];  [in dma_map_xdr()]
    128  page = virt_to_page(xdr->tail[0].iov_base);  [in dma_map_xdr()]
    131  dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,  [in dma_map_xdr()]
    368  struct page *page,  [in send_reply(), argument]
    394  ctxt->pages[0] = page;  [in send_reply()]
    401  ib_dma_map_page(rdma->sc_cm_id->device, page, 0,  [in send_reply()]
    502  struct page *res_page;  [in svc_rdma_sendto()]
svc_rdma_recvfrom.c
    63  struct page *page;  [in rdma_build_arg_xdr(), local]
    68  page = ctxt->pages[0];  [in rdma_build_arg_xdr()]
    70  rqstp->rq_pages[0] = page;  [in rdma_build_arg_xdr()]
    73  rqstp->rq_arg.head[0].iov_base = page_address(page);  [in rdma_build_arg_xdr()]
    88  page = ctxt->pages[sge_no];  [in rdma_build_arg_xdr()]
    90  rqstp->rq_pages[sge_no] = page;  [in rdma_build_arg_xdr()]
    109  page = ctxt->pages[sge_no++];  [in rdma_build_arg_xdr()]
    110  put_page(page);  [in rdma_build_arg_xdr()]
svc_rdma_transport.c
    490  struct page *svc_rdma_get_page(void)  [in svc_rdma_get_page()]
    492  struct page *page;  [in svc_rdma_get_page(), local]
    494  while ((page = alloc_page(GFP_KERNEL)) == NULL) {  [in svc_rdma_get_page()]
    499  return page;  [in svc_rdma_get_page()]
    506  struct page *page;  [in svc_rdma_post_recv(), local]
    517  page = svc_rdma_get_page();  [in svc_rdma_post_recv()]
    518  ctxt->pages[sge_no] = page;  [in svc_rdma_post_recv()]
    520  page, 0, PAGE_SIZE,  [in svc_rdma_post_recv()]
    1309  struct page *p;  [in svc_rdma_send_error()]
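
svc_rdma_get_page() above is written so it cannot fail: it sleeps and retries until alloc_page() succeeds. A sketch of that loop:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static struct page *get_page_retry(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* give reclaim a second to make progress, then retry */
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}
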
/net/9p/
trans_common.h
    15  void p9_release_pages(struct page **, int);
    16  int p9_payload_gup(char *, int *, struct page **, int);
trans_common.c
    25  void p9_release_pages(struct page **pages, int nr_pages)  [in p9_release_pages()]
    57  int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)  [in p9_payload_gup()]
/net/xfrm/
xfrm_ipcomp.c
    73  struct page *page;  [in ipcomp_decompress(), local]
    80  page = alloc_page(GFP_ATOMIC);  [in ipcomp_decompress()]
    83  if (!page)  [in ipcomp_decompress()]
    86  __skb_frag_set_page(frag, page);  [in ipcomp_decompress()]
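
ipcomp_decompress() above allocates a page with GFP_ATOMIC (it runs in softirq context) and attaches it to an skb fragment. A sketch of that step, assuming __skb_frag_set_page() as on kernels of this vintage:

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int attach_frag_page(skb_frag_t *frag)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        __skb_frag_set_page(frag, page);
        return 0;
}
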
/net/sunrpc/
sunrpc.h
    60  struct page *headpage, unsigned long headoffset,
    61  struct page *tailpage, unsigned long tailoffset);
xdr.c
    133  struct page **pages, unsigned int base, unsigned int len)  [in xdr_inline_pages()]
    172  _shift_data_right_pages(struct page **pages, size_t pgto_base,  [in _shift_data_right_pages()]
    175  struct page **pgfrom, **pgto;  [in _shift_data_right_pages()]
    233  _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)  [in _copy_to_pages()]
    235  struct page **pgto;  [in _copy_to_pages()]
    277  _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)  [in _copy_from_pages()]
    279  struct page **pgfrom;  [in _copy_from_pages()]
    501  void *page;  [in xdr_commit_encode(), local]
    505  page = page_address(*xdr->page_ptr);  [in xdr_commit_encode()]
    506  memcpy(xdr->scratch.iov_base, page, shift);  [in xdr_commit_encode()]
    [all …]
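
_copy_to_pages() above spreads a flat buffer across an array of pages. A sketch of that job; the real xdr.c uses kmap_atomic() to cope with highmem, while this sketch assumes lowmem pages reachable via page_address():

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>

static void copy_to_page_array(struct page **pages, size_t pgbase,
                               const char *p, size_t len)
{
        struct page **pgto = pages + (pgbase >> PAGE_SHIFT);

        pgbase &= ~PAGE_MASK;   /* offset within the first page */
        while (len) {
                size_t copy = min_t(size_t, PAGE_SIZE - pgbase, len);

                memcpy(page_address(*pgto) + pgbase, p, copy);
                p += copy;
                len -= copy;
                pgbase = 0;     /* later pages are filled from offset 0 */
                pgto++;
        }
}
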
/net/ipv4/
udp_impl.h
    26  int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
ip_output.c
    773  csum_page(struct page *page, int offset, int copy)  [in csum_page(), argument]
    777  kaddr = kmap(page);  [in csum_page()]
    779  kunmap(page);  [in csum_page()]
    1048  if (!skb_can_coalesce(skb, i, pfrag->page,  [in __ip_append_data()]
    1054  __skb_fill_page_desc(skb, i, pfrag->page,  [in __ip_append_data()]
    1057  get_page(pfrag->page);  [in __ip_append_data()]
    1061  page_address(pfrag->page) + pfrag->offset,  [in __ip_append_data()]
    1163  ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,  [in ip_append_page(), argument]
    1280  if (skb_can_coalesce(skb, i, page, offset)) {  [in ip_append_page()]
    1283  get_page(page);  [in ip_append_page()]
    [all …]
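
csum_page() above checksums a page fragment through a temporary kernel mapping; only the kmap()/kunmap() pair shows in the listing, so the csum_partial() call in between is an assumption:

#include <linux/highmem.h>
#include <net/checksum.h>

static __wsum page_checksum(struct page *page, int offset, int len)
{
        char *kaddr = kmap(page);
        __wsum csum = csum_partial(kaddr + offset, len, 0);

        kunmap(page);
        return csum;
}
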
/net/ieee802154/
nl-mac.c
    211  u32 unscanned, u8 page,  [in ieee802154_nl_scan_confirm(), argument]
    229  nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||  [in ieee802154_nl_scan_confirm()]
    362  u8 page;  [in ieee802154_associate_req(), local]
    391  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);  [in ieee802154_associate_req()]
    393  page = 0;  [in ieee802154_associate_req()]
    397  page,  [in ieee802154_associate_req()]
    482  u8 page;  [in ieee802154_start_req(), local]
    517  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);  [in ieee802154_start_req()]
    519  page = 0;  [in ieee802154_start_req()]
    528  ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,  [in ieee802154_start_req()]
    [all …]
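
ieee802154_associate_req() and ieee802154_start_req() both read IEEE802154_ATTR_PAGE with a fallback of page 0 when the attribute is absent. A sketch of that pattern (the attribute header location varies by kernel version):

#include <net/netlink.h>
#include <linux/nl802154.h>

static u8 nl_get_page(struct nlattr **attrs)
{
        /* the page attribute is optional; default to page 0 */
        if (attrs[IEEE802154_ATTR_PAGE])
                return nla_get_u8(attrs[IEEE802154_ATTR_PAGE]);
        return 0;
}
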
