/net/core/ |
D | page_pool.c |
    85   static struct page *__page_pool_get_cached(struct page_pool *pool)  in __page_pool_get_cached()
    89   struct page *page;  in __page_pool_get_cached() local
    95   page = pool->alloc.cache[--pool->alloc.count];  in __page_pool_get_cached()
    96   return page;  in __page_pool_get_cached()
    109  page = __ptr_ring_consume(r);  in __page_pool_get_cached()
    115  return page;  in __page_pool_get_cached()
    120  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,  in __page_pool_alloc_pages_slow()
    123  struct page *page;  in __page_pool_alloc_pages_slow() local
    141  page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);  in __page_pool_alloc_pages_slow()
    142  if (!page)  in __page_pool_alloc_pages_slow()
    [all …]
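A minimal sketch of the page_pool API these fast/slow paths implement, as a consumer driver might use it. The mydrv_* names are hypothetical, and page_pool_put_page() has changed signature across kernel versions; the three-argument form here matches the xdp.c caller below.

```c
#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool.h>

/* Hypothetical driver helper: create a pool for one RX ring. */
static struct page_pool *mydrv_create_pool(struct device *dev, int ring_size)
{
	struct page_pool_params pp = {
		.flags     = PP_FLAG_DMA_MAP,	/* pool handles DMA mapping */
		.order     = 0,			/* order-0 pages */
		.pool_size = ring_size,
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);		/* ERR_PTR() on failure */
}

/* Refill one RX descriptor: hits the alloc.cache fast path when possible,
 * otherwise ends up in __page_pool_alloc_pages_slow(). */
static struct page *mydrv_rx_refill(struct page_pool *pool)
{
	return page_pool_dev_alloc_pages(pool);
}

/* Return a page; allow_direct=true only from the pool's NAPI context. */
static void mydrv_rx_recycle(struct page_pool *pool, struct page *page)
{
	page_pool_put_page(pool, page, true);
}
```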
|
D | xdp.c |
    371  struct page *page;  in __xdp_return() local
    378  page = virt_to_head_page(data);  in __xdp_return()
    380  page_pool_put_page(xa->page_pool, page, napi_direct);  in __xdp_return()
    387  page = virt_to_page(data); /* Assumes order0 page*/  in __xdp_return()
    388  put_page(page);  in __xdp_return()
    425  struct page *page;  in __xdp_release_frame() local
    429  page = virt_to_head_page(data);  in __xdp_release_frame()
    431  page_pool_release_page(xa->page_pool, page);  in __xdp_release_frame()
    472  struct page *page;  in xdp_convert_zc_to_xdp_frame() local
    482  page = dev_alloc_page();  in xdp_convert_zc_to_xdp_frame()
    [all …]
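The __xdp_return() branches above dispatch on the memory model registered for the RX queue. A hedged sketch of how a driver ties its queue to a page_pool so frames recycle through page_pool_put_page() rather than plain put_page(); mydrv_reg_xdp_mem is hypothetical and xdp_rxq_info_reg() gains extra arguments in later kernels.

```c
#include <net/page_pool.h>
#include <net/xdp.h>

/* Register an XDP rxq and declare its memory comes from a page_pool. */
static int mydrv_reg_xdp_mem(struct xdp_rxq_info *rxq,
			     struct net_device *netdev, u32 qid,
			     struct page_pool *pool)
{
	int err;

	err = xdp_rxq_info_reg(rxq, netdev, qid);
	if (err)
		return err;

	return xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
}
```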
|
D | skbuff.c |
    363   struct page_frag_cache page;  member
    375   return page_frag_alloc(&nc->page, fragsz, gfp_mask);  in __napi_alloc_frag()
    454   nc = this_cpu_ptr(&napi_alloc_cache.page);  in __netdev_alloc_skb()
    519   data = page_frag_alloc(&nc->page, len, gfp_mask);  in __napi_alloc_skb()
    530   if (nc->page.pfmemalloc)  in __napi_alloc_skb()
    543   void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,  in skb_add_rx_frag() argument
    546   skb_fill_page_desc(skb, i, page, off, size);  in skb_add_rx_frag()
    785   struct page *p;  in skb_dump()
    1345  struct page *page, *head = NULL;  in skb_copy_ubufs() local
    1357  page = alloc_page(gfp_mask);  in skb_copy_ubufs()
    [all …]
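A sketch of the receive-side pattern behind skb_add_rx_frag(), which ends in the skb_fill_page_desc() call listed above: copy the headers into the linear area, then attach the payload page as a fragment. mydrv_build_rx_skb and the header/payload split are hypothetical, and the truesize accounting here simply charges a whole page.

```c
#include <linux/skbuff.h>

/* Build an skb around a received page; the skb consumes one page ref. */
static struct sk_buff *mydrv_build_rx_skb(struct napi_struct *napi,
					  struct page *page,
					  unsigned int offset,
					  unsigned int len,
					  unsigned int hdr_len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, hdr_len);
	if (!skb)
		return NULL;

	/* Protocol headers go into the linear area... */
	skb_put_data(skb, page_address(page) + offset, hdr_len);

	/* ...and the rest of the page is handed over as frag 0. */
	skb_add_rx_frag(skb, 0, page, offset + hdr_len, len - hdr_len,
			PAGE_SIZE);
	return skb;
}
```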
|
/net/ceph/ |
D | pagelist.c |
    32  struct page *page = list_entry(pl->head.prev, struct page, lru);  in ceph_pagelist_unmap_tail() local
    33  kunmap(page);  in ceph_pagelist_unmap_tail()
    44  struct page *page = list_first_entry(&pl->head, struct page,  in ceph_pagelist_release() local
    46  list_del(&page->lru);  in ceph_pagelist_release()
    47  __free_page(page);  in ceph_pagelist_release()
    56  struct page *page;  in ceph_pagelist_addpage() local
    59  page = __page_cache_alloc(GFP_NOFS);  in ceph_pagelist_addpage()
    61  page = list_first_entry(&pl->free_list, struct page, lru);  in ceph_pagelist_addpage()
    62  list_del(&page->lru);  in ceph_pagelist_addpage()
    65  if (!page)  in ceph_pagelist_addpage()
    [all …]
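A minimal sketch of the pagelist API whose page management is listed above: ceph_pagelist_append() pulls in pages via ceph_pagelist_addpage() as needed, and ceph_pagelist_release() frees every page on the list. build_payload() is a hypothetical caller.

```c
#include <linux/ceph/pagelist.h>

/* Wrap an opaque blob in a ceph_pagelist, or NULL on failure. */
static struct ceph_pagelist *build_payload(const void *blob, size_t len)
{
	struct ceph_pagelist *pl;

	pl = ceph_pagelist_alloc(GFP_NOFS);
	if (!pl)
		return NULL;

	if (ceph_pagelist_append(pl, blob, len)) {
		ceph_pagelist_release(pl);	/* drops every page on the list */
		return NULL;
	}
	return pl;
}
```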
|
D | pagevec.c |
    13   void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)  in ceph_put_page_vector()
    26   void ceph_release_page_vector(struct page **pages, int num_pages)  in ceph_release_page_vector()
    39   struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)  in ceph_alloc_page_vector()
    41   struct page **pages;  in ceph_alloc_page_vector()
    61   int ceph_copy_user_to_page_vector(struct page **pages,  in ceph_copy_user_to_page_vector()
    87   void ceph_copy_to_page_vector(struct page **pages,  in ceph_copy_to_page_vector()
    110  void ceph_copy_from_page_vector(struct page **pages,  in ceph_copy_from_page_vector()
    137  void ceph_zero_page_vector_range(int off, int len, struct page **pages)  in ceph_zero_page_vector_range()
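A sketch using the page-vector helpers declared above: allocate num_pages order-0 pages, copy user data into them, then free the whole vector. fill_page_vector_from_user() is hypothetical and the error handling is reduced to the minimum.

```c
#include <linux/ceph/libceph.h>

/* Stage a user buffer into a freshly allocated page vector. */
static int fill_page_vector_from_user(const void __user *src, size_t len)
{
	int num_pages = calc_pages_for(0, len);		/* offset 0 */
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = ceph_copy_user_to_page_vector(pages, src, 0, len);
	ceph_release_page_vector(pages, num_pages);
	return ret < 0 ? ret : 0;
}
```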
|
D | messenger.c |
    187  static struct page *zero_page; /* used in certain error cases */
    526  static int ceph_tcp_recvpage(struct socket *sock, struct page *page,  in ceph_tcp_recvpage() argument
    530  .bv_page = page,  in ceph_tcp_recvpage()
    569  static int ceph_tcp_sendpage(struct socket *sock, struct page *page,  in ceph_tcp_sendpage() argument
    572  ssize_t (*sendpage)(struct socket *sock, struct page *page,  in ceph_tcp_sendpage()
    585  if (page_count(page) >= 1 && !PageSlab(page))  in ceph_tcp_sendpage()
    590  ret = sendpage(sock, page, offset, size, flags);  in ceph_tcp_sendpage()
    829  static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,  in ceph_msg_data_bio_next()
    845  struct page *page = bio_iter_page(it->bio, it->iter);  in ceph_msg_data_bio_advance() local
    858  page == bio_iter_page(it->bio, it->iter)))  in ceph_msg_data_bio_advance()
    [all …]
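A sketch of the safety check visible in ceph_tcp_sendpage() above: zero-copy sendpage must not be handed slab pages or pages whose refcount could drop to zero underneath it, so those go through the copying sock_no_sendpage() path instead. kernel_sendpage() stands in here for the socket-ops dispatch the real code performs; send_one_page() is a hypothetical helper.

```c
#include <linux/mm.h>
#include <linux/net.h>
#include <net/sock.h>

/* Send one page over a kernel socket, zero-copy only when it is safe. */
static ssize_t send_one_page(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	if (page_count(page) >= 1 && !PageSlab(page))
		return kernel_sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}
```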
|
D | crypto.c |
    193  struct page *page;  in setup_sgtable() local
    197  page = vmalloc_to_page(buf);  in setup_sgtable()
    199  page = virt_to_page(buf);  in setup_sgtable()
    201  sg_set_page(sg, page, len, off);  in setup_sgtable()
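A sketch of the address-to-page lookup setup_sgtable() performs per chunk: vmalloc memory is not in the linear map, so it must be resolved with vmalloc_to_page(), while linear-map addresses use virt_to_page(). sg_set_one_buf() is a hypothetical helper.

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

/* Point one scatterlist entry at the page backing buf. */
static void sg_set_one_buf(struct scatterlist *sg, void *buf,
			   unsigned int len, unsigned int off)
{
	struct page *page = is_vmalloc_addr(buf) ? vmalloc_to_page(buf)
						 : virt_to_page(buf);

	sg_set_page(sg, page, len, off);
}
```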
|
D | cls_lock_client.c |
    35   struct page *lock_op_page;  in ceph_cls_lock()
    98   struct page *unlock_op_page;  in ceph_cls_unlock()
    147  struct page *break_op_page;  in ceph_cls_break_lock()
    196  struct page *cookie_op_page;  in ceph_cls_set_cookie()
    336  struct page *get_info_op_page, *reply_page;  in ceph_cls_lock_info()
    390  struct page **pages;  in ceph_cls_assert_locked()
|
/net/rds/ |
D | page.c |
    41   struct page *r_page;
    73   struct page *page;  in rds_page_remainder_alloc() local
    80   page = alloc_page(gfp);  in rds_page_remainder_alloc()
    81   if (!page) {  in rds_page_remainder_alloc()
    84   sg_set_page(scat, page, PAGE_SIZE, 0);  in rds_page_remainder_alloc()
    122  page = alloc_page(gfp);  in rds_page_remainder_alloc()
    127  if (!page) {  in rds_page_remainder_alloc()
    134  __free_page(page);  in rds_page_remainder_alloc()
    139  rem->r_page = page;  in rds_page_remainder_alloc()
|
D | rdma.c |
    157  struct page **pages, int write)  in rds_pin_pages()
    179  struct page **pages = NULL;  in __rds_rdma_map()
    216  pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);  in __rds_rdma_map()
    460  struct page *page = sg_page(&ro->op_sg[i]);  in rds_rdma_free_op() local
    466  WARN_ON(!page->mapping && irqs_disabled());  in rds_rdma_free_op()
    467  set_page_dirty(page);  in rds_rdma_free_op()
    469  put_page(page);  in rds_rdma_free_op()
    479  struct page *page = sg_page(ao->op_sg);  in rds_atomic_free_op() local
    484  set_page_dirty(page);  in rds_atomic_free_op()
    485  put_page(page);  in rds_atomic_free_op()
    [all …]
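A hedged sketch of the user-page pinning that rds_pin_pages() wraps. pin_user_buffer() is hypothetical; get_user_pages_fast() took a plain write flag in older kernels and FOLL_* flags later, and newer code uses pin_user_pages_fast() instead.

```c
#include <linux/mm.h>

/* Pin nr_pages of a user buffer; drop any partial pin on failure. */
static int pin_user_buffer(unsigned long uaddr, unsigned int nr_pages,
			   struct page **pages, bool write)
{
	int ret;

	ret = get_user_pages_fast(uaddr, nr_pages,
				  write ? FOLL_WRITE : 0, pages);
	if (ret < 0)
		return ret;

	if (ret != nr_pages) {
		while (ret > 0)
			put_page(pages[--ret]);
		return -EFAULT;
	}
	return 0;
}
```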
|
D | info.c |
    65   struct page **pages;
    167  struct page **pages = NULL;  in rds_info_getsockopt()
    191  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);  in rds_info_getsockopt()
|
/net/atm/ |
D | mpoa_proc.c |
    212  char *page, *p;  in proc_mpc_write() local
    221  page = (char *)__get_free_page(GFP_KERNEL);  in proc_mpc_write()
    222  if (!page)  in proc_mpc_write()
    225  for (p = page, len = 0; len < nbytes; p++, len++) {  in proc_mpc_write()
    227  free_page((unsigned long)page);  in proc_mpc_write()
    236  if (!parse_qos(page))  in proc_mpc_write()
    237  printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);  in proc_mpc_write()
    239  free_page((unsigned long)page);  in proc_mpc_write()
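A sketch of the classic proc-write pattern used above: stage the user buffer in one free page, NUL-terminate it, parse, then free the page. example_proc_write() and parse_line() are hypothetical; parse_line() merely stands in for parse_qos().

```c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical parser standing in for parse_qos(). */
static bool parse_line(const char *s)
{
	return *s != '\0';
}

/* Copy at most one page of user input and hand it to the parser. */
static ssize_t example_proc_write(const char __user *buf, size_t nbytes)
{
	size_t len = min_t(size_t, nbytes, PAGE_SIZE - 1);
	char *page;

	page = (char *)__get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (copy_from_user(page, buf, len)) {
		free_page((unsigned long)page);
		return -EFAULT;
	}
	page[len] = '\0';

	if (!parse_line(page))
		pr_warn("could not parse '%s'\n", page);

	free_page((unsigned long)page);
	return len;
}
```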
|
D | proc.c |
    320  unsigned long page;  in proc_dev_atm_read() local
    325  page = get_zeroed_page(GFP_KERNEL);  in proc_dev_atm_read()
    326  if (!page)  in proc_dev_atm_read()
    332  length = dev->ops->proc_read(dev, pos, (char *)page);  in proc_dev_atm_read()
    337  if (copy_to_user(buf, (char *)page, length))  in proc_dev_atm_read()
    341  free_page(page);  in proc_dev_atm_read()
|
/net/ipv4/ |
D | tcp_bpf.c |
    64   struct page *page;  in __tcp_bpf_recvmsg() local
    69   page = sg_page(sge);  in __tcp_bpf_recvmsg()
    72   ret = copy_page_to_iter(page, sge->offset, copy, iter);  in __tcp_bpf_recvmsg()
    88   put_page(page);  in __tcp_bpf_recvmsg()
    220  struct page *page;  in tcp_bpf_push() local
    231  page = sg_page(sge);  in tcp_bpf_push()
    239  page, off, size, flags);  in tcp_bpf_push()
    241  ret = do_tcp_sendpages(sk, page, off, size, flags);  in tcp_bpf_push()
    259  put_page(page);  in tcp_bpf_push()
    483  static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,  in tcp_bpf_sendpage() argument
    [all …]
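A sketch of the copy-out step in __tcp_bpf_recvmsg() above: each scatterlist entry is a (page, offset, length) triple copied into the receiving iov_iter with copy_page_to_iter(). copy_sge_to_iter() is a hypothetical helper; the real code also charges the consumed bytes against the msg and socket memory.

```c
#include <linux/scatterlist.h>
#include <linux/uio.h>

/* Copy up to want bytes of one sge into the caller's iterator. */
static int copy_sge_to_iter(struct scatterlist *sge, struct iov_iter *iter,
			    int want)
{
	struct page *page = sg_page(sge);
	int copy = min_t(int, want, sge->length);

	if (copy_page_to_iter(page, sge->offset, copy, iter) != copy)
		return -EFAULT;

	return copy;
}
```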
|
D | esp4.c |
    278  struct page *page;  in esp_output_head() local
    314  page = pfrag->page;  in esp_output_head()
    315  get_page(page);  in esp_output_head()
    317  vaddr = kmap_atomic(page);  in esp_output_head()
    327  __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,  in esp_output_head()
    373  struct page *page;  in esp_output_tail() local
    431  page = pfrag->page;  in esp_output_tail()
    432  get_page(page);  in esp_output_tail()
    434  __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);  in esp_output_tail()
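A hedged sketch of the page-frag trick esp_output_head()/esp_output_tail() rely on: reserve room in the socket's page frag, write through a temporary kmap_atomic() mapping, and attach the written region to the skb as a new fragment. append_trailer() is hypothetical and the length/truesize bookkeeping is reduced to the essentials.

```c
#include <linux/highmem.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Append tlen trailer bytes to skb out of sk's page frag. */
static int append_trailer(struct sock *sk, struct sk_buff *skb,
			  const u8 *trailer, unsigned int tlen)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int i = skb_shinfo(skb)->nr_frags;
	struct page *page;
	u8 *vaddr;

	if (!skb_page_frag_refill(tlen, pfrag, GFP_ATOMIC))
		return -ENOMEM;

	page = pfrag->page;
	get_page(page);				/* the frag holds its own ref */

	vaddr = kmap_atomic(page);
	memcpy(vaddr + pfrag->offset, trailer, tlen);
	kunmap_atomic(vaddr);

	__skb_fill_page_desc(skb, i, page, pfrag->offset, tlen);
	skb_shinfo(skb)->nr_frags = i + 1;

	pfrag->offset += tlen;
	skb->len      += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	return 0;
}
```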
|
D | udp_impl.h | 28 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
|
/net/mac802154/ |
D | trace.h |
    79  TP_PROTO(struct ieee802154_local *local, u8 page, u8 channel),
    80  TP_ARGS(local, page, channel),
    83  __field(u8, page)
    88  __entry->page = page;
    92  __entry->page, __entry->channel)
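For context, a sketch of the TRACE_EVENT() shape these fragments come from, with a hypothetical event name; the usual TRACE_SYSTEM / define_trace boilerplate of a real trace header is omitted. The channel-page pair is captured into the ring buffer via __field()/__entry and rendered by TP_printk().

```c
#include <linux/tracepoint.h>

TRACE_EVENT(example_set_channel,
	TP_PROTO(u8 page, u8 channel),
	TP_ARGS(page, channel),
	TP_STRUCT__entry(
		__field(u8, page)
		__field(u8, channel)
	),
	TP_fast_assign(
		__entry->page = page;
		__entry->channel = channel;
	),
	TP_printk("page %d, channel %d", __entry->page, __entry->channel)
);
```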
|
/net/sunrpc/xprtrdma/ |
D | svc_rdma_backchannel.c |
    141  struct page *page;  in xprt_rdma_bc_allocate() local
    149  page = alloc_page(RPCRDMA_DEF_GFP);  in xprt_rdma_bc_allocate()
    150  if (!page)  in xprt_rdma_bc_allocate()
    152  rqst->rq_buffer = page_address(page);  in xprt_rdma_bc_allocate()
    156  put_page(page);  in xprt_rdma_bc_allocate()
|
/net/ieee802154/ |
D | trace.h |
    103  TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel),
    104  TP_ARGS(wpan_phy, page, channel),
    107  __field(u8, page)
    112  __entry->page = page;
    116  __entry->page, __entry->channel)
|
D | nl-mac.c |
    177  u8 page;  in ieee802154_associate_req() local
    206  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);  in ieee802154_associate_req()
    208  page = 0;  in ieee802154_associate_req()
    212  page,  in ieee802154_associate_req()
    301  u8 page;  in ieee802154_start_req() local
    342  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);  in ieee802154_start_req()
    344  page = 0;  in ieee802154_start_req()
    353  ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,  in ieee802154_start_req()
    374  u8 page;  in ieee802154_scan_req() local
    392  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);  in ieee802154_scan_req()
    [all …]
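A sketch of the optional-attribute pattern repeated in these handlers: use IEEE802154_ATTR_PAGE when userspace supplied it, otherwise default to channel page 0. get_channel_page() is a hypothetical helper; the attribute enum comes from the 802.15.4 netlink headers.

```c
#include <net/genetlink.h>
#include <net/netlink.h>

/* Return the requested channel page, or 0 if the attribute is absent. */
static u8 get_channel_page(struct genl_info *info)
{
	if (info->attrs[IEEE802154_ATTR_PAGE])
		return nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);

	return 0;	/* default channel page */
}
```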
|
/net/sunrpc/ |
D | backchannel_rqst.c |
    69  struct page *page;  in xprt_alloc_xdr_buf() local
    71  page = alloc_page(gfp_flags);  in xprt_alloc_xdr_buf()
    72  if (page == NULL)  in xprt_alloc_xdr_buf()
    74  xdr_buf_init(buf, page_address(page), PAGE_SIZE);  in xprt_alloc_xdr_buf()
|
D | sunrpc.h |
    54  struct page *headpage, unsigned long headoffset,
    55  struct page *tailpage, unsigned long tailoffset);
|
D | xdr.c |
    178  struct page **pages, unsigned int base, unsigned int len)  in xdr_inline_pages()
    219  _shift_data_right_pages(struct page **pages, size_t pgto_base,  in _shift_data_right_pages()
    222  struct page **pgfrom, **pgto;  in _shift_data_right_pages()
    280  _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)  in _copy_to_pages()
    282  struct page **pgto;  in _copy_to_pages()
    324  _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)  in _copy_from_pages()
    326  struct page **pgfrom;  in _copy_from_pages()
    565  void *page;  in xdr_commit_encode() local
    569  page = page_address(*xdr->page_ptr);  in xdr_commit_encode()
    570  memcpy(xdr->scratch.iov_base, page, shift);  in xdr_commit_encode()
    [all …]
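A sketch of how a page vector gets hung off an RPC buffer before the shift/copy helpers above operate on it: the head kvec carries the encoded header, and xdr_inline_pages() points buf->pages at the payload pages. setup_reply_pages() is a hypothetical caller.

```c
#include <linux/sunrpc/xdr.h>

/* head/head_len: header scratch buffer; pages/len: bulk payload. */
static void setup_reply_pages(struct xdr_buf *buf,
			      void *head, size_t head_len,
			      struct page **pages, unsigned int len)
{
	xdr_buf_init(buf, head, head_len);
	xdr_inline_pages(buf, head_len, pages, 0, len);
}
```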
|
/net/ipv6/ |
D | esp6.c |
    231  struct page *page;  in esp6_output_head() local
    259  page = pfrag->page;  in esp6_output_head()
    260  get_page(page);  in esp6_output_head()
    262  vaddr = kmap_atomic(page);  in esp6_output_head()
    272  __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,  in esp6_output_head()
    316  struct page *page;  in esp6_output_tail() local
    372  page = pfrag->page;  in esp6_output_tail()
    373  get_page(page);  in esp6_output_tail()
    375  __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);  in esp6_output_tail()
|
/net/xfrm/ |
D | xfrm_ipcomp.c |
    69  struct page *page;  in ipcomp_decompress() local
    76  page = alloc_page(GFP_ATOMIC);  in ipcomp_decompress()
    79  if (!page)  in ipcomp_decompress()
    82  __skb_frag_set_page(frag, page);  in ipcomp_decompress()
|