/net/rds/
D | info.c
  in rds_info_getsockopt():
    163  unsigned long nr_pages = 0;   (local)
    187  nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
    190  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
    195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
    196  if (ret != nr_pages) {
    198  nr_pages = ret;
    200  nr_pages = 0;
    205  rdsdebug("len %d nr_pages %lu\n", len, nr_pages);
    238  unpin_user_pages(pages, nr_pages);
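
The info.c hits trace the whole lifecycle of a pinned user buffer: size the
page array from an unaligned (start, len) range, pin with FOLL_WRITE, cope
with a partial pin, and unpin on exit. A minimal sketch of that pattern
follows; pin_user_range() and its error policy are illustrative assumptions,
not the actual rds_info_getsockopt() body.

#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch: derive a page count from a user (start, len) range that need
 * not be page aligned, pin the pages for writing, and hand them back.
 * On a partial pin, release what was pinned and fail.
 */
static int pin_user_range(unsigned long start, unsigned long len,
			  struct page ***pagesp, unsigned long *nr_pagesp)
{
	unsigned long nr_pages;
	struct page **pages;
	int ret;

	/* Round the end up and the start down so partial pages count. */
	nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
			>> PAGE_SHIFT;

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (ret != nr_pages) {
		if (ret > 0)
			unpin_user_pages(pages, ret);	/* partial pin */
		kfree(pages);
		return ret < 0 ? ret : -EFAULT;
	}

	*pagesp = pages;
	*nr_pagesp = nr_pages;
	return 0;
}

The caller is expected to pair this with unpin_user_pages(pages, nr_pages)
and kfree(pages) once the copy into the buffer is done, as line 238 above
does.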

D | rdma.c
  in rds_pin_pages():
    154  static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,   (argument)
    163  ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
    164  if (ret >= 0 && ret < nr_pages) {
  in __rds_rdma_map():
    178  unsigned int nr_pages;   (local)
    213  nr_pages = rds_pages_in_vec(&args->vec);
    214  if (nr_pages == 0) {
    222  if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
    228  args->vec.addr, args->vec.bytes, nr_pages);
    231  pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
    265  ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
  [all …]
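
rds_pages_in_vec() (lines 213-222) turns a user vector into a page count,
with 0 signalling an invalid or overflowing vector, and the result is
checked against RDS_MAX_MSG_SIZE before any allocation happens. Below is
a sketch of that computation, assuming the vector mirrors struct
rds_iovec (u64 addr/bytes); struct user_vec and pages_in_vec() are
hypothetical names.

#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical vector type in the mold of struct rds_iovec. */
struct user_vec {
	u64 addr;
	u64 bytes;
};

/* How many pages does [addr, addr + bytes) touch?  Returns 0 on
 * wraparound, which the caller treats as invalid, matching the
 * nr_pages == 0 check in __rds_rdma_map() above.
 */
static unsigned int pages_in_vec(const struct user_vec *vec)
{
	unsigned long first, last;

	if (vec->addr + vec->bytes < vec->addr)		/* overflow */
		return 0;

	first = vec->addr >> PAGE_SHIFT;
	last = (vec->addr + vec->bytes - 1) >> PAGE_SHIFT;
	return last - first + 1;
}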

/net/9p/
D | trans_virtio.c
  in pack_sg_list_p():
    219  struct page **pdata, int nr_pages, size_t offs, int count)   (argument)
    225  BUG_ON(nr_pages > (limit - start));
    230  while (nr_pages) {
    240  nr_pages--;
  in p9_get_mapped_pages():
    316  int nr_pages;   (local)
    338  nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
    339  atomic_add(nr_pages, &vp_pinned);
    359  nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
    362  *pages = kmalloc_array(nr_pages, sizeof(struct page *),
    369  for (index = 0; index < nr_pages; index++) {
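
Note how p9_get_mapped_pages() sizes the pin at line 338:
DIV_ROUND_UP(n + *offs, PAGE_SIZE) counts the offset into the first page,
so a buffer that straddles page boundaries is never undercounted.
pack_sg_list_p() then walks those pages into a scatterlist. The sketch
below shows that packing idea only; the start/limit bookkeeping of the
real function is simplified, and pack_pages_into_sg() is a hypothetical
name.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Describe @count bytes spread over @nr_pages pinned pages in a
 * scatterlist, where the data starts @offs bytes into the first page.
 * Returns the number of sg entries used, or -ENOSPC past @limit.
 */
static int pack_pages_into_sg(struct scatterlist *sg, int limit,
			      struct page **pdata, int nr_pages,
			      size_t offs, size_t count)
{
	int index = 0;

	while (nr_pages && count) {
		size_t s = min_t(size_t, count, PAGE_SIZE - offs);

		if (index >= limit)
			return -ENOSPC;
		sg_set_page(&sg[index++], *pdata++, s, offs);
		offs = 0;	/* only the first page starts mid-page */
		count -= s;
		nr_pages--;
	}
	if (index)
		sg_mark_end(&sg[index - 1]);
	return index;
}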

D | trans_common.c
  in p9_release_pages():
    24  void p9_release_pages(struct page **pages, int nr_pages)   (argument)
    28  for (i = 0; i < nr_pages; i++)
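
The loop body is elided above; a self-contained helper in the same mold
might look like this sketch (assuming get_page()-style references, so
put_page() is the matching release; release_pages_array() is a
hypothetical name).

#include <linux/mm.h>

/* Drop the reference taken at get/pin time on every page, skipping
 * slots that were never filled.
 */
static void release_pages_array(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pages[i])
			put_page(pages[i]);
}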

D | trans_common.h
    15  void p9_release_pages(struct page **pages, int nr_pages);

/net/xdp/
D | xsk_buff_pool.c
  in xp_create_dma_map():
    277  u32 nr_pages, struct xdp_umem *umem)   (argument)
    285  dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
    294  dma_map->dma_pages_cnt = nr_pages;
  in xp_dma_map():
    376  unsigned long attrs, struct page **pages, u32 nr_pages)   (argument)
    393  dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
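
xp_create_dma_map() allocates one DMA-address slot per umem page with
kvcalloc(), which falls back from kmalloc to vmalloc when nr_pages is
large. A sketch of that allocate/record/free pattern; struct
dma_page_map and the helper names are illustrative, not the xsk types.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct dma_page_map {
	dma_addr_t *dma_pages;	/* one mapped address per page */
	u32 dma_pages_cnt;
};

static int dma_map_alloc(struct dma_page_map *map, u32 nr_pages)
{
	map->dma_pages = kvcalloc(nr_pages, sizeof(*map->dma_pages),
				  GFP_KERNEL);
	if (!map->dma_pages)
		return -ENOMEM;
	map->dma_pages_cnt = nr_pages;
	return 0;
}

static void dma_map_free(struct dma_page_map *map)
{
	kvfree(map->dma_pages);
	map->dma_pages = NULL;
	map->dma_pages_cnt = 0;
}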

D | xdp_umem.c
  in xdp_umem_addr_map():
    49  u32 nr_pages)   (argument)
    51  umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
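
xdp_umem_addr_map() is the one-liner version of a common trick: once the
umem pages are pinned, vmap() stitches them into a single contiguous
kernel virtual range so frames can be addressed with plain pointer
arithmetic. A sketch, with hypothetical helper names:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map nr_pages already-pinned pages contiguously; NULL on failure. */
static void *map_umem_pages(struct page **pages, u32 nr_pages)
{
	return vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
}

/* Tear the mapping down before the pages themselves are unpinned. */
static void unmap_umem_pages(void *addrs)
{
	vunmap(addrs);
}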

/net/core/
D | page_pool.c
  in __page_pool_alloc_pages_slow():
    284  int i, nr_pages;   (local)
    297  nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
    298  if (unlikely(!nr_pages))
    304  for (i = 0; i < nr_pages; i++) {
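
The page_pool slow path refills its cache with one batched call rather
than a page-at-a-time loop. The sketch below assumes a kernel that still
exports alloc_pages_bulk_array() (recent trees have reworked the bulk
allocator API); note that the return value, not the request size, tells
you how many pages arrived.

#include <linux/gfp.h>

/* Ask for @bulk pages in one call and report how many were delivered;
 * 0 means the caller should fall back to single-page allocation.
 */
static unsigned long refill_cache(gfp_t gfp, unsigned long bulk,
				  struct page **cache)
{
	unsigned long nr_pages = alloc_pages_bulk_array(gfp, bulk, cache);
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		/* per-page setup (e.g. DMA mapping) would go here */
	}
	return nr_pages;
}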

D | skbuff.c
  in spd_can_coalesce():
    2488  return spd->nr_pages &&
    2489  spd->pages[spd->nr_pages - 1] == page &&
    2490  (spd->partial[spd->nr_pages - 1].offset +
    2491  spd->partial[spd->nr_pages - 1].len == offset);
  in spd_fill_page():
    2503  if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
    2512  spd->partial[spd->nr_pages - 1].len += *len;
    2516  spd->pages[spd->nr_pages] = page;
    2517  spd->partial[spd->nr_pages].len = *len;
    2518  spd->partial[spd->nr_pages].offset = offset;
    2519  spd->nr_pages++;
  [all …]
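
spd_can_coalesce() is the interesting check here: before spd_fill_page()
burns a new pipe-buffer slot, it asks whether the new fragment sits on
the same page as the previous one and begins exactly where that fragment
ends; if so, the previous entry's length is simply extended (line 2512).
A sketch of that predicate, mirroring the snippet above
(frag_can_coalesce() is a hypothetical name):

#include <linux/splice.h>
#include <linux/types.h>

/* True if (page, offset) continues the last fragment in @spd, so the
 * two can share one splice_pipe_desc slot.
 */
static bool frag_can_coalesce(const struct splice_pipe_desc *spd,
			      struct page *page, unsigned int offset)
{
	return spd->nr_pages &&
	       spd->pages[spd->nr_pages - 1] == page &&
	       (spd->partial[spd->nr_pages - 1].offset +
		spd->partial[spd->nr_pages - 1].len == offset);
}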

/net/dccp/
D | proto.c
  in dccp_init():
    1128  unsigned long nr_pages = totalram_pages();   (local)
    1152  if (nr_pages >= (128 * 1024))
    1153  goal = nr_pages >> (21 - PAGE_SHIFT);
    1155  goal = nr_pages >> (23 - PAGE_SHIFT);
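
dccp_init() sizes its hash table from total RAM: on machines with at
least 128K pages (512 MB with 4 KB pages) the goal is roughly one unit
per 2 MB of memory, otherwise one per 8 MB; sctp_init() below uses the
same idiom with shifts of 22/24 in place of 21/23. A sketch of the
arithmetic (hash_goal_from_ram() is a hypothetical name):

#include <linux/mm.h>

static unsigned long hash_goal_from_ram(void)
{
	unsigned long nr_pages = totalram_pages();

	/* nr_pages << PAGE_SHIFT is bytes of RAM, so shifting pages by
	 * (21 - PAGE_SHIFT) is bytes >> 21, i.e. RAM / 2 MB.
	 */
	if (nr_pages >= (128 * 1024))
		return nr_pages >> (21 - PAGE_SHIFT);	/* ~RAM / 2 MB */
	return nr_pages >> (23 - PAGE_SHIFT);		/* ~RAM / 8 MB */
}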

/net/sctp/
D | protocol.c
  in sctp_init():
    1476  unsigned long nr_pages = totalram_pages();   (local)
    1539  if (nr_pages >= (128 * 1024))
    1540  goal = nr_pages >> (22 - PAGE_SHIFT);
    1542  goal = nr_pages >> (24 - PAGE_SHIFT);

/net/netfilter/
D | xt_hashlimit.c
  in htable_create():
    284  unsigned long nr_pages = totalram_pages();   (local)
    290  size = (nr_pages << PAGE_SHIFT) / 16384 /
    292  if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)

D | nf_conntrack_core.c
  in nf_conntrack_init_start():
    2729  unsigned long nr_pages = totalram_pages();   (local)
    2745  = (((nr_pages << PAGE_SHIFT) / 16384)
    2748  nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
    2750  else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
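
xt_hashlimit and conntrack use the same RAM-proportional sizing but add
a clamp so very large machines do not get proportionally huge tables:
one entry per 16 KB of memory, capped once total RAM crosses a threshold
(1 GB in the hashlimit snippet, 1 GB and 4 GB tiers in conntrack). A
sketch of the shape; the divisor and cap mirror the snippets above, and
table_size_from_ram() with its parameters is hypothetical.

#include <linux/mm.h>

static unsigned long table_size_from_ram(size_t entry_size,
					 unsigned long capped_size)
{
	unsigned long nr_pages = totalram_pages();
	unsigned long size;

	/* One entry per 16 KB of RAM ... */
	size = (nr_pages << PAGE_SHIFT) / 16384 / entry_size;

	/* ... unless the machine has more than 1 GB, then just cap. */
	if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
		size = capped_size;
	return size;
}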

/net/smc/
D | smc_rx.c
  in smc_rx_splice():
    162  spd.nr_pages = 1;