/net/9p/ |
D | trans_xen.c |
    322  unsigned int order) in xen_9pfs_front_alloc_dataring() argument
    340  bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT), in xen_9pfs_front_alloc_dataring()
    346  for (; i < (1 << order); i++) { in xen_9pfs_front_alloc_dataring()
    353  ring->intf->ring_order = order; in xen_9pfs_front_alloc_dataring()
    355  ring->data.out = bytes + XEN_FLEX_RING_SIZE(order); in xen_9pfs_front_alloc_dataring()
    372  free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT)); in xen_9pfs_front_alloc_dataring()
|
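The trans_xen.c hits show the usual pairing of an allocation order with a byte size: the transport grabs 1UL << (order + XEN_PAGE_SHIFT) bytes and splits them into "in" and "out" data rings. A minimal userspace sketch of that arithmetic, assuming 4 KiB Xen pages and the conventional definition of XEN_FLEX_RING_SIZE() as half the allocation (both assumptions, not taken from the listing):

    /* ring_order.c - byte sizes implied by a 9p/Xen ring order.
     * Assumes XEN_PAGE_SHIFT == 12 and the usual XEN_FLEX_RING_SIZE()
     * definition (half of the 2^order-page allocation). */
    #include <stdio.h>

    #define XEN_PAGE_SHIFT 12
    #define XEN_FLEX_RING_SIZE(order) (1UL << ((order) + XEN_PAGE_SHIFT - 1))

    int main(void)
    {
        for (unsigned int order = 0; order <= 4; order++) {
            unsigned long total = 1UL << (order + XEN_PAGE_SHIFT);

            printf("order %u: %4lu KiB total, in/out rings %4lu KiB each\n",
                   order, total >> 10, XEN_FLEX_RING_SIZE(order) >> 10);
        }
        return 0;
    }

The equal split matches the listing's ring->data.out = bytes + XEN_FLEX_RING_SIZE(order), with data.in starting at bytes.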
/net/core/ |
D | page_pool.c |
    360  (PAGE_SIZE << pool->p.order), in page_pool_dma_map()
    395  page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
    421  unsigned int pp_order = pool->p.order; in __page_pool_alloc_pages_slow()
    529  PAGE_SIZE << pool->p.order, pool->p.dma_dir, in page_pool_return_page()
    748  unsigned int max_size = PAGE_SIZE << pool->p.order; in page_pool_alloc_frag()
|
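In page_pool.c the order is a pool-wide parameter: every page the pool hands out spans PAGE_SIZE << pool->p.order bytes, and that same size is used for DMA mapping and unmapping. A hedged kernel-style sketch of configuring such a pool (struct page_pool_params and page_pool_create() are the real API; the header path, flag choice, and driver context are assumptions for illustration):

    #include <net/page_pool.h>  /* older trees; newer ones split this header */

    /* Sketch: an RX pool whose every "page" is order-2, i.e. four
     * contiguous pages, PAGE_SIZE << 2 bytes.  With PP_FLAG_DMA_MAP set,
     * the pool DMA-maps that whole span per page, as in the lines above. */
    static struct page_pool *rx_pool_create_sketch(struct device *dev)
    {
        struct page_pool_params pp = {
            .order     = 2,
            .pool_size = 256,
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
            .flags     = PP_FLAG_DMA_MAP,
        };

        return page_pool_create(&pp);
    }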
D | datagram.c |
    635  int refs, order, n = 0; in __zerocopy_sg_from_iter() local
    663  order = compound_order(head); in __zerocopy_sg_from_iter()
    668  if (pages[n] - head > (1UL << order) - 1) { in __zerocopy_sg_from_iter()
    670  order = compound_order(head); in __zerocopy_sg_from_iter()
|
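The datagram.c hits are the zerocopy coalescing path: compound_order(head) gives the span of the current compound page, and the `pages[n] - head > (1UL << order) - 1` test asks whether the next page still falls inside it. A userspace analogue of that grouping (array indices stand in for struct page pointers, and the order value is invented for the demo):

    /* coalesce.c - group consecutive "pages" that share one 2^order block,
     * mirroring the compound_order() check in __zerocopy_sg_from_iter(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pages[] = { 8, 9, 10, 11, 40, 41 };  /* page "addresses" */
        int total = sizeof(pages) / sizeof(pages[0]);
        int n = 0;

        while (n < total) {
            unsigned long head = pages[n];
            unsigned int order = 2;   /* pretend each compound spans 4 pages */
            int refs = 0;

            /* absorb entries while they stay within the same compound page */
            while (n < total && pages[n] - head <= (1UL << order) - 1) {
                refs++;
                n++;
            }
            printf("compound at %lu: %d pages coalesced into one frag\n",
                   head, refs);
        }
        return 0;
    }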
D | skbuff.c |
    1806  int i, order, psize, new_frags; in skb_copy_ubufs() local
    1818  order = 0; in skb_copy_ubufs()
    1819  while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) in skb_copy_ubufs()
    1820  order++; in skb_copy_ubufs()
    1821  psize = (PAGE_SIZE << order); in skb_copy_ubufs()
    1823  new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); in skb_copy_ubufs()
    1825  page = alloc_pages(gfp_mask | __GFP_COMP, order); in skb_copy_ubufs()
    6320  int order, in alloc_skb_with_frags() argument
    6330  if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) in alloc_skb_with_frags()
    6341  while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) in alloc_skb_with_frags()
    [all …]
|
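The skb_copy_ubufs() hits above pick the smallest order such that MAX_SKB_FRAGS pages of that order cover the whole fragment payload, then round the length up to 2^order-page chunks; alloc_skb_with_frags() runs the same logic in reverse, shrinking the order until it fits the data. The forward computation as a standalone demo (PAGE_SHIFT 12 and MAX_SKB_FRAGS 17 are typical configuration values, assumed here):

    /* frag_order.c - the order/frag arithmetic from skb_copy_ubufs(). */
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define MAX_SKB_FRAGS 17

    int main(void)
    {
        unsigned long pagelen = 200 * 1024;  /* example payload: 200 KiB */
        int order = 0;

        /* smallest order so MAX_SKB_FRAGS frags can hold the payload */
        while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
            order++;

        unsigned long psize = PAGE_SIZE << order;
        /* round up to whole 2^order-page chunks */
        int new_frags = (pagelen + psize - 1) >> (PAGE_SHIFT + order);

        printf("pagelen %lu -> order %d (%lu KiB per frag), %d frags\n",
               pagelen, order, psize >> 10, new_frags);
        return 0;
    }

With these numbers the loop settles on order 2 (16 KiB frags) and 13 fragments.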
/net/ipv4/ |
D | fib_semantics.c |
    550  static int fib_detect_death(struct fib_info *fi, int order, in fib_detect_death() argument
    574  if ((state & NUD_VALID) && order != dflt) in fib_detect_death()
    577  (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) { in fib_detect_death()
    579  *last_idx = order; in fib_detect_death()
    2059  int order = -1, last_idx = -1; in fib_select_default() local
    2100  } else if (!fib_detect_death(fi, order, &last_resort, in fib_select_default()
    2103  fa1->fa_default = order; in fib_select_default()
    2107  order++; in fib_select_default()
    2110  if (order <= 0 || !fi) { in fib_select_default()
    2116  if (!fib_detect_death(fi, order, &last_resort, &last_idx, in fib_select_default()
    [all …]
|
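In fib_semantics.c, order is not a page order at all: fib_select_default() numbers the candidate default routes as it walks them, and fib_detect_death() records in *last_idx the most recent candidate whose gateway neighbour still looks valid, keeping it as a last resort. A deliberately simplified model of that scan (the alive flag replaces the kernel's NUD_* neighbour-state checks; every name here is invented):

    /* default_scan.c - schematic of the fib_select_default() walk. */
    #include <stdbool.h>
    #include <stdio.h>

    struct candidate { const char *gw; bool alive; };

    int main(void)
    {
        struct candidate cands[] = {
            { "192.0.2.1", false },
            { "192.0.2.2", true  },
            { "192.0.2.3", false },
        };
        int last_idx = -1;

        for (int order = 0; order < 3; order++) {
            /* stand-in for fib_detect_death() doing *last_idx = order */
            if (cands[order].alive)
                last_idx = order;
        }

        if (last_idx >= 0)
            printf("last-resort default via %s (index %d)\n",
                   cands[last_idx].gw, last_idx);
        else
            printf("no gateway looked alive\n");
        return 0;
    }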
D | Kconfig |
    228  audio and video broadcasts. In order to do that, you would most
    263  Kernel side support for Sparse Mode PIM version 2. In order to use
    655  the TCP sender in order to:
|
/net/switchdev/ |
D | Kconfig | 11 drivers in order to support hardware switch chips in very generic
|
/net/sctp/ |
D | protocol.c |
    1491  int order; in sctp_init() local
    1553  order = get_order(goal); in sctp_init()
    1562  order = min(order, max_entry_order); in sctp_init()
    1585  __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order); in sctp_init()
    1586  } while (!sctp_port_hashtable && --order > 0); in sctp_init()
    1597  num_entries = (1UL << order) * PAGE_SIZE / in sctp_init()
|
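sctp_init() converts a byte goal into an order with get_order(), clamps it to a maximum, then retries __get_free_pages() with ever smaller orders until an allocation succeeds — exactly the do/while visible above. A runnable sketch of that shrink-and-retry loop (try_alloc() stands in for __get_free_pages() and is rigged to fail above order 3 so the fallback is visible; the goal and cap are example values):

    /* shrink_alloc.c - retry an allocation at decreasing orders, like
     * the sctp_init() port-hash setup quoted above. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* smallest order whose 2^order pages cover size bytes, like get_order() */
    static int get_order_of(unsigned long size)
    {
        int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    /* stand-in for __get_free_pages(); rigged to fail above order 3 */
    static void *try_alloc(int order)
    {
        if (order > 3)
            return NULL;
        return malloc(PAGE_SIZE << order);
    }

    int main(void)
    {
        unsigned long goal = 512 * 1024;  /* example: 512 KiB table goal */
        int max_entry_order = 11;         /* example cap */
        int order = get_order_of(goal);
        void *table = NULL;

        if (order > max_entry_order)
            order = max_entry_order;

        do {
            table = try_alloc(order);
        } while (!table && --order > 0);

        if (table)
            printf("allocated at order %d (%lu KiB)\n",
                   order, (PAGE_SIZE << order) >> 10);
        free(table);
        return 0;
    }

With these numbers the first success lands at order 3, a 32 KiB table instead of the 512 KiB goal — which is why line 1597 recomputes num_entries from the order actually obtained.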
D | Kconfig | 27 with an option for order-of-arrival delivery of individual user
|
/net/ipv6/ila/ |
D | ila_xlat.c |
    208  int err = 0, order; in ila_add_mapping() local
    230  order = ila_order(ila); in ila_add_mapping()
    250  if (order > ila_order(tila)) in ila_add_mapping()
|
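ila_xlat.c overloads the name yet again: ila_order() scores a mapping by how specific its match is, and ila_add_mapping() walks the hash chain with `if (order > ila_order(tila))` so more specific mappings sit first. A generic sketch of that sorted-insert pattern (scores and labels are invented; only the descending-order comparison mirrors the listing):

    /* ordered_insert.c - keep a chain sorted by a precedence score. */
    #include <stdio.h>

    struct mapping {
        int order;               /* precedence: higher = more specific */
        const char *name;
        struct mapping *next;
    };

    /* insert so the list stays sorted by descending order, mirroring the
     * "if (order > ila_order(tila))" comparison in ila_add_mapping() */
    static void insert_sorted(struct mapping **head, struct mapping *m)
    {
        while (*head && (*head)->order >= m->order)
            head = &(*head)->next;
        m->next = *head;
        *head = m;
    }

    int main(void)
    {
        struct mapping a = { 1, "wildcard", NULL };
        struct mapping b = { 3, "ifindex+csum", NULL };
        struct mapping c = { 2, "ifindex", NULL };
        struct mapping *head = NULL;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);

        for (struct mapping *m = head; m; m = m->next)
            printf("order %d: %s\n", m->order, m->name);
        return 0;
    }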
/net/mptcp/ |
D | Kconfig | 10 subflows in order to utilize multiple network paths. Each subflow
|
/net/qrtr/ |
D | Kconfig | 12 In order to do service lookups, a userspace daemon is required to
|
/net/8021q/ |
D | Kconfig | 13 the 'ip' utility in order to effectively use VLANs.
|
/net/packet/ |
D | af_packet.c |
    4404  static void free_pg_vec(struct pgv *pg_vec, unsigned int order, in free_pg_vec() argument
    4415  order); in free_pg_vec()
    4422  static char *alloc_one_pg_vec_page(unsigned long order) in alloc_one_pg_vec_page() argument
    4428  buffer = (char *) __get_free_pages(gfp_flags, order); in alloc_one_pg_vec_page()
    4433  buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); in alloc_one_pg_vec_page()
    4439  buffer = (char *) __get_free_pages(gfp_flags, order); in alloc_one_pg_vec_page()
    4447  static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) in alloc_pg_vec() argument
    4458  pg_vec[i].buffer = alloc_one_pg_vec_page(order); in alloc_pg_vec()
    4467  free_pg_vec(pg_vec, order, block_nr); in alloc_pg_vec()
    4478  int was_running, order = 0; in packet_set_ring() local
    [all …]
|
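alloc_one_pg_vec_page() above tries three strategies for an order-sized block: a quick attempt at physically contiguous pages, then vzalloc() for virtually contiguous memory, then contiguous pages again with reclaim allowed. A simplified restatement of that fallback, reconstructed from the lines quoted above (the precise gfp flag set is an assumption beyond what the listing shows):

    #include <linux/gfp.h>      /* __get_free_pages(), gfp flags */
    #include <linux/vmalloc.h>  /* vzalloc() */
    #include <linux/overflow.h> /* array_size() */

    static char *pg_vec_page_alloc_sketch(unsigned long order)
    {
        gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
        char *buffer;

        /* 1) cheap attempt: contiguous pages, give up quickly on pressure */
        buffer = (char *)__get_free_pages(gfp, order);
        if (buffer)
            return buffer;

        /* 2) fall back to virtually contiguous memory */
        buffer = vzalloc(array_size(1UL << order, PAGE_SIZE));
        if (buffer)
            return buffer;

        /* 3) last try: contiguous pages again, now allowing reclaim */
        gfp &= ~__GFP_NORETRY;
        return (char *)__get_free_pages(gfp, order);
    }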
/net/bridge/ |
D | Kconfig | 20 In order to use the Ethernet bridge, you'll need the bridge
|
/net/atm/ |
D | Kconfig | 14 In order to participate in an ATM network, your Linux box needs an
|
/net/batman-adv/ |
D | Kconfig | 64 configured to have promiscuous mode enabled in order to make
|
/net/smc/ |
D | smc_core.c |
    1313  __free_pages(buf_desc->pages, buf_desc->order); in smcr_buf_free()
    2224  buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
    2228  buf_desc->order); in smcr_new_buf_create()
    2240  buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
    2241  buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); in smcr_new_buf_create()
|
D | smc_core.h | 203 u32 order; /* allocation order */ member
|
D | smc_ib.c | 723 ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order); in smc_ib_get_memory_region()
|
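The SMC hits chain one order through three uses: smcr_new_buf_create() derives it from the buffer size with get_order(), the buffer is allocated or vzalloc'ed as PAGE_SIZE << order bytes, and smc_ib_get_memory_region() sizes the IB memory region as 1 << order scatter/gather entries, one per page. A standalone demo of that size-to-order-to-sg-entry chain (get_order_of() reimplements the kernel's get_order() for assumed 4 KiB pages):

    /* buf_order.c - buffer size to order to sg-entry count, as in SMC. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* smallest order whose 2^order pages cover size bytes, like get_order() */
    static int get_order_of(unsigned long size)
    {
        int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long sizes[] = { 16384, 65536, 131072 };

        for (int i = 0; i < 3; i++) {
            int order = get_order_of(sizes[i]);

            printf("bufsize %6lu -> order %d -> %d sg entries for ib_alloc_mr()\n",
                   sizes[i], order, 1 << order);
        }
        return 0;
    }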
/net/netfilter/ipset/ |
D | Kconfig | 8 In order to define and use the sets, you need the userspace utility
|
/net/wireless/ |
D | Kconfig | 152 capabilities. However, note that in order to not create daisy chain
|
/net/bridge/netfilter/ |
D | Kconfig | 34 through your machine, in order to figure out how they are related
|
/net/ipv6/ |
D | Kconfig | 190 IPv4 encapsulation in order to transit IPv4-only network
|
/net/netfilter/ipvs/ |
D | Kconfig | 205 from the server set, in order to avoid high degree of replication.
|