
Searched refs:gfp (Results 1 – 13 of 13) sorted by relevance

/kernel/dma/
pool.c
46 static void dma_atomic_pool_size_add(gfp_t gfp, size_t size) in dma_atomic_pool_size_add() argument
48 if (gfp & __GFP_DMA) in dma_atomic_pool_size_add()
50 else if (gfp & __GFP_DMA32) in dma_atomic_pool_size_add()
56 static bool cma_in_zone(gfp_t gfp) in cma_in_zone() argument
72 if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) in cma_in_zone()
74 if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty()) in cma_in_zone()
80 gfp_t gfp) in atomic_pool_expand() argument
92 if (cma_in_zone(gfp)) in atomic_pool_expand()
96 page = alloc_pages(gfp, order); in atomic_pool_expand()
125 dma_atomic_pool_size_add(gfp, pool_size); in atomic_pool_expand()
[all …]
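
The pool.c hits show dma_atomic_pool_size_add() and cma_in_zone() keying off the GFP zone bits to pick one of the atomic pools. A minimal userspace sketch of that dispatch follows; the flag values and pool names are stand-ins, not the kernel's definitions.

#include <stdio.h>

#define MODEL_GFP_DMA    (1u << 0)   /* stand-in for __GFP_DMA   */
#define MODEL_GFP_DMA32  (1u << 1)   /* stand-in for __GFP_DMA32 */

/* Mirror of the zone dispatch seen in dma_atomic_pool_size_add(). */
static const char *pool_for(unsigned int gfp)
{
	if (gfp & MODEL_GFP_DMA)
		return "pool_dma";
	if (gfp & MODEL_GFP_DMA32)
		return "pool_dma32";
	return "pool_kernel";
}

int main(void)
{
	printf("%s\n", pool_for(MODEL_GFP_DMA));    /* pool_dma    */
	printf("%s\n", pool_for(MODEL_GFP_DMA32));  /* pool_dma32  */
	printf("%s\n", pool_for(0));                /* pool_kernel */
	return 0;
}
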
direct.c
121 gfp_t gfp, bool allow_highmem) in __dma_direct_alloc_pages() argument
132 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in __dma_direct_alloc_pages()
133 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
143 page = alloc_pages_node(node, gfp, get_order(size)); in __dma_direct_alloc_pages()
150 !(gfp & (GFP_DMA32 | GFP_DMA)) && in __dma_direct_alloc_pages()
152 gfp |= GFP_DMA32; in __dma_direct_alloc_pages()
156 if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) { in __dma_direct_alloc_pages()
157 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; in __dma_direct_alloc_pages()
169 static bool dma_direct_use_pool(struct device *dev, gfp_t gfp) in dma_direct_use_pool() argument
171 return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev); in dma_direct_use_pool()
[all …]
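
The direct.c hits around lines 150 to 157 show the retry pattern in __dma_direct_alloc_pages(): when the allocated page lies above the device's addressing limit, the mask is narrowed first to GFP_DMA32 and then to GFP_DMA before retrying. A simplified, self-contained model of that narrowing; the allocator and flag values below are fakes for illustration.

#include <stdio.h>

#define M_GFP_DMA    (1u << 0)            /* stand-in for GFP_DMA   */
#define M_GFP_DMA32  (1u << 1)            /* stand-in for GFP_DMA32 */

/* Fake allocator: pretend lower-zone requests return lower addresses. */
static unsigned long long fake_alloc(unsigned int gfp)
{
	if (gfp & M_GFP_DMA)
		return 0x00ffffffULL;     /* below 16 MiB */
	if (gfp & M_GFP_DMA32)
		return 0xffffffffULL;     /* below 4 GiB  */
	return 0x1ffffffffULL;            /* above 4 GiB  */
}

int main(void)
{
	unsigned long long phys_limit = 0xffffffffULL;  /* device addresses 32 bits */
	unsigned int gfp = 0;
	unsigned long long phys = fake_alloc(gfp);

	/* Same shape as the retry in __dma_direct_alloc_pages():
	 * narrow the zone mask step by step until the result fits. */
	while (phys > phys_limit) {
		if (!(gfp & (M_GFP_DMA32 | M_GFP_DMA)))
			gfp |= M_GFP_DMA32;
		else if (!(gfp & M_GFP_DMA))
			gfp = (gfp & ~M_GFP_DMA32) | M_GFP_DMA;
		else
			break;            /* nothing narrower to try */
		phys = fake_alloc(gfp);
	}
	printf("final gfp=%#x phys=%#llx\n", gfp, phys);
	return 0;
}
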
mapping.c
94 gfp_t gfp, unsigned long attrs) in dmam_alloc_attrs() argument
99 dr = devres_alloc(dmam_release, sizeof(*dr), gfp); in dmam_alloc_attrs()
103 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
686 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) in __dma_alloc_pages() argument
692 if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM))) in __dma_alloc_pages()
694 if (WARN_ON_ONCE(gfp & __GFP_COMP)) in __dma_alloc_pages()
699 return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
701 return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
704 return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
708 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) in dma_alloc_pages() argument
[all …]
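
The mapping.c hits at lines 692 and 694 show __dma_alloc_pages() rejecting caller-supplied zone bits and __GFP_COMP, since the DMA layer chooses the zone itself and cannot return compound pages. A small model of that validation, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define M_GFP_DMA      (1u << 0)
#define M_GFP_DMA32    (1u << 1)
#define M_GFP_HIGHMEM  (1u << 2)
#define M_GFP_COMP     (1u << 3)
#define M_GFP_KERNEL   (1u << 4)

static bool gfp_ok_for_dma_alloc_pages(unsigned int gfp)
{
	if (gfp & (M_GFP_DMA | M_GFP_DMA32 | M_GFP_HIGHMEM))
		return false;   /* zone is chosen by the DMA layer */
	if (gfp & M_GFP_COMP)
		return false;   /* compound pages are rejected */
	return true;
}

int main(void)
{
	printf("%d\n", gfp_ok_for_dma_alloc_pages(M_GFP_KERNEL));              /* 1 */
	printf("%d\n", gfp_ok_for_dma_alloc_pages(M_GFP_KERNEL | M_GFP_DMA));  /* 0 */
	printf("%d\n", gfp_ok_for_dma_alloc_pages(M_GFP_KERNEL | M_GFP_COMP)); /* 0 */
	return 0;
}
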
contiguous.c
334 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned() argument
338 return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN); in cma_alloc_aligned()
356 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
363 if (!gfpflags_allow_blocking(gfp)) in dma_alloc_contiguous()
366 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
371 if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) { in dma_alloc_contiguous()
376 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
383 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
392 return cma_alloc_aligned(dma_contiguous_default_area, size, gfp); in dma_alloc_contiguous()
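
The contiguous.c hits trace the CMA selection order in dma_alloc_contiguous(): non-blocking requests skip CMA entirely, a per-device area wins over the per-node area (which is skipped when a DMA zone was requested), and the default area is the last resort. A userspace sketch of that precedence; the area names and struct are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct model_dev {
	const char *per_device_cma;          /* like dev->cma_area */
};

static const char *per_node_cma     = "cma_node0";
static const char *default_cma_area = "dma_contiguous_default_area";

static const char *pick_cma(const struct model_dev *dev,
			    bool gfp_allows_blocking, bool gfp_wants_dma_zone)
{
	if (!gfp_allows_blocking)
		return NULL;                 /* CMA allocation may sleep */
	if (dev->per_device_cma)
		return dev->per_device_cma;  /* per-device area wins */
	if (!gfp_wants_dma_zone && per_node_cma)
		return per_node_cma;         /* per-node area unless a DMA zone was asked for */
	return default_cma_area;             /* last resort */
}

int main(void)
{
	struct model_dev with_area = { .per_device_cma = "cma_dev" };
	struct model_dev plain     = { .per_device_cma = NULL };
	const char *r;

	printf("%s\n", pick_cma(&with_area, true, false));   /* cma_dev   */
	printf("%s\n", pick_cma(&plain, true, false));        /* cma_node0 */
	r = pick_cma(&plain, false, false);
	printf("%s\n", r ? r : "no CMA (non-blocking request)");
	return 0;
}
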
swiotlb.c
574 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit) in alloc_dma_pages() argument
581 page = alloc_pages(gfp, order); in alloc_dma_pages()
613 u64 phys_limit, gfp_t gfp) in swiotlb_alloc_tlb() argument
621 if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) { in swiotlb_alloc_tlb()
627 return dma_alloc_from_pool(dev, bytes, &vaddr, gfp, in swiotlb_alloc_tlb()
631 gfp &= ~GFP_ZONEMASK; in swiotlb_alloc_tlb()
633 gfp |= __GFP_DMA; in swiotlb_alloc_tlb()
635 gfp |= __GFP_DMA32; in swiotlb_alloc_tlb()
637 while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) { in swiotlb_alloc_tlb()
640 !(gfp & (__GFP_DMA32 | __GFP_DMA))) in swiotlb_alloc_tlb()
[all …]
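
The swiotlb.c hits around lines 631 to 635 show swiotlb_alloc_tlb() stripping any existing zone bits and deriving a zone from the physical address limit. A sketch of that selection; the limits and flag values are stand-ins (the kernel compares against DMA_BIT_MASK(zone_dma_bits) and DMA_BIT_MASK(32)).

#include <stdio.h>

#define M_GFP_DMA          (1u << 0)
#define M_GFP_DMA32        (1u << 1)
#define M_ZONEMASK         (M_GFP_DMA | M_GFP_DMA32)

#define MODEL_DMA_LIMIT    0x00ffffffULL   /* 16 MiB, illustrative */
#define MODEL_DMA32_LIMIT  0xffffffffULL   /* 4 GiB */

static unsigned int pick_zone(unsigned int gfp, unsigned long long phys_limit)
{
	gfp &= ~M_ZONEMASK;                /* like gfp &= ~GFP_ZONEMASK */
	if (phys_limit <= MODEL_DMA_LIMIT)
		gfp |= M_GFP_DMA;
	else if (phys_limit <= MODEL_DMA32_LIMIT)
		gfp |= M_GFP_DMA32;
	return gfp;
}

int main(void)
{
	printf("%#x\n", pick_zone(0, 0x00ffffffULL));   /* 0x1: DMA zone   */
	printf("%#x\n", pick_zone(0, 0xffffffffULL));   /* 0x2: DMA32 zone */
	printf("%#x\n", pick_zone(0, ~0ULL));           /* 0:   no zone bit */
	return 0;
}
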
ops_helpers.c
63 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) in dma_common_alloc_pages() argument
68 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
70 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
debug.c
603 static int dma_debug_create_entries(gfp_t gfp) in dma_debug_create_entries() argument
608 entry = (void *)get_zeroed_page(gfp); in dma_debug_create_entries()
/kernel/bpf/
cpumap.c
280 gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; in cpu_map_kthread_run() local
335 gfp, nframes, skbs); in cpu_map_kthread_run()
398 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; in __cpu_map_entry_alloc() local
405 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
411 sizeof(void *), gfp); in __cpu_map_entry_alloc()
421 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
426 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
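
The cpumap.c hits illustrate a common idiom: compute one base gfp mask per code path (GFP_ATOMIC in the kthread at line 280, GFP_KERNEL | __GFP_NOWARN in setup at line 398) and OR per-call extras such as __GFP_ZERO onto it. A minimal model of that composition, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define M_GFP_KERNEL  (1u << 0)   /* may sleep */
#define M_GFP_ATOMIC  (1u << 1)   /* may not sleep */
#define M_GFP_NOWARN  (1u << 2)
#define M_GFP_ZERO    (1u << 3)

int main(void)
{
	bool in_atomic_path = false;   /* e.g. kthread vs. setup path */

	/* Base mask chosen once per code path, as in cpu_map_kthread_run()
	 * vs. __cpu_map_entry_alloc(). */
	unsigned int gfp = in_atomic_path ? M_GFP_ATOMIC
					  : (M_GFP_KERNEL | M_GFP_NOWARN);

	/* Individual allocations OR in what they additionally need. */
	unsigned int gfp_zeroed = gfp | M_GFP_ZERO;

	printf("base=%#x zeroed=%#x\n", gfp, gfp_zeroed);
	return 0;
}
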
local_storage.c
502 const gfp_t gfp = __GFP_ZERO | GFP_USER; in bpf_cgroup_storage_alloc() local
515 gfp, map->numa_node); in bpf_cgroup_storage_alloc()
520 storage->buf = bpf_map_kmalloc_node(map, size, gfp, in bpf_cgroup_storage_alloc()
526 storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp); in bpf_cgroup_storage_alloc()
memalloc.c
208 gfp_t gfp; in alloc_bulk() local
212 gfp = __GFP_NOWARN | __GFP_ACCOUNT; in alloc_bulk()
213 gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL; in alloc_bulk()
246 obj = __alloc(c, node, gfp); in alloc_bulk()
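
The memalloc.c hits at lines 212 and 213 show alloc_bulk() always suppressing warnings and accounting to the memcg, then picking GFP_NOWAIT when the caller cannot sleep and GFP_KERNEL otherwise. A model of that selection, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define M_GFP_NOWARN   (1u << 0)
#define M_GFP_ACCOUNT  (1u << 1)
#define M_GFP_NOWAIT   (1u << 2)   /* no sleeping, no reclaim */
#define M_GFP_KERNEL   (1u << 3)   /* may sleep and reclaim   */

static unsigned int bulk_alloc_gfp(bool atomic)
{
	unsigned int gfp = M_GFP_NOWARN | M_GFP_ACCOUNT;

	gfp |= atomic ? M_GFP_NOWAIT : M_GFP_KERNEL;
	return gfp;
}

int main(void)
{
	printf("atomic:    %#x\n", bulk_alloc_gfp(true));
	printf("sleepable: %#x\n", bulk_alloc_gfp(false));
	return 0;
}
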
syscall.c
289 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO); in __bpf_map_area_alloc() local
303 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, in __bpf_map_area_alloc()
310 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL, in __bpf_map_area_alloc()
485 int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid, in bpf_map_alloc_pages() argument
498 pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0); in bpf_map_alloc_pages()
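
The syscall.c hits at lines 303 and 310 show the two-stage strategy in __bpf_map_area_alloc(): first try a physically contiguous allocation with __GFP_NORETRY so failure is cheap, then fall back to a vmalloc-style allocation with __GFP_RETRY_MAYFAIL. A simplified model of that fallback; both "allocators" below are fakes.

#include <stdio.h>
#include <stdlib.h>

static void *fake_kmalloc(size_t size)
{
	/* pretend large contiguous allocations fail quickly */
	if (size > 4096)
		return NULL;
	return calloc(1, size);
}

static void *fake_vmalloc(size_t size)
{
	return calloc(1, size);          /* virtually contiguous fallback */
}

static void *map_area_alloc(size_t size)
{
	void *area = fake_kmalloc(size); /* ~ kmalloc_node(..., __GFP_NORETRY, ...) */

	if (area)
		return area;
	return fake_vmalloc(size);       /* ~ vmalloc path with __GFP_RETRY_MAYFAIL */
}

int main(void)
{
	void *small = map_area_alloc(512);
	void *large = map_area_alloc(1 << 20);

	printf("small=%p large=%p\n", small, large);
	free(small);
	free(large);
	return 0;
}
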
/kernel/rcu/
tree.c
1963 static bool rcu_gp_fqs_check_wake(int *gfp) in rcu_gp_fqs_check_wake() argument
1968 if (*gfp & RCU_GP_FLAG_OVLD) in rcu_gp_fqs_check_wake()
1972 *gfp = READ_ONCE(rcu_state.gp_flags); in rcu_gp_fqs_check_wake()
1973 if (*gfp & RCU_GP_FLAG_FQS) in rcu_gp_fqs_check_wake()
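
This tree.c hit is a naming coincidence: here "gfp" is an int pointer to the grace-period flags word (rcu_state.gp_flags), not an allocation mask, as the re-read at line 1972 shows. A tiny model of the wake-up check, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define M_GP_FLAG_FQS   (1 << 0)   /* stand-in for RCU_GP_FLAG_FQS  */
#define M_GP_FLAG_OVLD  (1 << 1)   /* stand-in for RCU_GP_FLAG_OVLD */

static int gp_flags;               /* stand-in for rcu_state.gp_flags */

static bool fqs_check_wake(int *gfp)
{
	if (*gfp & M_GP_FLAG_OVLD)
		return true;       /* overload: wake immediately */
	*gfp = gp_flags;           /* re-read the shared flags word */
	return *gfp & M_GP_FLAG_FQS;
}

int main(void)
{
	int snapshot = 0;

	gp_flags = M_GP_FLAG_FQS;
	printf("%d\n", fqs_check_wake(&snapshot));   /* 1: FQS requested */
	return 0;
}
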
/kernel/
auditsc.c
2925 enum audit_nfcfgop op, gfp_t gfp) in __audit_log_nfcfg() argument
2930 ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG); in __audit_log_nfcfg()