/kernel/dma/
  pool.c
      46  static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)    in dma_atomic_pool_size_add() argument
      48  if (gfp & __GFP_DMA)    in dma_atomic_pool_size_add()
      50  else if (gfp & __GFP_DMA32)    in dma_atomic_pool_size_add()
      56  static bool cma_in_zone(gfp_t gfp)    in cma_in_zone() argument
      72  if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))    in cma_in_zone()
      74  if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())    in cma_in_zone()
      80  gfp_t gfp)    in atomic_pool_expand() argument
      92  if (cma_in_zone(gfp))    in atomic_pool_expand()
      96  page = alloc_pages(gfp, order);    in atomic_pool_expand()
     125  dma_atomic_pool_size_add(gfp, pool_size);    in atomic_pool_expand()
     [all …]

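The pool.c hits show the DMA atomic-pool code dispatching on the zone bits of a gfp mask. A minimal sketch of that dispatch, assuming hypothetical per-zone byte counters (pool_size_dma, pool_size_dma32, pool_size_kernel and pool_size_add() are illustrative names, not the kernel's):

    #include <linux/gfp.h>

    /* Illustrative per-zone counters: __GFP_DMA selects the ZONE_DMA pool,
     * __GFP_DMA32 the ZONE_DMA32 pool, anything else the normal pool. */
    static unsigned long pool_size_dma, pool_size_dma32, pool_size_kernel;

    static void pool_size_add(gfp_t gfp, size_t size)
    {
            if (gfp & __GFP_DMA)
                    pool_size_dma += size;
            else if (gfp & __GFP_DMA32)
                    pool_size_dma32 += size;
            else
                    pool_size_kernel += size;
    }
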
  direct.c
     120  gfp_t gfp, bool allow_highmem)    in __dma_direct_alloc_pages() argument
     131  gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,    in __dma_direct_alloc_pages()
     133  page = dma_alloc_contiguous(dev, size, gfp);    in __dma_direct_alloc_pages()
     143  page = alloc_pages_node(node, gfp, get_order(size));    in __dma_direct_alloc_pages()
     150  !(gfp & (GFP_DMA32 | GFP_DMA)) &&    in __dma_direct_alloc_pages()
     152  gfp |= GFP_DMA32;    in __dma_direct_alloc_pages()
     156  if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {    in __dma_direct_alloc_pages()
     157  gfp = (gfp & ~GFP_DMA32) | GFP_DMA;    in __dma_direct_alloc_pages()
     169  static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)    in dma_direct_use_pool() argument
     171  return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);    in dma_direct_use_pool()
     [all …]

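Two patterns are visible in the direct.c hits: dma_direct_use_pool() sends non-blocking masks (no __GFP_DIRECT_RECLAIM, as tested by gfpflags_allow_blocking()) to the pre-populated atomic pool, and __dma_direct_alloc_pages() narrows the zone on retry, first GFP_DMA32 and then GFP_DMA. A sketch of the retry pattern, with an explicit physical-address limit standing in for the kernel's coherent-DMA-mask check; alloc_below_limit() is an illustrative name, not the kernel's function:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_below_limit(int node, size_t size, gfp_t gfp,
                                          phys_addr_t limit)
    {
            unsigned int order = get_order(size);
            struct page *page;

    again:
            page = alloc_pages_node(node, gfp, order);
            if (page &&
                ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) + size - 1 > limit) {
                    __free_pages(page, order);
                    page = NULL;

                    /* Retry in ZONE_DMA32 first, then ZONE_DMA, matching the
                     * fallback order at direct.c lines 150-157 above. */
                    if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                        !(gfp & (GFP_DMA32 | GFP_DMA))) {
                            gfp |= GFP_DMA32;
                            goto again;
                    }
                    if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                            gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                            goto again;
                    }
            }
            return page;
    }
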
  contiguous.c
     283  static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)    in cma_alloc_aligned() argument
     287  return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);    in cma_alloc_aligned()
     305  struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)    in dma_alloc_contiguous() argument
     312  if (!gfpflags_allow_blocking(gfp))    in dma_alloc_contiguous()
     315  return cma_alloc_aligned(dev->cma_area, size, gfp);    in dma_alloc_contiguous()
     320  if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {    in dma_alloc_contiguous()
     325  page = cma_alloc_aligned(cma, size, gfp);    in dma_alloc_contiguous()
     334  return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);    in dma_alloc_contiguous()

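The contiguous.c hits convert a byte size into CMA's page-count and alignment-order interface and forward __GFP_NOWARN as cma_alloc()'s no-warn flag. A sketch of that conversion; cma_alloc_for_size() is an illustrative name and the cap at CONFIG_CMA_ALIGNMENT is an assumption about how the alignment is bounded:

    #include <linux/cma.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/minmax.h>

    /* Size in bytes -> page count; alignment expressed as a page order;
     * __GFP_NOWARN in the caller's mask forwarded as cma_alloc()'s
     * no_warn argument. */
    static struct page *cma_alloc_for_size(struct cma *cma, size_t size, gfp_t gfp)
    {
            unsigned int align = min_t(unsigned int, get_order(size),
                                       CONFIG_CMA_ALIGNMENT);

            return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
    }
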
  mapping.c
      86  gfp_t gfp, unsigned long attrs)    in dmam_alloc_attrs() argument
      91  dr = devres_alloc(dmam_release, sizeof(*dr), gfp);    in dmam_alloc_attrs()
      95  vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);    in dmam_alloc_attrs()
     550  dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)    in __dma_alloc_pages() argument
     556  if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))    in __dma_alloc_pages()
     561  return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);    in __dma_alloc_pages()
     564  return ops->alloc_pages(dev, size, dma_handle, dir, gfp);    in __dma_alloc_pages()
     568  dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)    in dma_alloc_pages() argument
     570  struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);    in dma_alloc_pages()
     612  enum dma_data_direction dir, gfp_t gfp)    in alloc_single_sgt() argument
     [all …]

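dmam_alloc_attrs() is the devres-managed path: the devres record and the DMA buffer are both allocated with the caller's gfp mask, and the buffer is released automatically when the driver detaches. A driver-side sketch using the dmam_alloc_coherent() wrapper; my_probe() and the 4096-byte size are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>
    #include <linux/errno.h>

    static int my_probe(struct platform_device *pdev)
    {
            dma_addr_t dma_handle;
            void *vaddr;

            /* Managed allocation: freed for us when the device is unbound. */
            vaddr = dmam_alloc_coherent(&pdev->dev, 4096, &dma_handle,
                                        GFP_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            /* ... program dma_handle into the hardware, use vaddr ... */
            return 0;
    }
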
  ops_helpers.c
      64  dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)    in dma_common_alloc_pages() argument
      69  page = dma_alloc_contiguous(dev, size, gfp);    in dma_common_alloc_pages()
      71  page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));    in dma_common_alloc_pages()

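dma_common_alloc_pages() tries CMA first and falls back to alloc_pages_node(), and is one of the backends behind the dma_alloc_pages() entry point shown in mapping.c above, which rejects zone flags in the mask (line 556). A caller-side sketch; alloc_rx_ring() and its parameters are illustrative:

    #include <linux/dma-mapping.h>

    /* The mask must not carry zone flags: __dma_alloc_pages() (mapping.c
     * line 556 above) warns and fails if __GFP_DMA, __GFP_DMA32 or
     * __GFP_HIGHMEM is set. */
    static struct page *alloc_rx_ring(struct device *dev, size_t bytes,
                                      dma_addr_t *dma_handle)
    {
            return dma_alloc_pages(dev, bytes, dma_handle, DMA_FROM_DEVICE,
                                   GFP_KERNEL);
    }
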
  debug.c
     573  static int dma_debug_create_entries(gfp_t gfp)    in dma_debug_create_entries() argument
     578  entry = (void *)get_zeroed_page(gfp);    in dma_debug_create_entries()

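dma_debug_create_entries() grows its entry pool one zeroed page at a time, using whatever gfp mask the caller passes. The same get_zeroed_page() pairing in isolation; grab_scratch_page() is an illustrative name:

    #include <linux/gfp.h>

    static void *grab_scratch_page(gfp_t gfp)
    {
            unsigned long addr = get_zeroed_page(gfp);

            /* Release later with free_page((unsigned long)ptr). */
            return (void *)addr;
    }
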
/kernel/bpf/
  cpumap.c
     321  gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;    in cpu_map_kthread_run() local
     373  m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);    in cpu_map_kthread_run()
     435  gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;    in __cpu_map_entry_alloc() local
     442  rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);    in __cpu_map_entry_alloc()
     448  sizeof(void *), gfp);    in __cpu_map_entry_alloc()
     458  rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,    in __cpu_map_entry_alloc()
     463  err = ptr_ring_init(rcpu->queue, value->qsize, gfp);    in __cpu_map_entry_alloc()

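The cpumap.c hits show a GFP_ATOMIC bulk slab allocation on the kthread's hot path and GFP_KERNEL | __GFP_NOWARN setup allocations, including ptr_ring_init() with the same mask. A sketch of the bulk-allocation pattern; grab_batch() and my_cache are illustrative names, and a zero return is treated as failure, as in the listing:

    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Allocate n objects in one call, zeroed (__GFP_ZERO) and without
     * sleeping (GFP_ATOMIC). A return of 0 means the batch failed. */
    static int grab_batch(struct kmem_cache *my_cache, void **ptrs, size_t n)
    {
            gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;

            if (!kmem_cache_alloc_bulk(my_cache, gfp, n, ptrs))
                    return -ENOMEM;

            /* ... consume ptrs[0..n-1]; unused objects can be returned with
             * kmem_cache_free_bulk(my_cache, n, ptrs). */
            return 0;
    }
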
D | local_storage.c | 495 const gfp_t gfp = __GFP_ZERO | GFP_USER; in bpf_cgroup_storage_alloc() local 508 gfp, map->numa_node); in bpf_cgroup_storage_alloc() 513 storage->buf = bpf_map_kmalloc_node(map, size, gfp, in bpf_cgroup_storage_alloc() 519 storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp); in bpf_cgroup_storage_alloc()
|
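bpf_cgroup_storage_alloc() builds its gfp mask once (__GFP_ZERO | GFP_USER, an allocation made on behalf of userspace) and reuses it for both the node-local kmalloc and the per-CPU buffer. A sketch of the per-CPU half using the generic __alloc_percpu_gfp() interface that bpf_map_alloc_percpu() wraps; alloc_percpu_buf() is an illustrative name, and the 8-byte alignment matches the listing:

    #include <linux/percpu.h>
    #include <linux/gfp.h>

    static void __percpu *alloc_percpu_buf(size_t size)
    {
            const gfp_t gfp = __GFP_ZERO | GFP_USER;

            /* One buffer per possible CPU; release with free_percpu(). */
            return __alloc_percpu_gfp(size, 8, gfp);
    }
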
  syscall.c
     314  const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;    in __bpf_map_area_alloc() local
     328  area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,    in __bpf_map_area_alloc()
     335  gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,    in __bpf_map_area_alloc()

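__bpf_map_area_alloc() first tries a physically contiguous kmalloc with __GFP_NORETRY so the page allocator fails fast, then falls back to vmalloc space with __GFP_RETRY_MAYFAIL for large or fragmented requests. kvmalloc_node() packages the same kmalloc-then-vmalloc policy in a single call; the wrapper below is an illustrative stand-in, not the kernel's function:

    #include <linux/slab.h>
    #include <linux/mm.h>

    static void *map_area_alloc(size_t size, int numa_node)
    {
            /* __GFP_ACCOUNT charges the memory to the caller's memcg,
             * __GFP_NOWARN keeps expected failures quiet, __GFP_ZERO zeroes.
             * Release with kvfree(). */
            return kvmalloc_node(size, GFP_USER | __GFP_NOWARN | __GFP_ZERO |
                                 __GFP_ACCOUNT, numa_node);
    }
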
/kernel/rcu/
  tree.c
    1615  static bool rcu_gp_fqs_check_wake(int *gfp)    in rcu_gp_fqs_check_wake() argument
    1620  if (*gfp & RCU_GP_FLAG_OVLD)    in rcu_gp_fqs_check_wake()
    1624  *gfp = READ_ONCE(rcu_state.gp_flags);    in rcu_gp_fqs_check_wake()
    1625  if (*gfp & RCU_GP_FLAG_FQS)    in rcu_gp_fqs_check_wake()
    (Note: this "gfp" abbreviates grace-period flags; the parameter points at a snapshot of rcu_state.gp_flags, not at a gfp_t allocation mask.)

/kernel/
  auditsc.c
    2910  enum audit_nfcfgop op, gfp_t gfp)    in __audit_log_nfcfg() argument
    2915  ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);    in __audit_log_nfcfg()

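__audit_log_nfcfg() takes its gfp mask from the caller, so atomic contexts can pass GFP_ATOMIC while others use GFP_KERNEL. A sketch of the same audit_log_start() / audit_log_format() / audit_log_end() sequence; log_table_change() and the format string are illustrative, AUDIT_NETFILTER_CFG is the record type from the listing:

    #include <linux/audit.h>

    static void log_table_change(const char *table, gfp_t gfp)
    {
            struct audit_buffer *ab;

            ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);
            if (!ab)
                    return; /* auditing disabled or buffer allocation failed */

            audit_log_format(ab, "table=%s", table);
            audit_log_end(ab);
    }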