/kernel/dma/
pool.c
    46: static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)  [in dma_atomic_pool_size_add(), argument]
    48: if (gfp & __GFP_DMA)  [in dma_atomic_pool_size_add()]
    50: else if (gfp & __GFP_DMA32)  [in dma_atomic_pool_size_add()]
    56: static bool cma_in_zone(gfp_t gfp)  [in cma_in_zone(), argument]
    72: if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))  [in cma_in_zone()]
    74: if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())  [in cma_in_zone()]
    80: gfp_t gfp)  [in atomic_pool_expand(), argument]
    92: if (cma_in_zone(gfp))  [in atomic_pool_expand()]
    96: page = alloc_pages(gfp, order);  [in atomic_pool_expand()]
   125: dma_atomic_pool_size_add(gfp, pool_size);  [in atomic_pool_expand()]
   [all …]
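
These hits show the coherent atomic pools being accounted and refilled per GFP zone: the zone bits in the caller's gfp decide which pool (DMA, DMA32, or normal) atomic_pool_expand() grows, and cma_in_zone() gates whether CMA may back the refill. A minimal sketch of that zone dispatch follows; pick_pool() and the three pool pointers are illustrative placeholders, not kernel symbols.

    /* Hypothetical zone dispatch, mirroring how dma_atomic_pool_size_add()
     * keys its accounting off the gfp zone bits.  The gen_pool pointers are
     * placeholders for the per-zone atomic pools, not kernel symbols.
     */
    #include <linux/gfp.h>
    #include <linux/genalloc.h>

    static struct gen_pool *pool_dma, *pool_dma32, *pool_kernel;

    static struct gen_pool *pick_pool(gfp_t gfp)
    {
            if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & __GFP_DMA))
                    return pool_dma;        /* most restrictive zone first */
            if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & __GFP_DMA32))
                    return pool_dma32;
            return pool_kernel;             /* no zone restriction */
    }
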
direct.c
   108: gfp_t gfp, bool allow_highmem)  [in __dma_direct_alloc_pages(), argument]
   116: gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,  [in __dma_direct_alloc_pages()]
   128: page = dma_alloc_contiguous(dev, size, gfp);  [in __dma_direct_alloc_pages()]
   138: page = alloc_pages_node(node, gfp, get_order(size));  [in __dma_direct_alloc_pages()]
   145: !(gfp & (GFP_DMA32 | GFP_DMA)) &&  [in __dma_direct_alloc_pages()]
   147: gfp |= GFP_DMA32;  [in __dma_direct_alloc_pages()]
   151: if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {  [in __dma_direct_alloc_pages()]
   152: gfp = (gfp & ~GFP_DMA32) | GFP_DMA;  [in __dma_direct_alloc_pages()]
   161: dma_addr_t *dma_handle, gfp_t gfp)  [in dma_direct_alloc_from_pool(), argument]
   167: gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,  [in dma_direct_alloc_from_pool()]
   [all …]
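
Lines 138-152 show __dma_direct_alloc_pages() widening the zone restriction when the page it got back is not addressable by the device: free it, add GFP_DMA32, and if that still fails switch to GFP_DMA. A hedged sketch of that retry shape, where device_can_address() stands in for the internal addressability check (dma_coherent_ok() inside kernel/dma/direct.c) and is not a public kernel API:

    /* Sketch of the retry-with-narrower-zone pattern from
     * __dma_direct_alloc_pages(): if the allocation lands above the device's
     * DMA mask, free it and retry from a more restrictive zone.
     * device_can_address() is an assumed placeholder, not a kernel helper.
     */
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    bool device_can_address(struct device *dev, phys_addr_t phys, size_t size);

    static struct page *alloc_below_mask(struct device *dev, size_t size, gfp_t gfp)
    {
            struct page *page;

    again:
            page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
            if (page && !device_can_address(dev, page_to_phys(page), size)) {
                    __free_pages(page, get_order(size));
                    page = NULL;

                    if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                        !(gfp & (GFP_DMA32 | GFP_DMA))) {
                            gfp |= GFP_DMA32;
                            goto again;
                    }
                    if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                            gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                            goto again;
                    }
            }
            return page;
    }
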
contiguous.c
   284: static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)  [in cma_alloc_aligned(), argument]
   289: GFP_KERNEL | (gfp & __GFP_NOWARN));  [in cma_alloc_aligned()]
   307: struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)  [in dma_alloc_contiguous(), argument]
   314: if (!gfpflags_allow_blocking(gfp))  [in dma_alloc_contiguous()]
   317: return cma_alloc_aligned(dev->cma_area, size, gfp);  [in dma_alloc_contiguous()]
   322: if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {  [in dma_alloc_contiguous()]
   327: page = cma_alloc_aligned(cma, size, gfp);  [in dma_alloc_contiguous()]
   336: return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);  [in dma_alloc_contiguous()]
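
The check on line 314 is the interesting one: dma_alloc_contiguous() only takes the CMA path when gfpflags_allow_blocking(gfp) is true, and line 289 shows that only __GFP_NOWARN is carried over from the caller's gfp. A small usage sketch under that assumption; the helper name is illustrative.

    /* Usage sketch: the gfp a driver passes down decides whether CMA is
     * eligible.  GFP_KERNEL may block, so the dma_alloc_contiguous() path can
     * be used underneath; GFP_ATOMIC callers cannot take it and typically end
     * up in the atomic pools shown in pool.c above.
     */
    #include <linux/dma-mapping.h>

    static void *alloc_coherent_buf(struct device *dev, size_t size,
                                    dma_addr_t *handle)
    {
            /* sleepable context: CMA-backed contiguous memory is eligible */
            return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
    }
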
mapping.c
    85: gfp_t gfp, unsigned long attrs)  [in dmam_alloc_attrs(), argument]
    90: dr = devres_alloc(dmam_release, sizeof(*dr), gfp);  [in dmam_alloc_attrs()]
    94: vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);  [in dmam_alloc_attrs()]
   545: dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)  [in __dma_alloc_pages(), argument]
   551: if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))  [in __dma_alloc_pages()]
   556: return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);  [in __dma_alloc_pages()]
   559: return ops->alloc_pages(dev, size, dma_handle, dir, gfp);  [in __dma_alloc_pages()]
   563: dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)  [in dma_alloc_pages(), argument]
   565: struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);  [in dma_alloc_pages()]
   607: enum dma_data_direction dir, gfp_t gfp)  [in alloc_single_sgt(), argument]
   [all …]
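
The WARN_ON_ONCE on line 551 documents the contract: callers of dma_alloc_pages() must not pass zone or highmem gfp bits, because the DMA core derives the zone from the device's DMA mask itself. A short usage sketch under that contract; the function name and direction are illustrative.

    /* Usage sketch for dma_alloc_pages(): plain GFP_KERNEL from the caller;
     * __GFP_DMA/__GFP_DMA32/__GFP_HIGHMEM would trip the WARN_ON_ONCE() above.
     */
    #include <linux/dma-mapping.h>

    static struct page *grab_streaming_pages(struct device *dev, size_t size,
                                             dma_addr_t *dma)
    {
            return dma_alloc_pages(dev, size, dma, DMA_TO_DEVICE, GFP_KERNEL);
    }
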
ops_helpers.c
    62: dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)  [in dma_common_alloc_pages(), argument]
    67: page = dma_alloc_contiguous(dev, size, gfp);  [in dma_common_alloc_pages()]
    69: page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));  [in dma_common_alloc_pages()]
debug.c
   575: static int dma_debug_create_entries(gfp_t gfp)  [in dma_debug_create_entries(), argument]
   580: entry = (void *)get_zeroed_page(gfp);  [in dma_debug_create_entries()]
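
dma_debug_create_entries() grows the tracking-entry pool one zeroed page at a time with whatever gfp its caller can afford. A tiny sketch of that batch pattern; struct my_entry and the helper name are illustrative placeholders.

    /* Sketch of the one-zeroed-page-per-batch pattern: carve a page into
     * fixed-size records.  struct my_entry is a placeholder type.
     */
    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/types.h>

    struct my_entry {
            struct list_head list;
            u64 dev_addr;
    };

    static struct my_entry *alloc_entry_batch(gfp_t gfp)
    {
            /* PAGE_SIZE / sizeof(struct my_entry) records, all zeroed */
            return (struct my_entry *)get_zeroed_page(gfp);
    }
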
/kernel/bpf/
cpumap.c
   320: gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;  [in cpu_map_kthread_run(), local]
   372: m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);  [in cpu_map_kthread_run()]
   432: gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;  [in __cpu_map_entry_alloc(), local]
   439: rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);  [in __cpu_map_entry_alloc()]
   445: sizeof(void *), gfp);  [in __cpu_map_entry_alloc()]
   455: rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,  [in __cpu_map_entry_alloc()]
   460: err = ptr_ring_init(rcpu->queue, value->qsize, gfp);  [in __cpu_map_entry_alloc()]
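
Line 372 is a bulk slab allocation on the packet path: cpu_map_kthread_run() pulls a whole batch of skb heads in one kmem_cache_alloc_bulk() call with an atomic, zeroing gfp and has to cope with getting fewer objects than it asked for. A hedged sketch of that pattern with a private cache; the cache and batch size are illustrative, not kernel symbols.

    /* Bulk-allocation sketch: request up to BATCH zeroed objects atomically
     * and handle a short return count.  "my_cache" is a placeholder cache.
     */
    #include <linux/slab.h>

    #define BATCH 8

    static int grab_batch(struct kmem_cache *my_cache, void **objs)
    {
            gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;

            /* returns how many objects were actually allocated, 0..BATCH */
            return kmem_cache_alloc_bulk(my_cache, gfp, BATCH, objs);
    }
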
local_storage.c
   496: const gfp_t gfp = __GFP_ZERO | GFP_USER;  [in bpf_cgroup_storage_alloc(), local]
   509: gfp, map->numa_node);  [in bpf_cgroup_storage_alloc()]
   514: storage->buf = bpf_map_kmalloc_node(map, size, gfp,  [in bpf_cgroup_storage_alloc()]
   520: storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);  [in bpf_cgroup_storage_alloc()]
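
The local on line 496 combines GFP_USER (an allocation made on behalf of a userspace request, subject to cpuset policy) with __GFP_ZERO so the storage starts cleared; the bpf_map_* wrappers then apply the map's NUMA node. A minimal stand-alone sketch of the same flag combination with plain kmalloc_node(); the helper name is illustrative.

    /* Flag-combination sketch: zeroed, userspace-attributed allocation on a
     * specific NUMA node.  my_alloc_storage() is a placeholder.
     */
    #include <linux/slab.h>

    static void *my_alloc_storage(size_t size, int numa_node)
    {
            const gfp_t gfp = __GFP_ZERO | GFP_USER;

            return kmalloc_node(size, gfp, numa_node);
    }
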
syscall.c
   308: const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;  [in __bpf_map_area_alloc(), local]
   322: area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,  [in __bpf_map_area_alloc()]
   329: gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,  [in __bpf_map_area_alloc()]
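
__bpf_map_area_alloc() is the classic two-step: a cheap physically contiguous attempt that gives up quickly (__GFP_NORETRY, __GFP_NOWARN), then a vmalloc fallback that may retry harder (__GFP_RETRY_MAYFAIL), with __GFP_ACCOUNT charging the memory cgroup either way. A simplified sketch of that shape follows; the real function also caps the kmalloc attempt by size and uses __vmalloc_node_range(), so treat this as an approximation.

    /* Try-kmalloc-then-vmalloc sketch with the gfp split seen above. */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *area_alloc(size_t size, int numa_node)
    {
            const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
            void *area;

            /* fast path: physically contiguous, fail fast instead of reclaiming hard */
            area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, numa_node);
            if (area)
                    return area;

            /* fallback: virtually contiguous, allowed to retry harder */
            return __vmalloc_node(size, 1,
                                  gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                                  numa_node, __builtin_return_address(0));
    }
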
/kernel/rcu/
tree.c
  1885: static bool rcu_gp_fqs_check_wake(int *gfp)  [in rcu_gp_fqs_check_wake(), argument]
  1890: if (*gfp & RCU_GP_FLAG_OVLD)  [in rcu_gp_fqs_check_wake()]
  1894: *gfp = READ_ONCE(rcu_state.gp_flags);  [in rcu_gp_fqs_check_wake()]
  1895: if (*gfp & RCU_GP_FLAG_FQS)  [in rcu_gp_fqs_check_wake()]
  (Note: here "gfp" is an int pointer to the RCU grace-period flag word, not a gfp_t allocation mask; only the identifier name matches.)
/kernel/
auditsc.c
  2644: enum audit_nfcfgop op, gfp_t gfp)  [in __audit_log_nfcfg(), argument]
  2649: ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);  [in __audit_log_nfcfg()]
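
__audit_log_nfcfg() simply threads its caller's gfp into audit_log_start(), so netfilter paths that cannot sleep can pass GFP_ATOMIC while others use GFP_KERNEL. A minimal usage sketch of that pass-through; the record payload logged here is illustrative, not what auditsc.c actually emits.

    /* Pass-through sketch: the caller's context picks the gfp used for the
     * audit buffer.  The "table=%s" payload is illustrative only.
     */
    #include <linux/audit.h>

    static void log_cfg_change(const char *table, gfp_t gfp)
    {
            struct audit_buffer *ab;

            ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);
            if (!ab)
                    return;         /* auditing disabled or allocation failed */
            audit_log_format(ab, "table=%s", table);
            audit_log_end(ab);
    }
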