
Searched refs:flags (Results 1 – 25 of 69) sorted by relevance


/mm/
gup.c
124 unsigned int flags) in no_page_table() argument
134 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
140 pte_t *pte, unsigned int flags) in follow_pfn_pte() argument
143 if (flags & FOLL_GET) in follow_pfn_pte()
146 if (flags & FOLL_TOUCH) { in follow_pfn_pte()
149 if (flags & FOLL_WRITE) in follow_pfn_pte()
167 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) in can_follow_write_pte() argument
170 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); in can_follow_write_pte()
174 unsigned long address, pmd_t *pmd, unsigned int flags, in follow_page_pte() argument
184 return no_page_table(vma, flags); in follow_page_pte()
[all …]
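The gup.c hits above all test single bits of one unsigned int flags word (FOLL_GET, FOLL_TOUCH, FOLL_WRITE, FOLL_DUMP, ...). A minimal sketch of that bitmask idiom follows; the DEMO_* values and demo_follow() are hypothetical stand-ins, not the kernel's real FOLL_* definitions, which live in the mm headers.

/* Hedged sketch of the flag-testing idiom seen in the gup.c hits.
 * DEMO_* values are illustrative only. */
#define DEMO_FOLL_WRITE 0x01	/* caller intends to write */
#define DEMO_FOLL_TOUCH 0x02	/* mark the page accessed */
#define DEMO_FOLL_GET   0x04	/* take a reference on the page */

static int demo_follow(unsigned int flags)
{
	if (flags & DEMO_FOLL_GET)		/* a reference was requested */
		return -1;
	if (flags & DEMO_FOLL_TOUCH) {		/* accessed; maybe dirty too */
		if (flags & DEMO_FOLL_WRITE)
			return 1;
	}
	return 0;
}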
slab.c
256 #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
257 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
342 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
401 slab_flags_t flags, size_t *left_over) in cache_estimate() argument
423 if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) { in cache_estimate()
612 gfp_t flags) in alternate_node_alloc() argument
618 gfp_t flags, int nodeid) in ____cache_alloc_node() argument
[all …]
kmemleak.c
139 unsigned int flags; /* object status flags */ member
325 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
367 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
418 unsigned long flags; in mem_pool_alloc() local
429 write_lock_irqsave(&kmemleak_lock, flags); in mem_pool_alloc()
438 write_unlock_irqrestore(&kmemleak_lock, flags); in mem_pool_alloc()
448 unsigned long flags; in mem_pool_free() local
456 write_lock_irqsave(&kmemleak_lock, flags); in mem_pool_free()
458 write_unlock_irqrestore(&kmemleak_lock, flags); in mem_pool_free()
495 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
[all …]
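Most of the entries tagged "local" in this listing (kmemleak.c above, and balloon_compaction.c, slob.c, mempool.c, swap.c, quarantine.c below) use flags the same way: a stack variable that saves the local interrupt state around a lock acquisition (write_lock_irqsave() in kmemleak.c, spin_lock_irqsave() elsewhere), not a bitmask. A minimal sketch of that idiom, with a hypothetical demo_lock and demo_count; only the spinlock calls are the real kernel API from <linux/spinlock.h>.

#include <linux/spinlock.h>

/* Sketch of the *_irqsave idiom behind the "unsigned long flags; ... local" hits. */
static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static unsigned long demo_count;	/* hypothetical shared state */

static void demo_increment(void)
{
	unsigned long flags;	/* saved IRQ state, restored on unlock */

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}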
mempolicy.c
125 .flags = MPOL_F_LOCAL,
156 return pol->flags & MPOL_MODE_FLAGS; in mpol_store_user_nodemask()
178 pol->flags |= MPOL_F_LOCAL; /* local allocation */ in mpol_new_preferred()
219 if (pol->flags & MPOL_F_RELATIVE_NODES) in mpol_set_nodemask()
242 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, in mpol_new() argument
248 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); in mpol_new()
264 if (((flags & MPOL_F_STATIC_NODES) || in mpol_new()
265 (flags & MPOL_F_RELATIVE_NODES))) in mpol_new()
270 (flags & MPOL_F_STATIC_NODES) || in mpol_new()
271 (flags & MPOL_F_RELATIVE_NODES)) in mpol_new()
[all …]
slab.h
24 slab_flags_t flags; /* Active flags on the slab */ member
157 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
160 slab_flags_t flags, unsigned int useroffset,
163 unsigned int size, slab_flags_t flags,
168 slab_flags_t flags, const char *name, void (*ctor)(void *));
172 slab_flags_t flags, void (*ctor)(void *));
175 slab_flags_t flags, const char *name,
180 slab_flags_t flags, void (*ctor)(void *)) in __kmem_cache_alias() argument
184 slab_flags_t flags, const char *name, in kmem_cache_flags() argument
187 return flags; in kmem_cache_flags()
[all …]
slub.c
122 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
130 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
355 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
361 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
373 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
409 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
417 unsigned long flags; in cmpxchg_double_slab() local
419 local_irq_save(flags); in cmpxchg_double_slab()
426 local_irq_restore(flags); in cmpxchg_double_slab()
430 local_irq_restore(flags); in cmpxchg_double_slab()
[all …]
highmem.c
139 #define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags) argument
140 #define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags) argument
144 #define lock_kmap_any(flags) \ argument
145 do { spin_lock(&kmap_lock); (void)(flags); } while (0)
146 #define unlock_kmap_any(flags) \ argument
147 do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
308 unsigned long vaddr, flags; in kmap_high_get() local
310 lock_kmap_any(flags); in kmap_high_get()
316 unlock_kmap_any(flags); in kmap_high_get()
332 unsigned long flags; in kunmap_high() local
[all …]
memory-failure.c
209 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) in kill_proc() argument
218 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { in kill_proc()
362 unsigned long pfn, int flags) in kill_procs() argument
386 else if (kill_proc(tk, pfn, flags) < 0) in kill_procs()
408 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY)) in find_early_kill_thread()
966 int flags, struct page **hpagep) in hwpoison_user_mappings() argument
1010 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && in hwpoison_user_mappings()
1031 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); in hwpoison_user_mappings()
1055 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL); in hwpoison_user_mappings()
1056 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); in hwpoison_user_mappings()
[all …]
slab_common.c
116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, in __kmem_cache_alloc_bulk() argument
122 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
281 static unsigned int calculate_alignment(slab_flags_t flags, in calculate_alignment() argument
291 if (flags & SLAB_HWCACHE_ALIGN) { in calculate_alignment()
311 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) in slab_unmergeable()
333 slab_flags_t flags, const char *name, void (*ctor)(void *)) in find_mergeable() argument
344 align = calculate_alignment(flags, align, size); in find_mergeable()
346 flags = kmem_cache_flags(size, flags, name, NULL); in find_mergeable()
348 if (flags & SLAB_NEVER_MERGE) in find_mergeable()
358 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) in find_mergeable()
[all …]
balloon_compaction.c
44 unsigned long flags; in balloon_page_list_enqueue() local
47 spin_lock_irqsave(&b_dev_info->pages_lock, flags); in balloon_page_list_enqueue()
53 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); in balloon_page_list_enqueue()
80 unsigned long flags; in balloon_page_list_dequeue() local
83 spin_lock_irqsave(&b_dev_info->pages_lock, flags); in balloon_page_list_dequeue()
108 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); in balloon_page_list_dequeue()
149 unsigned long flags; in balloon_page_enqueue() local
151 spin_lock_irqsave(&b_dev_info->pages_lock, flags); in balloon_page_enqueue()
153 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); in balloon_page_enqueue()
179 unsigned long flags; in balloon_page_dequeue() local
[all …]
hmm.c
80 unsigned long flags; in notifiers_decrement() local
82 spin_lock_irqsave(&hmm->ranges_lock, flags); in notifiers_decrement()
94 spin_unlock_irqrestore(&hmm->ranges_lock, flags); in notifiers_decrement()
103 unsigned long flags; in hmm_invalidate_range_start() local
106 spin_lock_irqsave(&hmm->ranges_lock, flags); in hmm_invalidate_range_start()
114 spin_unlock_irqrestore(&hmm->ranges_lock, flags); in hmm_invalidate_range_start()
220 unsigned int flags; member
226 unsigned int flags = FAULT_FLAG_REMOTE; in hmm_vma_do_fault() local
235 if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY) in hmm_vma_do_fault()
236 flags |= FAULT_FLAG_ALLOW_RETRY; in hmm_vma_do_fault()
[all …]
slob.c
307 unsigned long flags; in slob_alloc() local
317 spin_lock_irqsave(&slob_lock, flags); in slob_alloc()
354 spin_unlock_irqrestore(&slob_lock, flags); in slob_alloc()
364 spin_lock_irqsave(&slob_lock, flags); in slob_alloc()
372 spin_unlock_irqrestore(&slob_lock, flags); in slob_alloc()
387 unsigned long flags; in slob_free() local
397 spin_lock_irqsave(&slob_lock, flags); in slob_free()
403 spin_unlock_irqrestore(&slob_lock, flags); in slob_free()
461 spin_unlock_irqrestore(&slob_lock, flags); in slob_free()
582 int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags) in __kmem_cache_create() argument
[all …]
process_vm_access.c
88 unsigned int flags = 0; in process_vm_rw_single_vec() local
96 flags |= FOLL_WRITE; in process_vm_rw_single_vec()
109 pages = get_user_pages_remote(task, mm, pa, pages, flags, in process_vm_rw_single_vec()
154 unsigned long flags, int vm_write) in process_vm_rw_core() argument
259 unsigned long flags, int vm_write) in process_vm_rw() argument
269 if (flags != 0) in process_vm_rw()
284 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); in process_vm_rw()
296 unsigned long, riovcnt, unsigned long, flags) in SYSCALL_DEFINE6() argument
298 return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0); in SYSCALL_DEFINE6()
304 unsigned long, riovcnt, unsigned long, flags) in SYSCALL_DEFINE6() argument
[all …]
mempool.c
303 unsigned long flags; in mempool_resize() local
308 spin_lock_irqsave(&pool->lock, flags); in mempool_resize()
312 spin_unlock_irqrestore(&pool->lock, flags); in mempool_resize()
314 spin_lock_irqsave(&pool->lock, flags); in mempool_resize()
319 spin_unlock_irqrestore(&pool->lock, flags); in mempool_resize()
327 spin_lock_irqsave(&pool->lock, flags); in mempool_resize()
330 spin_unlock_irqrestore(&pool->lock, flags); in mempool_resize()
341 spin_unlock_irqrestore(&pool->lock, flags); in mempool_resize()
345 spin_lock_irqsave(&pool->lock, flags); in mempool_resize()
349 spin_unlock_irqrestore(&pool->lock, flags); in mempool_resize()
[all …]
memblock.c
192 enum memblock_flags flags) in __memblock_find_range_bottom_up() argument
197 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { in __memblock_find_range_bottom_up()
227 enum memblock_flags flags) in __memblock_find_range_top_down() argument
232 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, in __memblock_find_range_top_down()
274 enum memblock_flags flags) in memblock_find_in_range_node() argument
300 size, align, nid, flags); in memblock_find_in_range_node()
319 flags); in memblock_find_in_range_node()
340 enum memblock_flags flags = choose_memblock_flags(); in memblock_find_in_range() local
344 NUMA_NO_NODE, flags); in memblock_find_in_range()
346 if (!ret && (flags & MEMBLOCK_MIRROR)) { in memblock_find_in_range()
[all …]
mlock.c
584 vm_flags_t flags) in apply_vma_lock_flags() argument
608 newflags |= flags; in apply_vma_lock_flags()
671 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) in do_mlock() argument
706 error = apply_vma_lock_flags(start, len, flags); in do_mlock()
723 SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) in SYSCALL_DEFINE3() argument
727 if (flags & ~MLOCK_ONFAULT) in SYSCALL_DEFINE3()
730 if (flags & MLOCK_ONFAULT) in SYSCALL_DEFINE3()
763 static int apply_mlockall_flags(int flags) in apply_mlockall_flags() argument
769 if (flags & MCL_FUTURE) { in apply_mlockall_flags()
772 if (flags & MCL_ONFAULT) in apply_mlockall_flags()
[all …]
page_isolation.c
21 unsigned long flags, pfn; in set_migratetype_isolate() local
28 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
84 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
93 unsigned long flags, nr_pages; in unset_migratetype_isolate() local
100 spin_lock_irqsave(&zone->lock, flags); in unset_migratetype_isolate()
139 spin_unlock_irqrestore(&zone->lock, flags); in unset_migratetype_isolate()
194 unsigned migratetype, int flags) in start_isolate_page_range() argument
209 if (set_migratetype_isolate(page, migratetype, flags)) { in start_isolate_page_range()
291 unsigned long pfn, flags; in test_pages_isolated() local
310 spin_lock_irqsave(&zone->lock, flags); in test_pages_isolated()
[all …]
huge_memory.c
118 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_get_huge_zero_page()
124 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_get_huge_zero_page()
132 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_put_huge_zero_page()
532 loff_t off, unsigned long flags, unsigned long size) in __thp_get_unmapped_area() argument
546 off >> PAGE_SHIFT, flags); in __thp_get_unmapped_area()
567 unsigned long len, unsigned long pgoff, unsigned long flags) in thp_get_unmapped_area() argument
575 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); in thp_get_unmapped_area()
579 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); in thp_get_unmapped_area()
728 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_huge_pmd_anonymous_page()
931 pmd_t *pmd, int flags) in touch_pmd() argument
[all …]
mmap.c
59 #define arch_mmap_check(addr, len, flags) (0) argument
185 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
1348 unsigned long flags, in mlock_future_check() argument
1354 if (flags & VM_LOCKED) { in mlock_future_check()
1402 unsigned long flags, vm_flags_t vm_flags, in do_mmap() argument
1425 if (flags & MAP_FIXED_NOREPLACE) in do_mmap()
1426 flags |= MAP_FIXED; in do_mmap()
1428 if (!(flags & MAP_FIXED)) in do_mmap()
1447 addr = get_unmapped_area(file, addr, len, pgoff, flags); in do_mmap()
1451 if (flags & MAP_FIXED_NOREPLACE) { in do_mmap()
[all …]
swap.c
65 unsigned long flags; in __page_cache_release() local
67 spin_lock_irqsave(&pgdat->lru_lock, flags); in __page_cache_release()
72 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in __page_cache_release()
197 unsigned long flags = 0; in pagevec_lru_move_fn() local
205 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
207 spin_lock_irqsave(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
214 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
254 unsigned long flags; in rotate_reclaimable_page() local
257 local_irq_save(flags); in rotate_reclaimable_page()
261 local_irq_restore(flags); in rotate_reclaimable_page()
[all …]
page_owner.c
116 static noinline depot_stack_handle_t save_stack(gfp_t flags) in save_stack() argument
135 handle = stack_depot_save(entries, nr_entries, flags); in save_stack()
155 __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); in __reset_page_owner()
175 __set_bit(PAGE_EXT_OWNER, &page_ext->flags); in __set_page_owner_handle()
176 __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); in __set_page_owner_handle()
249 __set_bit(PAGE_EXT_OWNER, &new_ext->flags); in __copy_page_owner()
250 __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags); in __copy_page_owner()
311 if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) in pagetypeinfo_showmixedcount_print()
369 page->flags, &page->flags); in print_page_owner()
421 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { in __dump_page_owner()
[all …]
nommu.c
153 void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags) in __vmalloc_node_flags() argument
155 return __vmalloc(size, flags, PAGE_KERNEL); in __vmalloc_node_flags()
330 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) in vmap() argument
784 unsigned long flags, in validate_mmap_request() argument
792 if (flags & MAP_FIXED) in validate_mmap_request()
795 if ((flags & MAP_TYPE) != MAP_PRIVATE && in validate_mmap_request()
796 (flags & MAP_TYPE) != MAP_SHARED) in validate_mmap_request()
854 if (flags & MAP_SHARED) { in validate_mmap_request()
890 if (flags & MAP_SHARED) { in validate_mmap_request()
943 unsigned long flags, in determine_vm_flags() argument
[all …]
mmzone.c
102 unsigned long old_flags, flags; in page_cpupid_xchg_last() local
106 old_flags = flags = page->flags; in page_cpupid_xchg_last()
109 flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); in page_cpupid_xchg_last()
110 flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; in page_cpupid_xchg_last()
111 } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags)); in page_cpupid_xchg_last()
/mm/kasan/
common.c
64 static inline depot_stack_handle_t save_stack(gfp_t flags) in save_stack() argument
71 return stack_depot_save(entries, nr_entries, flags); in save_stack()
74 static inline void set_track(struct kasan_track *track, gfp_t flags) in set_track() argument
77 track->stack = save_stack(flags); in set_track()
256 slab_flags_t *flags) in kasan_cache_create() argument
268 (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || in kasan_cache_create()
293 *flags |= SLAB_KASAN; in kasan_cache_create()
387 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) in assign_tag()
408 if (!(cache->flags & SLAB_KASAN)) in kasan_init_slab_obj()
455 if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) in __kasan_slab_free()
[all …]
quarantine.c
143 unsigned long flags; in qlink_free() local
146 local_irq_save(flags); in qlink_free()
151 local_irq_restore(flags); in qlink_free()
175 unsigned long flags; in quarantine_put() local
187 local_irq_save(flags); in quarantine_put()
210 local_irq_restore(flags); in quarantine_put()
216 unsigned long flags; in quarantine_reduce() local
234 raw_spin_lock_irqsave(&quarantine_lock, flags); in quarantine_reduce()
258 raw_spin_unlock_irqrestore(&quarantine_lock, flags); in quarantine_reduce()
302 unsigned long flags, i; in quarantine_remove_cache() local
[all …]
