/mm/

mempolicy.c
    127  .mode = MPOL_LOCAL,
    170  if (pol->mode)    in get_task_policy()
    231  if (!pol || pol->mode == MPOL_LOCAL)    in mpol_set_nodemask()
    250  ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);    in mpol_set_nodemask()
    258  static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,    in mpol_new() (argument)
    264  mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);    in mpol_new()
    266  if (mode == MPOL_DEFAULT) {    in mpol_new()
    278  if (mode == MPOL_PREFERRED) {    in mpol_new()
    284  mode = MPOL_LOCAL;    in mpol_new()
    286  } else if (mode == MPOL_LOCAL) {    in mpol_new()
    [all …]

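The MPOL_* values that mpol_new() validates above are the NUMA policy modes userspace selects through set_mempolicy(2). A minimal userspace sketch, assuming libnuma's <numaif.h>; the helper name prefer_node_zero is invented for illustration:

#include <numaif.h>

/* Hedged sketch: exercise the MPOL_* modes handled by mpol_new() above.
 * Error handling is minimal on purpose. */
int prefer_node_zero(void)
{
	unsigned long nodemask = 1UL << 0;	/* allow only node 0 */

	/* MPOL_PREFERRED: prefer node 0, fall back elsewhere if needed */
	if (set_mempolicy(MPOL_PREFERRED, &nodemask, sizeof(nodemask) * 8))
		return -1;

	/* MPOL_DEFAULT: revert to the default (node-local) policy */
	return set_mempolicy(MPOL_DEFAULT, NULL, 0) ? -1 : 0;
}
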
userfaultfd.c
    293  enum mcopy_atomic_mode mode)    in __mcopy_atomic_hugetlb() (argument)
    312  if (mode == MCOPY_ATOMIC_ZEROPAGE) {    in __mcopy_atomic_hugetlb()
    380  if (mode != MCOPY_ATOMIC_CONTINUE &&    in __mcopy_atomic_hugetlb()
    389  dst_addr, src_addr, mode, &page);    in __mcopy_atomic_hugetlb()
    454  enum mcopy_atomic_mode mode);
    463  enum mcopy_atomic_mode mode,    in mfill_atomic_pte() (argument)
    468  if (mode == MCOPY_ATOMIC_CONTINUE) {    in mfill_atomic_pte()
    484  if (mode == MCOPY_ATOMIC_NORMAL)    in mfill_atomic_pte()
    495  mode != MCOPY_ATOMIC_NORMAL,    in mfill_atomic_pte()
    508  __u64 mode)    in __mcopy_atomic() (argument)
    [all …]

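The enum mcopy_atomic_mode seen in __mcopy_atomic_hugetlb() and mfill_atomic_pte() reflects which userfaultfd ioctl was issued (UFFDIO_COPY, UFFDIO_ZEROPAGE or UFFDIO_CONTINUE), not a "mode" flag in the request itself. A hedged userspace sketch of the UFFDIO_COPY path, assuming an already-registered userfaultfd descriptor uffd; the helper name is invented:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <string.h>

/* Hedged sketch: resolve a fault by copying one page into the faulting
 * range; in the kernel this becomes the MCOPY_ATOMIC_NORMAL case above. */
static int uffd_copy_one_page(int uffd, void *dst, const void *src,
			      size_t page_size)
{
	struct uffdio_copy copy;

	memset(&copy, 0, sizeof(copy));
	copy.dst = (unsigned long)dst;
	copy.src = (unsigned long)src;
	copy.len = page_size;
	copy.mode = 0;		/* or UFFDIO_COPY_MODE_DONTWAKE */

	return ioctl(uffd, UFFDIO_COPY, &copy);
}
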
migrate.c
    61   int isolate_movable_page(struct page *page, isolate_mode_t mode)    in isolate_movable_page() (argument)
    104  if (!mapping->a_ops->isolate_page(page, mode))    in isolate_movable_page()
    640  enum migrate_mode mode)    in migrate_page() (argument)
    651  if (mode != MIGRATE_SYNC_NO_COPY)    in migrate_page()
    662  enum migrate_mode mode)    in buffer_migrate_lock_buffers() (argument)
    667  if (mode != MIGRATE_ASYNC) {    in buffer_migrate_lock_buffers()
    699  struct page *newpage, struct page *page, enum migrate_mode mode,    in __buffer_migrate_page() (argument)
    707  return migrate_page(mapping, newpage, page, mode);    in __buffer_migrate_page()
    715  if (!buffer_migrate_lock_buffers(head, mode))    in __buffer_migrate_page()
    758  if (mode != MIGRATE_SYNC_NO_COPY)    in __buffer_migrate_page()
    [all …]

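The recurring pattern here is that enum migrate_mode decides whether the migration path may sleep: MIGRATE_ASYNC callers may only trylock, while the synchronous modes can block, and MIGRATE_SYNC_NO_COPY additionally tells callbacks not to copy page contents themselves. A hedged sketch of that locking decision, modelled on buffer_migrate_lock_buffers() above; the function name is invented:

#include <linux/buffer_head.h>
#include <linux/migrate_mode.h>

/* Hedged sketch: async migration must not sleep, so it may only trylock;
 * the synchronous modes are allowed to wait for the buffer lock. */
static bool example_lock_for_migration(struct buffer_head *bh,
				       enum migrate_mode mode)
{
	if (mode != MIGRATE_ASYNC) {
		lock_buffer(bh);
		return true;
	}
	return trylock_buffer(bh);
}
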
vmpressure.c
    153  enum vmpressure_modes mode;    (member)
    166  if (ancestor && ev->mode == VMPRESSURE_LOCAL)    in vmpressure_event()
    168  if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)    in vmpressure_event()
    379  enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;    in vmpressure_register_event() (local)
    402  mode = ret;    in vmpressure_register_event()
    413  ev->mode = mode;    in vmpressure_register_event()

failslab.c
    46   umode_t mode = S_IFREG | 0600;    in failslab_debugfs_init() (local)
    52   debugfs_create_bool("ignore-gfp-wait", mode, dir,    in failslab_debugfs_init()
    54   debugfs_create_bool("cache-filter", mode, dir,    in failslab_debugfs_init()

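Here mode is simply the umode_t permission mask for the debugfs control files. A hedged sketch of the same pattern; the directory name and toggle variable are made up:

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/stat.h>

static bool example_ignore_gfp_wait;

/* Hedged sketch: create a debugfs bool toggle with 0600 permissions,
 * mirroring failslab_debugfs_init() above. */
static int __init example_debugfs_init(void)
{
	umode_t mode = S_IFREG | 0600;
	struct dentry *dir;

	dir = debugfs_create_dir("example-fault", NULL);
	debugfs_create_bool("ignore-gfp-wait", mode, dir,
			    &example_ignore_gfp_wait);
	return 0;
}
late_initcall(example_debugfs_init);
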
balloon_compaction.c
    206  bool balloon_page_isolate(struct page *page, isolate_mode_t mode)    in balloon_page_isolate() (argument)
    235  enum migrate_mode mode)    in balloon_page_migrate() (argument)
    244  if (mode == MIGRATE_SYNC_NO_COPY)    in balloon_page_migrate()
    250  return balloon->migratepage(balloon, newpage, page, mode);    in balloon_page_migrate()

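balloon_compaction.c, zsmalloc.c, z3fold.c and secretmem.c all implement the same pair of movable-page callbacks: an isolate hook taking isolate_mode_t and a migrate hook taking enum migrate_mode, wired up through address_space_operations as this kernel version does. A hedged sketch of that shape; everything prefixed example_ is invented:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/migrate.h>

/* Hedged sketch of the movable-page callback pair used by the drivers
 * above.  Real implementations take a per-driver lock, check that the
 * page is still owned by the driver, and move its payload. */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* return true only if the page was safely taken off driver lists */
	return true;
}

static int example_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;	/* these drivers copy contents themselves */

	/* ... copy driver metadata and contents from page to newpage ... */
	return MIGRATEPAGE_SUCCESS;
}

static void example_putback_page(struct page *page)
{
	/* undo example_isolate_page() if migration did not happen */
}

static const struct address_space_operations example_aops = {
	.isolate_page	= example_isolate_page,
	.migratepage	= example_migratepage,
	.putback_page	= example_putback_page,
};
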
compaction.c
    435   if (cc->mode != MIGRATE_ASYNC &&    in update_cached_migrate()
    503   if (cc->mode == MIGRATE_ASYNC && !cc->contended) {    in compact_lock_irqsave()
    822   unsigned long end_pfn, isolate_mode_t mode)    in isolate_migratepages_block() (argument)
    851   if (cc->mode == MIGRATE_ASYNC)    in isolate_migratepages_block()
    862   if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {    in isolate_migratepages_block()
    1016  if (!isolate_movable_page(page, mode))    in isolate_migratepages_block()
    1052  if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))    in isolate_migratepages_block()
    1061  if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))    in isolate_migratepages_block()
    1064  if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {    in isolate_migratepages_block()
    1296  if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)    in suitable_migration_source()
    [all …]

shmem.c
    116   umode_t mode;    (member)
    1468  if (!mpol || mpol->mode == MPOL_DEFAULT)    in shmem_show_mpol()
    2045  static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)    in synchronous_wake_function() (argument)
    2047  int ret = default_wake_function(wait, mode, sync, key);    in synchronous_wake_function()
    2284  umode_t mode, dev_t dev, unsigned long flags)    in shmem_get_inode() (argument)
    2297  inode_init_owner(&init_user_ns, inode, dir, mode);    in shmem_get_inode()
    2312  switch (mode & S_IFMT) {    in shmem_get_inode()
    2315  init_special_inode(inode, mode, dev);    in shmem_get_inode()
    2665  static long shmem_fallocate(struct file *file, int mode, loff_t offset,    in shmem_fallocate() (argument)
    2675  if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))    in shmem_fallocate()
    [all …]

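shmem_fallocate() rejects any mode bits other than FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE, so from userspace tmpfs supports plain preallocation and hole punching only. A hedged userspace sketch punching a hole in a tmpfs-backed memfd; the helper name is invented:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hedged sketch: punch a hole in a memfd (tmpfs-backed), the only
 * non-trivial mode combination shmem_fallocate() accepts. */
static int punch_hole_in_memfd(off_t offset, off_t len)
{
	int fd = memfd_create("example", 0);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, offset + len) < 0 ||
	    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      offset, len) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
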
secretmem.c
    147  static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)    in secretmem_isolate_page() (argument)
    154  enum migrate_mode mode)    in secretmem_migratepage() (argument)

internal.h
    255  enum migrate_mode mode; /* Async or sync migration mode */    (member)

zsmalloc.c
    1894  static bool zs_page_isolate(struct page *page, isolate_mode_t mode)    in zs_page_isolate() (argument)
    1951  struct page *page, enum migrate_mode mode)    in zs_page_migrate() (argument)
    1971  if (mode == MIGRATE_SYNC_NO_COPY)    in zs_page_migrate()

z3fold.c
    1562  static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)    in z3fold_page_isolate() (argument)
    1602  struct page *page, enum migrate_mode mode)    in z3fold_page_migrate() (argument)

page_alloc.c
    4026  umode_t mode = S_IFREG | 0600;    in fail_page_alloc_debugfs() (local)
    4032  debugfs_create_bool("ignore-gfp-wait", mode, dir,    in fail_page_alloc_debugfs()
    4034  debugfs_create_bool("ignore-gfp-highmem", mode, dir,    in fail_page_alloc_debugfs()
    4036  debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);    in fail_page_alloc_debugfs()
    9327  if (cc->alloc_contig && cc->mode == MIGRATE_ASYNC)    in __alloc_contig_migrate_range()
    9361  NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);    in __alloc_contig_migrate_range()
    9435  .mode = gfp_mask & __GFP_NORETRY ? MIGRATE_ASYNC : MIGRATE_SYNC,    in alloc_contig_range()

hugetlb.c
    2398  enum vma_resv_mode mode)    in __vma_reservation_common() (argument)
    2410  switch (mode) {    in __vma_reservation_common()
    2452  if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)    in __vma_reservation_common()
    5259  enum mcopy_atomic_mode mode,    in hugetlb_mcopy_atomic_pte() (argument)
    5262  bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);    in hugetlb_mcopy_atomic_pte()

filemap.c
    1131  static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)    in wake_page_function() (argument)
    1166  wake_up_state(wait->private, mode);    in wake_page_function()

memcontrol.c
    1809  unsigned mode, int sync, void *arg)    in memcg_oom_wake_function() (argument)
    1821  return autoremove_wake_function(wait, mode, sync, arg);    in memcg_oom_wake_function()
    4755  static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,    in memcg_event_wake() (argument)

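In the wait-queue callbacks above (filemap.c, memcontrol.c, shmem.c), mode is not a file or migration mode at all: it is the TASK_* state mask the wake-up should match, and it is normally just forwarded to a default wake function. A hedged sketch; the callback name is invented:

#include <linux/wait.h>

/* Hedged sketch: a custom wake function that could filter on "key" (as
 * wake_page_function() and memcg_oom_wake_function() do) and otherwise
 * behaves like autoremove_wake_function(); "mode" is the TASK_* state
 * mask forwarded to the scheduler. */
static int example_wake_function(wait_queue_entry_t *wait, unsigned mode,
				 int sync, void *key)
{
	return autoremove_wake_function(wait, mode, sync, key);
}

Such a callback would typically be installed with DEFINE_WAIT_FUNC() or init_waitqueue_func_entry() before queueing the entry.
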
slub.c
    2076  int mode, int *objects)    in acquire_slab() (argument)
    2093  if (mode) {    in acquire_slab()

/mm/kasan/

kasan.h
    550  #error kasan_arch_is_ready only works in KASAN generic outline mode!