/mm/: matches for the identifier "mode" (per-file lists ending in "[all …]" are truncated)
mempolicy.c:
     120  .mode = MPOL_PREFERRED,
     138  if (pol->mode)    in get_task_policy()
     233  if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))    in mpol_set_nodemask()
     249  ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);    in mpol_set_nodemask()
     251  ret = mpol_ops[pol->mode].create(pol, NULL);    in mpol_set_nodemask()
     259  static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,    in mpol_new() (argument)
     265  mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);    in mpol_new()
     267  if (mode == MPOL_DEFAULT) {    in mpol_new()
     279  if (mode == MPOL_PREFERRED) {    in mpol_new()
     285  } else if (mode == MPOL_LOCAL) {    in mpol_new()
     [all …]
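
In mempolicy.c, "mode" is the NUMA policy kind (MPOL_DEFAULT, MPOL_PREFERRED, MPOL_BIND, MPOL_INTERLEAVE, MPOL_LOCAL): it is stored in struct mempolicy and indexes the mpol_ops table at lines 249/251. A compilable user-space sketch of the question mpol_new() is answering above, namely which modes require a non-empty nodemask; the helper name and the exact split of this logic are illustrative, since the kernel spreads it across mpol_new() and the per-mode create() hooks:

    #include <stdbool.h>

    enum mpol_mode { MPOL_DEFAULT, MPOL_PREFERRED, MPOL_BIND,
                     MPOL_INTERLEAVE, MPOL_LOCAL };

    /* Which policy modes need the caller to supply at least one node? */
    static bool mpol_needs_nodes(enum mpol_mode mode)
    {
            switch (mode) {
            case MPOL_DEFAULT:
            case MPOL_LOCAL:
                    return false;   /* no nodemask: use the local node */
            case MPOL_PREFERRED:
                    return false;   /* empty mask means "prefer local" */
            default:
                    return true;    /* BIND/INTERLEAVE need explicit nodes */
            }
    }
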
migrate.c:
     286  enum migrate_mode mode)    in buffer_migrate_lock_buffers() (argument)
     291  if (mode != MIGRATE_ASYNC) {    in buffer_migrate_lock_buffers()
     327  enum migrate_mode mode)    in buffer_migrate_lock_buffers() (argument)
     343  struct buffer_head *head, enum migrate_mode mode,    in migrate_page_move_mapping() (argument)
     380  if (mode == MIGRATE_ASYNC && head &&    in migrate_page_move_mapping()
     381  !buffer_migrate_lock_buffers(head, mode)) {    in migrate_page_move_mapping()
     599  enum migrate_mode mode)    in migrate_page() (argument)
     605  rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);    in migrate_page()
     622  struct page *newpage, struct page *page, enum migrate_mode mode)    in buffer_migrate_page() (argument)
     628  return migrate_page(mapping, newpage, page, mode);    in buffer_migrate_page()
     [all …]
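
Throughout migrate.c, mode is an enum migrate_mode (MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC) telling each step how much it may block; lines 291 and 380-381 show the key pattern: asynchronous migration only trylocks buffers and skips the page rather than sleeping. A user-space analogue of that pattern, with a pthread mutex standing in for the buffer_head lock:

    #include <pthread.h>
    #include <stdbool.h>

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    /* Async callers must stay nonblocking: trylock, and report failure
     * so the caller can skip this page rather than stall. Sync callers
     * may sleep on the lock. */
    static bool lock_for_migration(pthread_mutex_t *lock, enum migrate_mode mode)
    {
            if (mode == MIGRATE_ASYNC)
                    return pthread_mutex_trylock(lock) == 0;
            pthread_mutex_lock(lock);
            return true;
    }

The same mode value flows from the allocator through compaction into migration, which is why this async-versus-sync check reappears at every layer below.
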
compaction.c:
     202  if (cc->mode != MIGRATE_ASYNC &&    in update_pageblock_skip()
     237  if (cc->mode == MIGRATE_ASYNC) {    in compact_trylock_irqsave()
     278  if (cc->mode == MIGRATE_ASYNC) {    in compact_unlock_should_abort()
     301  if (cc->mode == MIGRATE_ASYNC) {    in compact_should_abort()
     624  if (cc->mode == MIGRATE_ASYNC)    in isolate_migratepages_block()
    1009  (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);    in isolate_migratepages()
    1049  if (cc->mode == MIGRATE_ASYNC &&    in isolate_migratepages()
    1195  const bool sync = cc->mode != MIGRATE_ASYNC;    in compact_zone()
    1254  compaction_free, (unsigned long)cc, cc->mode,    in compact_zone()
    1286  gfp_t gfp_mask, enum migrate_mode mode, int *contended)    in compact_zone_order() (argument)
    [all …]
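
compaction.c reads the same enum migrate_mode out of struct compact_control (cc->mode): async compaction backs off on lock contention (lines 237-301) and tags its page isolation accordingly (line 1009). A compilable sketch of that flag-derivation idiom; the flag value and the struct name suffix are illustrative:

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    #define ISOLATE_ASYNC_MIGRATE 0x4       /* illustrative value */

    struct compact_control_sketch {
            enum migrate_mode mode;         /* async or sync migration mode */
    };

    /* Async compaction tags its isolation requests so that the lower
     * layer (vmscan's __isolate_lru_page()) also avoids blocking work. */
    static unsigned int isolate_mode_for(const struct compact_control_sketch *cc)
    {
            return cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0;
    }
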
failslab.c:
      38  umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;    in failslab_debugfs_init() (local)
      44  if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,    in failslab_debugfs_init()
      47  if (!debugfs_create_bool("cache-filter", mode, dir,    in failslab_debugfs_init()
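
Here "mode" means something else entirely: a umode_t carrying file-type and permission bits (a regular file, read/write for the owner, i.e. 0600) for the fault-injection control files in debugfs. A minimal module sketch of the same pattern, written against the dentry-returning debugfs API this listing reflects; the file and directory names are made up:

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/stat.h>

    static u32 example_flag;                /* hypothetical on/off knob */
    static struct dentry *example_dir;

    static int __init example_debugfs_init(void)
    {
            umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;  /* 0600 regular file */

            example_dir = debugfs_create_dir("example", NULL);
            if (!example_dir)
                    return -ENOMEM;
            /* In this kernel generation debugfs_create_bool() takes a
             * u32 * and returns a dentry that must be checked. */
            if (!debugfs_create_bool("example-flag", mode, example_dir,
                                     &example_flag)) {
                    debugfs_remove_recursive(example_dir);
                    return -ENOMEM;
            }
            return 0;
    }
    module_init(example_debugfs_init);

    static void __exit example_debugfs_exit(void)
    {
            debugfs_remove_recursive(example_dir);
    }
    module_exit(example_debugfs_exit);

    MODULE_LICENSE("GPL");
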
shmem.c:
     858  if (!mpol || mpol->mode == MPOL_DEFAULT)    in shmem_show_mpol()
    1404  umode_t mode, dev_t dev, unsigned long flags)    in shmem_get_inode() (argument)
    1416  inode_init_owner(inode, dir, mode);    in shmem_get_inode()
    1430  switch (mode & S_IFMT) {    in shmem_get_inode()
    1433  init_special_inode(inode, mode, dev);    in shmem_get_inode()
    2052  static long shmem_fallocate(struct file *file, int mode, loff_t offset,    in shmem_fallocate() (argument)
    2062  if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))    in shmem_fallocate()
    2067  if (mode & FALLOC_FL_PUNCH_HOLE) {    in shmem_fallocate()
    2172  if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)    in shmem_fallocate()
    2209  shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)    in shmem_mknod() (argument)
    [all …]
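
shmem.c uses "mode" in three senses: the mempolicy mode again (line 858), the inode umode_t that shmem_get_inode() dispatches on via the S_IFMT mask (lines 1404-1433, 2209), and the fallocate operation flags (lines 2052-2172). Line 2062 is the standard guard that rejects any fallocate feature the filesystem does not implement; a compilable sketch of it, returning -EOPNOTSUPP, the conventional errno for unsupported fallocate flags:

    #include <errno.h>
    #include <linux/falloc.h>   /* FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE */

    /* Only hole punching and size-preserving allocation are supported;
     * any other FALLOC_FL_* bit is refused up front. */
    static int check_fallocate_mode(int mode)
    {
            if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                    return -EOPNOTSUPP;
            return 0;
    }
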
balloon_compaction.c:
     197  struct page *page, enum migrate_mode mode)    in balloon_page_migrate() (argument)
     216  rc = balloon->migratepage(balloon, newpage, page, mode);    in balloon_page_migrate()
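
The balloon core does not interpret the migrate mode itself; line 216 forwards it to a driver-supplied callback so each balloon driver can decide how much blocking work to do. A reduced sketch of the callback shape that call implies; the struct is trimmed to the one relevant member and renamed to mark it as a sketch:

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    struct page;                        /* opaque here */

    struct balloon_dev_info_sketch {
            /* Driver hook: move "page" to "newpage", honouring "mode"
             * (an async caller must not be made to sleep). */
            int (*migratepage)(struct balloon_dev_info_sketch *balloon,
                               struct page *newpage, struct page *page,
                               enum migrate_mode mode);
    };
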
vmscan.c:
    1226  int __isolate_lru_page(struct page *page, isolate_mode_t mode)    in __isolate_lru_page() (argument)
    1235  if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))    in __isolate_lru_page()
    1251  if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {    in __isolate_lru_page()
    1260  if (mode & ISOLATE_CLEAN)    in __isolate_lru_page()
    1274  if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))    in __isolate_lru_page()
    1313  isolate_mode_t mode, enum lru_list lru)    in isolate_lru_pages() (argument)
    1328  switch (__isolate_lru_page(page, mode)) {    in isolate_lru_pages()
    1348  nr_taken, mode, is_file_lru(lru));    in isolate_lru_pages()
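
In vmscan.c, mode is an isolate_mode_t: a bitmask of ISOLATE_* flags where each set bit rejects one class of pages during LRU isolation, as lines 1235-1274 show. A compilable user-space model of that filter; the flag values and the page-state struct are illustrative:

    #include <stdbool.h>

    #define ISOLATE_CLEAN           0x1     /* illustrative values */
    #define ISOLATE_UNMAPPED        0x2
    #define ISOLATE_ASYNC_MIGRATE   0x4
    #define ISOLATE_UNEVICTABLE     0x8

    struct page_state { bool unevictable, dirty, mapped; };

    static bool can_isolate(const struct page_state *p, unsigned int mode)
    {
            if (p->unevictable && !(mode & ISOLATE_UNEVICTABLE))
                    return false;   /* skip unevictable pages unless asked */
            if ((mode & ISOLATE_CLEAN) && p->dirty)
                    return false;   /* caller wants clean pages only */
            if ((mode & ISOLATE_UNMAPPED) && p->mapped)
                    return false;   /* caller wants unmapped pages only */
            return true;
    }
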
page_alloc.c:
    1701  umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;    in fail_page_alloc_debugfs() (local)
    1709  if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,    in fail_page_alloc_debugfs()
    1712  if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,    in fail_page_alloc_debugfs()
    1715  if (!debugfs_create_u32("min-order", mode, dir,    in fail_page_alloc_debugfs()
    2335  int classzone_idx, int migratetype, enum migrate_mode mode,    in __alloc_pages_direct_compact() (argument)
    2347  nodemask, mode,    in __alloc_pages_direct_compact()
    2391  if (last_compact_zone && mode != MIGRATE_ASYNC)    in __alloc_pages_direct_compact()
    2409  int classzone_idx, int migratetype, enum migrate_mode mode,    in __alloc_pages_direct_compact() (argument)
    6343  NULL, 0, cc->mode, MR_CMA);    in __alloc_contig_migrate_range()
    6383  .mode = MIGRATE_SYNC,    in alloc_contig_range()
    [all …]
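
page_alloc.c shows both meanings once more: the debugfs file mode for the fail_page_alloc knobs (lines 1701-1715) and the migrate mode handed to direct compaction (lines 2335 onward); note that line 2391 acts only when the mode was not MIGRATE_ASYNC, and that alloc_contig_range() hard-codes MIGRATE_SYNC at line 6383 because CMA callers may block. A sketch of the escalate-from-async idea behind the allocator's compaction retries; try_compact() is a made-up stand-in for the real entry points, and the loop is deliberately simpler than the kernel's retry logic:

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    /* Made-up stand-in for compact_zone_order(); always "fails" here
     * so the escalation shape is visible. */
    static int try_compact(enum migrate_mode mode)
    {
            (void)mode;
            return -1;
    }

    /* Start with cheap nonblocking compaction; escalate toward
     * MIGRATE_SYNC only while weaker modes keep failing. */
    static int compact_with_escalation(void)
    {
            enum migrate_mode mode;

            for (mode = MIGRATE_ASYNC; mode <= MIGRATE_SYNC; mode++)
                    if (try_compact(mode) == 0)
                            return 0;
            return -1;
    }
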
internal.h:
     163  enum migrate_mode mode; /* Async or sync migration mode */    (member)
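
This is the field behind every cc->mode test in the compaction entries above: struct compact_control is the per-run state mm/compaction.c threads through its helpers. A reduced sketch for context; the real struct carries scanner positions, counters, a zone pointer, and more:

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    struct compact_control {
            /* ... migrate/free scanner state, counters, zone pointer ... */
            enum migrate_mode mode;     /* async or sync migration mode */
    };
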
memcontrol.c:
    2075  unsigned mode, int sync, void *arg)    in memcg_oom_wake_function() (argument)
    2091  return autoremove_wake_function(wait, mode, sync, arg);    in memcg_oom_wake_function()
    4949  static int memcg_event_wake(wait_queue_t *wait, unsigned mode,    in memcg_event_wake() (argument)
    5178  .mode = S_IWUGO,
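
Two further meanings: in the wait-queue callbacks (lines 2075, 4949) mode is the task-state mask the waker passes through, ultimately handed to autoremove_wake_function(); the .mode = S_IWUGO at line 5178 is again a file mode, here for a write-only control file. A sketch of the custom wake-function pattern against the wait_queue_t API of this era (the type was later renamed wait_queue_entry_t); the matching rule against wait->private is hypothetical:

    #include <linux/wait.h>

    /* Wake only the waiter whose private data matches the key the
     * waker passed; leave other waiters asleep. */
    static int example_wake_function(wait_queue_t *wait, unsigned mode,
                                     int sync, void *arg)
    {
            if (wait->private != arg)
                    return 0;
            /* Wake the task and unlink it from the queue. */
            return autoremove_wake_function(wait, mode, sync, arg);
    }
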
slub.c:
    1568  int mode, int *objects)    in acquire_slab() (argument)
    1585  if (mode) {    in acquire_slab()
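
In SLUB's acquire_slab(), mode is effectively a boolean: nonzero means the acquiring CPU claims the page's whole freelist for its per-CPU slab, zero means the page is taken but its remaining objects stay on the page freelist. A compilable miniature of that branch; the struct is a stand-in for the relevant bits of struct page:

    #include <stddef.h>

    struct slab_sketch {
            unsigned int inuse, objects;
            void *freelist;
    };

    /* mode != 0: hand every free object to the acquiring CPU. */
    static void acquire_slab_sketch(struct slab_sketch *s, int mode)
    {
            if (mode) {
                    s->inuse = s->objects;  /* all objects now "in use" */
                    s->freelist = NULL;     /* freelist consumed wholesale */
            }
            /* mode == 0: leave s->freelist intact for partial allocation. */
    }
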