/mm/

vmpressure.c
     72  static struct vmpressure *work_to_vmpressure(struct work_struct *work)  in work_to_vmpressure() argument
     74  return container_of(work, struct vmpressure, work);  in work_to_vmpressure()
    183  static void vmpressure_work_fn(struct work_struct *work)  in vmpressure_work_fn() argument
    185  struct vmpressure *vmpr = work_to_vmpressure(work);  in vmpressure_work_fn()
    290  schedule_work(&vmpr->work);  in vmpressure()
    464  INIT_WORK(&vmpr->work, vmpressure_work_fn);  in vmpressure_init()
    480  flush_work(&vmpr->work);  in vmpressure_cleanup()
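The vmpressure matches above show the canonical deferred-work lifecycle in one place: embed a struct work_struct in the owning object, INIT_WORK() it once, schedule_work() on events, recover the owner in the handler with container_of(), and flush_work() before teardown. A minimal sketch of that shape follows; struct my_dev and its functions are hypothetical stand-ins, not the real vmpressure code.

    /* Sketch of the embed + container_of() pattern, assuming a
     * hypothetical "struct my_dev". */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_dev {
    	int pressure;
    	struct work_struct work;	/* embedded, like vmpressure->work */
    };

    static void my_dev_work_fn(struct work_struct *work)
    {
    	/* "work" points into the struct, so container_of() recovers it */
    	struct my_dev *dev = container_of(work, struct my_dev, work);

    	dev->pressure = 0;		/* do the deferred processing */
    }

    static void my_dev_init(struct my_dev *dev)
    {
    	INIT_WORK(&dev->work, my_dev_work_fn);	/* cf. vmpressure_init() */
    }

    static void my_dev_event(struct my_dev *dev)
    {
    	schedule_work(&dev->work);		/* cf. vmpressure() */
    }

    static void my_dev_cleanup(struct my_dev *dev)
    {
    	flush_work(&dev->work);			/* cf. vmpressure_cleanup() */
    }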
page_reporting.c
     46  schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);  in __page_reporting_request()
    270  static void page_reporting_process(struct work_struct *work)  in page_reporting_process() argument
    272  struct delayed_work *d_work = to_delayed_work(work);  in page_reporting_process()
    274  container_of(d_work, struct page_reporting_dev_info, work);  in page_reporting_process()
    309  schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);  in page_reporting_process()
    329  INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);  in page_reporting_register()
    359  cancel_delayed_work_sync(&prdev->work);  in page_reporting_unregister()
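page_reporting.c uses the delayed-work variant: the handler still receives a bare struct work_struct *, so it converts back with to_delayed_work() before container_of(), and it may re-arm itself. A sketch under the same caveat, with hypothetical my_reporter and MY_DELAY names:

    /* Sketch of the delayed-work pattern; names are illustrative. */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define MY_DELAY (2 * HZ)

    struct my_reporter {
    	bool more_work;
    	struct delayed_work work;
    };

    static void my_report_process(struct work_struct *work)
    {
    	struct delayed_work *d_work = to_delayed_work(work);
    	struct my_reporter *rep =
    		container_of(d_work, struct my_reporter, work);

    	/* ... process, then re-arm if there is more to do, as
    	 * page_reporting_process() does at its line 309 ... */
    	if (rep->more_work)
    		schedule_delayed_work(&rep->work, MY_DELAY);
    }

    static void my_reporter_register(struct my_reporter *rep)
    {
    	INIT_DELAYED_WORK(&rep->work, my_report_process);
    	schedule_delayed_work(&rep->work, MY_DELAY);
    }

    static void my_reporter_unregister(struct my_reporter *rep)
    {
    	cancel_delayed_work_sync(&rep->work);	/* waits for the handler */
    }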
z3fold.c
    119  struct work_struct work;  member
    169  struct work_struct work;  member
    429  INIT_WORK(&zhdr->work, compact_page_work);  in init_z3fold_page()
    540  queue_work(pool->release_wq, &pool->work);  in __release_z3fold_page()
    576  struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);  in free_pages_work()
    588  cancel_work_sync(&zhdr->work);  in free_pages_work()
    868  work);  in compact_page_work()
   1026  INIT_WORK(&pool->work, free_pages_work);  in z3fold_create_pool()
   1140  cancel_work_sync(&zhdr->work);  in z3fold_alloc()
   1288  queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);  in z3fold_free()
   [all …]
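Unlike the entries above, z3fold queues onto its own workqueues (compact_wq, release_wq) rather than the system workqueue, and pins compaction to a CPU with queue_work_on(). A rough sketch of that setup and teardown, with illustrative names (my_pool, "my-compact"):

    /* Sketch of a dedicated workqueue; not z3fold's actual code. */
    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_pool {
    	struct workqueue_struct *compact_wq;
    	struct work_struct work;
    };

    static void my_compact_fn(struct work_struct *w)
    {
    	struct my_pool *pool = container_of(w, struct my_pool, work);
    	/* ... compact pool ... */
    }

    static int my_pool_create(struct my_pool *pool)
    {
    	pool->compact_wq = create_singlethread_workqueue("my-compact");
    	if (!pool->compact_wq)
    		return -ENOMEM;
    	INIT_WORK(&pool->work, my_compact_fn);
    	return 0;
    }

    static void my_pool_kick(struct my_pool *pool, int cpu)
    {
    	/* queue_work_on() pins the work to one CPU, cf. z3fold_free() */
    	queue_work_on(cpu, pool->compact_wq, &pool->work);
    }

    static void my_pool_destroy(struct my_pool *pool)
    {
    	cancel_work_sync(&pool->work);
    	destroy_workqueue(pool->compact_wq);
    }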
memory-failure.c
   1501  struct work_struct work;  member
   1534  schedule_work_on(smp_processor_id(), &mf_cpu->work);  in memory_failure_queue()
   1543  static void memory_failure_work_func(struct work_struct *work)  in memory_failure_work_func() argument
   1550  mf_cpu = container_of(work, struct memory_failure_cpu, work);  in memory_failure_work_func()
   1573  cancel_work_sync(&mf_cpu->work);  in memory_failure_queue_kick()
   1574  memory_failure_work_func(&mf_cpu->work);  in memory_failure_queue_kick()
   1586  INIT_WORK(&mf_cpu->work, memory_failure_work_func);  in memory_failure_init()
swap.c
    911  struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);  in __lru_add_drain_all() local
    922  INIT_WORK(work, lru_add_drain_per_cpu);  in __lru_add_drain_all()
    923  queue_work_on(cpu, mm_percpu_wq, work);  in __lru_add_drain_all()
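The swap.c matches are the per-CPU fan-out pattern: one statically allocated work item per CPU, each queued on its own CPU via queue_work_on() and then flushed so the caller knows every CPU has drained. A sketch of the shape; the my_drain_* names and my_wq are stand-ins (the real code queues on the mm-internal mm_percpu_wq):

    /* Sketch of a per-CPU work fan-out, cf. __lru_add_drain_all(). */
    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>
    #include <linux/percpu.h>
    #include <linux/cpu.h>

    static DEFINE_PER_CPU(struct work_struct, my_drain_work);
    static struct workqueue_struct *my_wq;

    static void my_drain_per_cpu(struct work_struct *dummy)
    {
    	/* runs on the CPU it was queued on; drain that CPU's caches */
    }

    static int __init my_drain_init(void)
    {
    	/* WQ_MEM_RECLAIM, since drains may run under memory pressure */
    	my_wq = alloc_workqueue("my_drain", WQ_MEM_RECLAIM, 0);
    	return my_wq ? 0 : -ENOMEM;
    }

    static void my_drain_all(void)
    {
    	int cpu;

    	cpus_read_lock();	/* keep the online-CPU mask stable */
    	for_each_online_cpu(cpu) {
    		struct work_struct *work = &per_cpu(my_drain_work, cpu);

    		INIT_WORK(work, my_drain_per_cpu);
    		queue_work_on(cpu, my_wq, work);
    	}
    	for_each_online_cpu(cpu)
    		flush_work(&per_cpu(my_drain_work, cpu));
    	cpus_read_unlock();
    }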
slab_common.c
     50  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
    425  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)  in slab_caches_to_rcu_destroy_workfn() argument
backing-dev.c
    390  static void cgwb_release_workfn(struct work_struct *work)  in cgwb_release_workfn() argument
    392  struct bdi_writeback *wb = container_of(work, struct bdi_writeback,  in cgwb_release_workfn()
zswap.c
    655  static void __zswap_pool_release(struct work_struct *work)  in __zswap_pool_release() argument
    657  struct zswap_pool *pool = container_of(work, typeof(*pool),  in __zswap_pool_release()
memcontrol.c
   2247  struct work_struct work;  member
   2403  drain_local_stock(&stock->work);  in drain_all_stock()
   2405  schedule_work_on(cpu, &stock->work);  in drain_all_stock()
   2485  static void high_work_func(struct work_struct *work)  in high_work_func() argument
   2489  memcg = container_of(work, struct mem_cgroup, high_work);  in high_work_func()
   4826  static void memcg_event_remove(struct work_struct *work)  in memcg_event_remove() argument
   4829  container_of(work, struct mem_cgroup_event, remove);  in memcg_event_remove()
   7197  INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,  in mem_cgroup_init()
slab.c
    515  if (reap_work->work.func == NULL) {  in start_cpu_timer()
   1094  per_cpu(slab_reap_work, cpu).work.func = NULL;  in slab_offline_cpu()
   3978  struct delayed_work *work = to_delayed_work(w);  in cache_reap() local
   4026  schedule_delayed_work_on(smp_processor_id(), work,  in cache_reap()
page_alloc.c
    148  struct work_struct work;  member
   3152  static void drain_local_pages_wq(struct work_struct *work)  in drain_local_pages_wq() argument
   3156  drain = container_of(work, struct pcpu_drain, work);  in drain_local_pages_wq()
   3240  INIT_WORK(&drain->work, drain_local_pages_wq);  in drain_all_pages()
   3241  queue_work_on(cpu, mm_percpu_wq, &drain->work);  in drain_all_pages()
   3244  flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);  in drain_all_pages()
zsmalloc.c
   2172  static void async_free_zspage(struct work_struct *work)  in async_free_zspage() argument
   2180  struct zs_pool *pool = container_of(work, struct zs_pool,  in async_free_zspage()
percpu.c
    194  static void pcpu_balance_workfn(struct work_struct *work);
   2063  static void pcpu_balance_workfn(struct work_struct *work)  in pcpu_balance_workfn() argument
kmemleak.c
   1871  static void kmemleak_do_cleanup(struct work_struct *work)  in kmemleak_do_cleanup() argument
swapfile.c
    506  static void swap_discard_work(struct work_struct *work)  in swap_discard_work() argument
    510  si = container_of(work, struct swap_info_struct, discard_work);  in swap_discard_work()
vmstat.c
   1813  static void refresh_vm_stats(struct work_struct *work)  in refresh_vm_stats() argument
Kconfig
    415  benefit but it will work automatically for all applications.
hugetlb.c
   1510  static void free_hpage_workfn(struct work_struct *work)  in free_hpage_workfn() argument
/mm/kfence/

core.c
    597  static void wake_up_kfence_timer(struct irq_work *work)  in wake_up_kfence_timer() argument
    616  static void toggle_allocation_gate(struct work_struct *work)  in toggle_allocation_gate() argument
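kfence/core.c mixes a regular work item with an irq_work: irq_work handlers may be raised from and run in hard-IRQ context, so they are kept tiny, here just waking a waiter. A sketch of that variant, with a hypothetical waitqueue and trigger function:

    /* Sketch of the irq_work pattern, cf. wake_up_kfence_timer(). */
    #include <linux/irq_work.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wait);
    static struct irq_work my_irq_work;

    static void my_irq_wakeup(struct irq_work *work)
    {
    	wake_up(&my_wait);	/* safe to call from hard-IRQ context */
    }

    static void my_setup(void)
    {
    	init_irq_work(&my_irq_work, my_irq_wakeup);
    }

    static void my_trigger(void)
    {
    	/* may be called from NMI/IRQ context; handler runs shortly */
    	irq_work_queue(&my_irq_work);
    }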
/mm/damon/

reclaim.c
    347  static void damon_reclaim_timer_fn(struct work_struct *work)  in damon_reclaim_timer_fn() argument
Kconfig
     33  that work for virtual address spaces.