/mm/
gup.c
    634  unsigned long start, unsigned long nr_pages, int write,   in get_user_pages() argument
    641  if (write)   in get_user_pages()
    720  int write, struct page **pages, int *nr)   in gup_pte_range() argument
    742  pte_numa(pte) || (write && !pte_write(pte)))   in gup_pte_range()
    779  int write, struct page **pages, int *nr)   in gup_pte_range() argument
    786  unsigned long end, int write, struct page **pages, int *nr)   in gup_huge_pmd() argument
    791  if (write && !pmd_write(orig))   in gup_huge_pmd()
    833  unsigned long end, int write, struct page **pages, int *nr)   in gup_huge_pud() argument
    838  if (write && !pud_write(orig))   in gup_huge_pud()
    875  int write, struct page **pages, int *nr)   in gup_pmd_range() argument
    [all …]
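The get_user_pages()/gup_*() hits above all thread the same int write flag down the fast-GUP page-table walk. As a reading aid, here is a minimal, illustrative sketch (not code from mm/; a 3.x-era kernel context and the old int-flag get_user_pages_fast() prototype are assumed, and the helper name is invented) of how a caller pins a user buffer with that flag:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical helper: pin the pages backing a user buffer. */
static int pin_user_buffer(unsigned long uaddr, size_t len, int write,
                           struct page ***pagesp)
{
        int nr_pages = DIV_ROUND_UP(offset_in_page(uaddr) + len, PAGE_SIZE);
        struct page **pages;
        int pinned;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* write != 0 asks for pages we may modify; may trigger COW faults. */
        pinned = get_user_pages_fast(uaddr, nr_pages, write, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        *pagesp = pages;
        return pinned;  /* caller releases each page with page_cache_release() */
}

The checks listed above of the form write && !pte_write(pte) (and the pmd/pud variants) are where the fast path refuses a write request against a read-only entry and lets the caller fall back to the slow get_user_pages() path.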
util.c
    209  int nr_pages, int write, struct page **pages)   in __get_user_pages_fast() argument
    240  int nr_pages, int write, struct page **pages)   in get_user_pages_fast() argument
    247  write, 0, pages, NULL);   in get_user_pages_fast()
    315  int overcommit_ratio_handler(struct ctl_table *table, int write,   in overcommit_ratio_handler() argument
    321  ret = proc_dointvec(table, write, buffer, lenp, ppos);   in overcommit_ratio_handler()
    322  if (ret == 0 && write)   in overcommit_ratio_handler()
    327  int overcommit_kbytes_handler(struct ctl_table *table, int write,   in overcommit_kbytes_handler() argument
    333  ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);   in overcommit_kbytes_handler()
    334  if (ret == 0 && write)   in overcommit_kbytes_handler()
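The overcommit handlers above show the standard sysctl-handler idiom: hand parsing to a proc_do*() helper, then act only when ret == 0 && write, i.e. on a successful write from userspace. A hypothetical handler in the same shape (the knob, bounds and names are invented for illustration; the pre-5.x void __user *buffer prototype is assumed):

#include <linux/kernel.h>
#include <linux/sysctl.h>

static int example_ratio;
static int zero;
static int one_hundred = 100;

static int example_ratio_handler(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write) {
                /* Successful write from userspace: refresh derived state here. */
                pr_debug("example_ratio is now %d\n", example_ratio);
        }
        return ret;
}

/* Would be hooked up elsewhere via register_sysctl_table(). */
static struct ctl_table example_table[] = {
        {
                .procname       = "example_ratio",
                .data           = &example_ratio,
                .maxlen         = sizeof(example_ratio),
                .mode           = 0644,
                .proc_handler   = example_ratio_handler,
                .extra1         = &zero,
                .extra2         = &one_hundred,
        },
        { }
};

The same pattern recurs in the page-writeback.c, hugetlb.c, compaction.c, page_alloc.c and backing-dev.c entries below, so it is not illustrated again there.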
page-writeback.c
    332  int dirty_background_ratio_handler(struct ctl_table *table, int write,   in dirty_background_ratio_handler() argument
    338  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);   in dirty_background_ratio_handler()
    339  if (ret == 0 && write)   in dirty_background_ratio_handler()
    344  int dirty_background_bytes_handler(struct ctl_table *table, int write,   in dirty_background_bytes_handler() argument
    350  ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);   in dirty_background_bytes_handler()
    351  if (ret == 0 && write)   in dirty_background_bytes_handler()
    356  int dirty_ratio_handler(struct ctl_table *table, int write,   in dirty_ratio_handler() argument
    363  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);   in dirty_ratio_handler()
    364  if (ret == 0 && write && vm_dirty_ratio != old_ratio) {   in dirty_ratio_handler()
    371  int dirty_bytes_handler(struct ctl_table *table, int write,   in dirty_bytes_handler()
    [all …]
madvise.c
    468  int write;   in SYSCALL_DEFINE3() local
    495  write = madvise_need_mmap_write(behavior);   in SYSCALL_DEFINE3()
    496  if (write)   in SYSCALL_DEFINE3()
    547  if (write)   in SYSCALL_DEFINE3()
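Here write is a local rather than an argument: sys_madvise() asks madvise_need_mmap_write() whether the requested behavior mutates VMAs and then takes mmap_sem in the matching mode. A generic sketch of that locking idiom (a hypothetical helper, not the madvise code itself; the 3.x mmap_sem naming is assumed):

#include <linux/mm_types.h>
#include <linux/rwsem.h>

/*
 * Run op(mm) under mmap_sem, held for write only when the caller says the
 * operation will modify the VMA layout.
 */
static long do_vma_op_locked(struct mm_struct *mm, int write,
                             long (*op)(struct mm_struct *))
{
        long error;

        if (write)
                down_write(&mm->mmap_sem);
        else
                down_read(&mm->mmap_sem);

        error = op(mm);         /* walk or modify VMAs with the lock held */

        if (write)
                up_write(&mm->mmap_sem);
        else
                up_read(&mm->mmap_sem);

        return error;
}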
hugetlb_cgroup.c
    336  cft->write = hugetlb_cgroup_write;   in __hugetlb_cgroup_file_init()
    348  cft->write = hugetlb_cgroup_reset;   in __hugetlb_cgroup_file_init()
    355  cft->write = hugetlb_cgroup_reset;   in __hugetlb_cgroup_file_init()
nommu.c
    201   int write, int force, struct page **pages,   in get_user_pages() argument
    206   if (write)   in get_user_pages()
    2006  unsigned long addr, void *buf, int len, int write)   in __access_remote_vm() argument
    2020  if (write && vma->vm_flags & VM_MAYWRITE)   in __access_remote_vm()
    2023  else if (!write && vma->vm_flags & VM_MAYREAD)   in __access_remote_vm()
    2048  void *buf, int len, int write)   in access_remote_vm() argument
    2050  return __access_remote_vm(NULL, mm, addr, buf, len, write);   in access_remote_vm()
    2057  int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)   in access_process_vm() argument
    2068  len = __access_remote_vm(tsk, mm, addr, buf, len, write);   in access_process_vm()
memory.c
    2710  struct page *page, pte_t *pte, bool write, bool anon)   in do_set_pte() argument
    2716  if (write)   in do_set_pte()
    3514  void *buf, int len, int write)   in generic_access_phys() argument
    3521  if (follow_phys(vma, addr, write, &prot, &phys_addr))   in generic_access_phys()
    3525  if (write)   in generic_access_phys()
    3541  unsigned long addr, void *buf, int len, int write)   in __access_remote_vm() argument
    3554  write, 1, &page, &vma);   in __access_remote_vm()
    3568  len, write);   in __access_remote_vm()
    3580  if (write) {   in __access_remote_vm()
    3611  void *buf, int len, int write)   in access_remote_vm() argument
    [all …]
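memory.c provides the MMU implementations of the __access_remote_vm()/access_remote_vm() pair that nommu.c reimplements above, plus generic_access_phys() for I/O mappings. As a usage illustration only (the helper name is invented; the pre-4.9 int write flag prototype of access_process_vm() is assumed, and these interfaces are consumed by in-kernel callers such as ptrace):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: read len bytes from another task's address space. */
static int peek_task_memory(struct task_struct *tsk, unsigned long addr,
                            void *buf, int len)
{
        /* write == 0: copy from the remote mm into buf; non-zero would poke. */
        int copied = access_process_vm(tsk, addr, buf, len, 0);

        return copied == len ? 0 : -EFAULT;
}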
hugetlb.c
    2289  struct ctl_table *table, int write,   in hugetlb_sysctl_handler_common() argument
    2301  ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);   in hugetlb_sysctl_handler_common()
    2305  if (write)   in hugetlb_sysctl_handler_common()
    2312  int hugetlb_sysctl_handler(struct ctl_table *table, int write,   in hugetlb_sysctl_handler() argument
    2316  return hugetlb_sysctl_handler_common(false, table, write,   in hugetlb_sysctl_handler()
    2321  int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,   in hugetlb_mempolicy_sysctl_handler() argument
    2324  return hugetlb_sysctl_handler_common(true, table, write,   in hugetlb_mempolicy_sysctl_handler()
    2329  int hugetlb_overcommit_handler(struct ctl_table *table, int write,   in hugetlb_overcommit_handler() argument
    2342  if (write && hstate_is_gigantic(h))   in hugetlb_overcommit_handler()
    2347  ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);   in hugetlb_overcommit_handler()
    [all …]
compaction.c
    1505  int sysctl_compaction_handler(struct ctl_table *table, int write,   in sysctl_compaction_handler() argument
    1508  if (write)   in sysctl_compaction_handler()
    1514  int sysctl_extfrag_handler(struct ctl_table *table, int write,   in sysctl_extfrag_handler() argument
    1517  proc_dointvec_minmax(table, write, buffer, length, ppos);   in sysctl_extfrag_handler()
page_alloc.c
    3485  int numa_zonelist_order_handler(struct ctl_table *table, int write,   in numa_zonelist_order_handler() argument
    3494  if (write) {   in numa_zonelist_order_handler()
    3501  ret = proc_dostring(table, write, buffer, length, ppos);   in numa_zonelist_order_handler()
    3504  if (write) {   in numa_zonelist_order_handler()
    5874  int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,   in module_init()
    5879  rc = proc_dointvec_minmax(table, write, buffer, length, ppos);   in module_init()
    5883  if (write) {   in module_init()
    5891  int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,   in sysctl_min_unmapped_ratio_sysctl_handler() argument
    5897  rc = proc_dointvec_minmax(table, write, buffer, length, ppos);   in sysctl_min_unmapped_ratio_sysctl_handler()
    5907  int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,   in sysctl_min_slab_ratio_sysctl_handler()
    [all …]
swap.c
    371  int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,   in get_kernel_pages() argument
    399  int get_kernel_page(unsigned long start, int write, struct page **pages)   in get_kernel_page() argument
    406  return get_kernel_pages(&kiov, 1, write, pages);   in get_kernel_page()
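get_kernel_pages() and get_kernel_page() mirror the GUP calling convention for buffers that are already in kernel memory: the caller describes the buffer with kvec segments and receives referenced struct page pointers. A small illustrative caller (hypothetical; as the single-page get_kernel_page() wrapper above suggests, each segment is expected to describe exactly one page):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Hypothetical helper: take references on the pages backing nr page-sized
 * kernel buffers. */
static int grab_kernel_buffer_pages(void **bufs, int nr, struct page **pages)
{
        struct kvec kiov[8];
        int seg;

        if (nr > ARRAY_SIZE(kiov))
                return -EINVAL;

        for (seg = 0; seg < nr; seg++) {
                kiov[seg].iov_base = bufs[seg];
                kiov[seg].iov_len = PAGE_SIZE;  /* one page per segment */
        }

        /* Returns how many segments were resolved to referenced pages. */
        return get_kernel_pages(kiov, nr, 0, pages);
}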
memcontrol.c
    5140  .write = mem_cgroup_reset,
    5146  .write = mem_cgroup_write,
    5152  .write = mem_cgroup_write,
    5158  .write = mem_cgroup_reset,
    5167  .write = mem_cgroup_force_empty_write,
    5176  .write = memcg_write_event_control,
    5209  .write = mem_cgroup_write,
    5220  .write = mem_cgroup_reset,
    5226  .write = mem_cgroup_reset,
    5249  .write = mem_cgroup_reset,
    [all …]
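These memcontrol.c hits, like the hugetlb_cgroup.c ones above, are cftype initializers: each control file names a ->write handler that the cgroup/kernfs glue calls with an already-copied, writable buffer. A hypothetical control file in the same shape (the file name, handler and parsing are invented; the post-3.15 kernfs-based cftype ->write prototype is assumed):

#include <linux/cgroup.h>
#include <linux/kernel.h>
#include <linux/kernfs.h>
#include <linux/string.h>

static ssize_t example_limit_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        unsigned long long val;
        int ret;

        ret = kstrtoull(strstrip(buf), 0, &val);
        if (ret)
                return ret;

        /* ... apply val to the state hanging off of_css(of) ... */
        return nbytes;  /* report everything consumed */
}

static struct cftype example_files[] = {
        {
                .name   = "limit_in_bytes",
                .write  = example_limit_write,
        },
        { }     /* terminator */
};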
backing-dev.c
    656  int pdflush_proc_obsolete(struct ctl_table *table, int write,   in pdflush_proc_obsolete() argument
slab_common.c
    943  .write = slabinfo_write,
kmemleak.c
    1724  .write = kmemleak_write,
shmem.c
    3127  .write = new_sync_write,
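The remaining hits (slabinfo_write, kmemleak_write, new_sync_write) assign ->write in file_operations tables rather than taking write as a flag. For completeness, a hypothetical control-file handler in that shape (names invented; standard copy-from-user parsing, nothing specific to the files listed above):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t example_ctl_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
{
        char kbuf[32];
        unsigned long val;

        if (count >= sizeof(kbuf))
                return -EINVAL;
        if (copy_from_user(kbuf, ubuf, count))
                return -EFAULT;
        kbuf[count] = '\0';

        if (kstrtoul(strstrip(kbuf), 0, &val))
                return -EINVAL;

        /* ... act on val ... */
        return count;   /* report all bytes consumed */
}

static const struct file_operations example_ctl_fops = {
        .owner  = THIS_MODULE,
        .write  = example_ctl_write,
        .llseek = noop_llseek,
};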