Searched refs:dirty (Results 1 – 8 of 8) sorted by relevance
/mm/
page-writeback.c:
    141: unsigned long dirty;  /* file_dirty + write + nfs */  [member]
    483: unsigned long dirty;  [local in node_dirty_limit()]
    486: dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *  [in node_dirty_limit()]
    489: dirty = vm_dirty_ratio * node_memory / 100;  [in node_dirty_limit()]
    492: dirty += dirty / 4;  [in node_dirty_limit()]
    494: return dirty;  [in node_dirty_limit()]
    733: unsigned long clean = filepages - min(filepages, mdtc->dirty);  [in mdtc_calc_avail()]
    734: unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);  [in mdtc_calc_avail()]
    810: unsigned long dirty,  [argument of pos_ratio_polynom()]
    816: x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,  [in pos_ratio_polynom()]
    [all …]
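The node_dirty_limit() matches trace how the per-node dirty threshold is derived: either vm_dirty_bytes converted to pages (and, in the truncated line 486, scaled by the node's share of memory), or vm_dirty_ratio percent of the node's memory, with a 25% bonus added for certain callers (the condition guarding line 492 is not visible in the snippet). A minimal userspace sketch of that arithmetic, assuming a 4 KiB page size and folding the node scaling and the bonus condition into stand-in parameters:

    /* Userspace sketch of the node_dirty_limit() arithmetic visible above.
     * PAGE_SIZE_BYTES and the "boost" flag are stand-ins; the real function
     * also scales the vm_dirty_bytes case by the node's share of memory and
     * only applies the +25% bonus for certain tasks. */
    #include <stdio.h>

    #define PAGE_SIZE_BYTES 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long node_dirty_limit_sketch(unsigned long node_memory_pages,
                                                 unsigned long vm_dirty_bytes,
                                                 unsigned long vm_dirty_ratio,
                                                 int boost)
    {
        unsigned long dirty;

        if (vm_dirty_bytes)
            dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE_BYTES);
        else
            dirty = vm_dirty_ratio * node_memory_pages / 100;

        if (boost)              /* "dirty += dirty / 4" in the original */
            dirty += dirty / 4;

        return dirty;
    }

    int main(void)
    {
        /* 1 GiB node, default vm_dirty_ratio of 20%, no boost. */
        printf("%lu dirty pages allowed\n",
               node_dirty_limit_sketch(262144, 0, 20, 0));
        return 0;
    }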
memory-failure.c:
    852: #define dirty (1UL << PG_dirty)  [macro]
    882: { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
    883: { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
    885: { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
    886: { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
    888: { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
    889: { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
    891: { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
    892: { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
    900: #undef dirty
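These matches show a mask/value dispatch table: each row pairs the page flags to inspect with the value they must have, a message, and a recovery handler, and `dirty` is temporarily #defined to the PG_dirty bit so the rows read naturally, then #undef'd at line 900. A simplified userspace sketch of the same table pattern, with invented flag bits and messages standing in for the PG_*/MF_MSG_* names:

    #include <stdio.h>

    /* Temporary shorthands so the table rows read naturally, mirroring the
     * "#define dirty (1UL << PG_dirty)" ... "#undef dirty" pattern above. */
    #define lru   (1UL << 0)
    #define dirty (1UL << 1)

    struct action {
        unsigned long mask;   /* which flag bits to look at */
        unsigned long value;  /* what those bits must equal */
        const char *msg;
    };

    static const struct action actions[] = {
        { lru | dirty, lru | dirty, "dirty LRU page" },
        { lru | dirty, lru,         "clean LRU page" },
        { 0,           0,           "unknown page"   },  /* catch-all row */
    };

    #undef dirty
    #undef lru

    static const char *classify(unsigned long flags)
    {
        const struct action *a;

        /* First row whose masked bits match wins; the catch-all always matches. */
        for (a = actions; ; a++)
            if ((flags & a->mask) == a->value)
                return a->msg;
    }

    int main(void)
    {
        printf("%s\n", classify((1UL << 0) | (1UL << 1)));  /* lru|dirty */
        return 0;
    }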
pgtable-generic.c:
    66: pte_t entry, int dirty)  [argument of ptep_set_access_flags()]
    107: pmd_t entry, int dirty)  [argument of pmdp_set_access_flags()]
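Both matches are the `int dirty` argument of the access-flag setters: helpers that install a more-permissive PTE/PMD (accessed/dirty/write bits) and return whether the entry actually changed, flushing only in that case. A loose userspace model of that contract, with a stand-in pte type and the architecture-specific use of the dirty hint left out:

    #include <stdio.h>

    typedef struct { unsigned long val; } pte_t;   /* stand-in, not the kernel type */

    static int set_access_flags_sketch(pte_t *ptep, pte_t entry, int dirty)
    {
        int changed = (ptep->val != entry.val);

        /* The generic helpers in pgtable-generic.c install the new entry and
         * flush only when it changed; how the "dirty" hint is used is
         * architecture-specific, so this sketch merely accepts it. */
        (void)dirty;
        if (changed)
            *ptep = entry;
        return changed;
    }

    int main(void)
    {
        pte_t pte  = { .val = 0x1 };        /* present                   */
        pte_t want = { .val = 0x1 | 0x2 };  /* present + (pretend) dirty */

        printf("changed: %d\n", set_access_flags_sketch(&pte, want, 1));
        return 0;
    }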
vmscan.c:
    152: unsigned int dirty;  [member]
    1091: bool *dirty, bool *writeback)  [argument of page_check_dirty_writeback()]
    1101: *dirty = false;  [in page_check_dirty_writeback()]
    1107: *dirty = PageDirty(page);  [in page_check_dirty_writeback()]
    1116: mapping->a_ops->is_dirty_writeback(page, dirty, writeback);  [in page_check_dirty_writeback()]
    1141: bool dirty, writeback, may_enter_fs;  [local in shrink_page_list()]
    1174: page_check_dirty_writeback(page, &dirty, &writeback);  [in shrink_page_list()]
    1175: if (dirty || writeback)  [in shrink_page_list()]
    1178: if (dirty && !writeback)  [in shrink_page_list()]
    1188: if (((dirty || writeback) && mapping &&  [in shrink_page_list()]
    [all …]
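The reclaim matches show shrink_page_list() asking page_check_dirty_writeback() for a page's dirty/writeback state before deciding how to treat it: report the page flags, but defer to an optional per-address-space is_dirty_writeback() callback when the filesystem tracks those states itself. A userspace sketch of that helper pattern, using stand-in struct page / a_ops types (the exact condition for the early return at line 1101 is not visible in the snippet, so an "anonymous" flag stands in for it):

    #include <stdbool.h>
    #include <stdio.h>

    struct page;

    struct a_ops {
        void (*is_dirty_writeback)(struct page *page, bool *dirty, bool *writeback);
    };

    struct page {
        bool anon;            /* stand-in for "not tracked by this heuristic" */
        bool pg_dirty;
        bool pg_writeback;
        const struct a_ops *a_ops;
    };

    static void page_check_dirty_writeback(struct page *page,
                                            bool *dirty, bool *writeback)
    {
        /* Pages the heuristic does not apply to report clean/idle. */
        if (page->anon) {
            *dirty = false;
            *writeback = false;
            return;
        }

        *dirty = page->pg_dirty;
        *writeback = page->pg_writeback;

        /* Let the filesystem override the flags if it keeps its own state. */
        if (page->a_ops && page->a_ops->is_dirty_writeback)
            page->a_ops->is_dirty_writeback(page, dirty, writeback);
    }

    int main(void)
    {
        struct page p = { .anon = false, .pg_dirty = true, .pg_writeback = false };
        bool dirty, writeback;

        page_check_dirty_writeback(&p, &dirty, &writeback);
        printf("dirty=%d writeback=%d\n", dirty, writeback);
        return 0;
    }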
vmalloc.c:
    1498: unsigned long free, dirty;  [member]
    1576: vb->dirty = 0;  [in new_vmap_block()]
    1619: if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))  [in purge_fragmented_blocks()]
    1623: if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {  [in purge_fragmented_blocks()]
    1625: vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */  [in purge_fragmented_blocks()]
    1731: vb->dirty += 1UL << order;  [in vb_free()]
    1732: if (vb->dirty == VMAP_BBMAP_BITS) {  [in vb_free()]
    1756: if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {  [in _vm_unmap_aliases()]
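Here each vmap block tracks how many of its VMAP_BBMAP_BITS pages are still free and how many have been handed back (dirty). vb_free() grows the dirty count and releases the block once everything is dirty; purge_fragmented_blocks() reclaims blocks whose pages are all either free or dirty but not entirely dirty, marking them fully dirty so they are not purged twice. A userspace sketch of just that accounting, with the bitmaps, locks, and lists of the real code left out:

    #include <stdbool.h>
    #include <stdio.h>

    #define VMAP_BBMAP_BITS 1024UL

    struct vmap_block {
        unsigned long free;   /* pages still available in this block  */
        unsigned long dirty;  /* pages handed back and awaiting flush */
    };

    /* vb_free()-style accounting: returning 2^order pages grows the dirty
     * count; once every page is dirty the whole block can be released. */
    static bool vb_free_sketch(struct vmap_block *vb, unsigned int order)
    {
        vb->dirty += 1UL << order;
        return vb->dirty == VMAP_BBMAP_BITS;   /* caller frees the block */
    }

    /* purge_fragmented_blocks()-style test: a block fully accounted for by
     * free + dirty, yet not already entirely dirty, is a purge candidate;
     * marking it fully dirty prevents purging it again. */
    static bool purge_candidate(struct vmap_block *vb)
    {
        if (vb->free + vb->dirty == VMAP_BBMAP_BITS &&
            vb->dirty != VMAP_BBMAP_BITS) {
            vb->dirty = VMAP_BBMAP_BITS;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct vmap_block a = { .free = 960, .dirty = 64 };    /* fragmented */
        struct vmap_block b = { .free = 0,   .dirty = 1008 };  /* almost all returned */

        printf("purge candidate: %d\n", purge_candidate(&a));
        printf("release after vb_free: %d\n", vb_free_sketch(&b, 4)); /* +16 -> 1024 */
        return 0;
    }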
migrate.c:
    387: int dirty;  [local in migrate_page_move_mapping()]
    437: dirty = PageDirty(page);  [in migrate_page_move_mapping()]
    438: if (dirty) {  [in migrate_page_move_mapping()]
    487: if (dirty && mapping_can_writeback(mapping)) {  [in migrate_page_move_mapping()]
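The migration matches sample the old page's dirty bit before the mapping is switched, carry it over to the new page, and later fix up dirty accounting, but only for mappings that can write back. A userspace sketch of that flow, with stand-in struct types and a single stand-in counter per node:

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool dirty; };
    struct node_stats { long nr_dirty; };          /* stand-in for NR_FILE_DIRTY-style counters */
    struct address_space { bool can_writeback; };

    static void migrate_dirty_state(struct address_space *mapping,
                                    struct page *newpage, struct node_stats *newstats,
                                    struct page *oldpage, struct node_stats *oldstats)
    {
        bool dirty = oldpage->dirty;               /* sampled before switching the mapping */

        if (dirty) {
            oldpage->dirty = false;                /* the dirty state travels with the data */
            newpage->dirty = true;
        }

        /* Accounting only matters for mappings that can write back; old and
         * new page may sit on different nodes, so the counter moves too. */
        if (dirty && mapping->can_writeback) {
            oldstats->nr_dirty--;
            newstats->nr_dirty++;
        }
    }

    int main(void)
    {
        struct address_space as = { .can_writeback = true };
        struct node_stats from = { .nr_dirty = 1 }, to = { .nr_dirty = 0 };
        struct page oldp = { .dirty = true }, newp = { .dirty = false };

        migrate_dirty_state(&as, &newp, &to, &oldp, &from);
        printf("old dirty=%d new dirty=%d, from=%ld to=%ld\n",
               oldp.dirty, newp.dirty, from.nr_dirty, to.nr_dirty);
        return 0;
    }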
Kconfig:
    541: soft-dirty bit on pte-s. This bit it set when someone writes
    542: into a page just as regular dirty bit, but unlike the latter
    545: See Documentation/admin-guide/mm/soft-dirty.rst for more details.
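The Kconfig help text describes the soft-dirty bit that user space can clear and re-read to detect which pages a task has written. On a kernel built with this option, Documentation/admin-guide/mm/soft-dirty.rst documents the interface: write "4" to /proc/<pid>/clear_refs to clear the bits, then test bit 55 of the page's /proc/<pid>/pagemap entry. A small example with minimal error handling:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        char *buf = malloc(page_size);
        int clear_fd, map_fd;
        uint64_t entry = 0;
        off_t off;

        /* Clear all soft-dirty bits for this process. */
        clear_fd = open("/proc/self/clear_refs", O_WRONLY);
        write(clear_fd, "4", 1);
        close(clear_fd);

        buf[0] = 1;                          /* write to the page again */

        /* Each pagemap entry is 8 bytes, indexed by virtual page number;
         * bit 55 is the soft-dirty bit. */
        map_fd = open("/proc/self/pagemap", O_RDONLY);
        off = ((uintptr_t)buf / page_size) * sizeof(entry);
        pread(map_fd, &entry, sizeof(entry), off);
        close(map_fd);

        printf("soft-dirty: %d\n", (int)((entry >> 55) & 1));
        free(buf);
        return 0;
    }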
memory.c:
    4862: unsigned int dirty = flags & FAULT_FLAG_WRITE;  [local in __handle_mm_fault()]
    4889: if (dirty && !pud_write(orig_pud)) {  [in __handle_mm_fault()]
    4930: if (dirty && !pmd_write(orig_pmd)) {  [in __handle_mm_fault()]
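In the fault-handling matches, "dirty" simply means "this is a write fault": a write fault against a huge PUD/PMD entry that is not writable has to take the write-protect path rather than just have its access bits updated. A small sketch of that check, with the flag value and entry type as stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define FAULT_FLAG_WRITE 0x1u          /* stand-in flag value */

    struct huge_entry { bool writable; };  /* stand-in for a PUD/PMD entry */

    static bool needs_wp_handling(unsigned int flags, const struct huge_entry *e)
    {
        unsigned int dirty = flags & FAULT_FLAG_WRITE;

        /* Mirrors "if (dirty && !pmd_write(orig_pmd))" above. */
        return dirty && !e->writable;
    }

    int main(void)
    {
        struct huge_entry ro = { .writable = false };

        printf("write fault on RO huge entry -> WP path: %d\n",
               needs_wp_handling(FAULT_FLAG_WRITE, &ro));
        return 0;
    }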