Searched refs:region (Results 1 – 5 of 5) sorted by relevance
435  struct vm_region *region, *last;                         in validate_nommu_regions() local
447  region = rb_entry(p, struct vm_region, vm_rb);           in validate_nommu_regions()
450  BUG_ON(region->vm_end <= region->vm_start);              in validate_nommu_regions()
451  BUG_ON(region->vm_top < region->vm_end);                 in validate_nommu_regions()
452  BUG_ON(region->vm_start < last->vm_top);                 in validate_nommu_regions()
466  static void add_nommu_region(struct vm_region *region)   in add_nommu_region() argument
478  if (region->vm_start < pregion->vm_start)                in add_nommu_region()
480  else if (region->vm_start > pregion->vm_start)           in add_nommu_region()
482  else if (pregion == region)                              in add_nommu_region()
488  rb_link_node(&region->vm_rb, parent, p);                 in add_nommu_region()
[all …]
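For orientation, the add_nommu_region() hits above are the standard rbtree descent: walk down from the root keyed on vm_start, remember the link to patch, then splice the new node in. Below is a minimal userspace sketch of that walk; the plain left/right pointers, the add_region() helper, and the main() driver are simplified stand-ins for illustration, not the kernel's rb_node API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct vm_region; left/right replace rb_node. */
struct vm_region {
	unsigned long vm_start, vm_end;
	struct vm_region *left, *right;
};

static struct vm_region *nommu_region_tree;

/* Mirrors the descent in add_nommu_region(): keep a pointer to the link
 * that will be patched, ordered by vm_start; a duplicate start is a bug. */
static void add_region(struct vm_region *region)
{
	struct vm_region **p = &nommu_region_tree;

	while (*p) {
		struct vm_region *pregion = *p;

		if (region->vm_start < pregion->vm_start)
			p = &pregion->left;
		else if (region->vm_start > pregion->vm_start)
			p = &pregion->right;
		else
			abort();	/* the kernel BUG()s on a re-insert */
	}
	region->left = region->right = NULL;
	*p = region;	/* the kernel does rb_link_node() + rb_insert_color() */
}

int main(void)
{
	struct vm_region a = { .vm_start = 0x2000, .vm_end = 0x3000 };
	struct vm_region b = { .vm_start = 0x1000, .vm_end = 0x2000 };

	add_region(&a);
	add_region(&b);
	printf("leftmost region starts at %#lx\n",
	       nommu_region_tree->left->vm_start);
	return 0;
}

The real kernel version additionally tracks the parent node so rb_insert_color() can rebalance, and the BUG_ON() checks in validate_nommu_regions() then assert the resulting invariants seen in lines 450-452: vm_start < vm_end <= vm_top, and no overlap with the preceding region.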
494 CMA reserves a region of memory and allows only movable pages to
294  struct damon_region *region;                                           in damon_reclaim_turn() local
317  region = damon_new_region(monitor_region_start, monitor_region_end);   in damon_reclaim_turn()
318  if (!region)                                                           in damon_reclaim_turn()
320  damon_add_region(region, target);                                      in damon_reclaim_turn()
341  damon_destroy_region(region, target);                                  in damon_reclaim_turn()
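The damon_reclaim_turn() hits show the usual DAMON setup shape: allocate one region spanning the address range to monitor, attach it to a target, and destroy it again on a later failure path. The sketch below reproduces only that call sequence; the struct layouts and helper bodies are hypothetical stand-ins, not the real DAMON implementation.

#include <errno.h>
#include <stdlib.h>

/* Hypothetical, pared-down stand-ins for DAMON's region/target types. */
struct damon_region { unsigned long start, end; };
struct damon_target { struct damon_region *region; };

static struct damon_region *damon_new_region(unsigned long s, unsigned long e)
{
	struct damon_region *r = malloc(sizeof(*r));

	if (r) {
		r->start = s;
		r->end = e;
	}
	return r;
}

static void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	t->region = r;
}

static void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	t->region = NULL;
	free(r);
}

/* Same shape as the excerpt: allocate, attach, unwind on later failure. */
static int setup_monitoring(struct damon_target *t,
			    unsigned long start, unsigned long end)
{
	struct damon_region *region = damon_new_region(start, end);

	if (!region)
		return -ENOMEM;
	damon_add_region(region, t);

	if (end <= start) {		/* stand-in for a later setup failure */
		damon_destroy_region(region, t);
		return -EINVAL;
	}
	return 0;
}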
35  struct damon_region *region;                    in damon_new_region() local
37  region = kmalloc(sizeof(*region), GFP_KERNEL);  in damon_new_region()
38  if (!region)                                    in damon_new_region()
41  region->ar.start = start;                       in damon_new_region()
42  region->ar.end = end;                           in damon_new_region()
43  region->nr_accesses = 0;                        in damon_new_region()
44  INIT_LIST_HEAD(&region->list);                  in damon_new_region()
46  region->age = 0;                                in damon_new_region()
47  region->last_nr_accesses = 0;                   in damon_new_region()
49  return region;                                  in damon_new_region()
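The INIT_LIST_HEAD() call in damon_new_region() is there because regions are chained on a per-target circular doubly linked list and walked through their embedded list node. Here is a self-contained sketch of that embedded-list pattern, with a hand-rolled list_head and container_of-style pointer arithmetic standing in for the kernel's <linux/list.h> helpers.

#include <stddef.h>
#include <stdio.h>

/* Hand-rolled stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* Pared-down region with an embedded list node, as in struct damon_region. */
struct region {
	unsigned long start, end;
	struct list_head list;
};

int main(void)
{
	struct list_head target;	/* stands in for a target's region list */
	struct region r1 = { .start = 0x1000, .end = 0x2000 };
	struct region r2 = { .start = 0x2000, .end = 0x4000 };
	struct list_head *pos;

	INIT_LIST_HEAD(&target);
	INIT_LIST_HEAD(&r1.list);
	INIT_LIST_HEAD(&r2.list);
	list_add_tail(&r1.list, &target);
	list_add_tail(&r2.list, &target);

	for (pos = target.next; pos != &target; pos = pos->next) {
		/* container_of() by hand: recover the region from its node. */
		struct region *r = (struct region *)
			((char *)pos - offsetof(struct region, list));
		printf("region [%#lx, %#lx)\n", r->start, r->end);
	}
	return 0;
}

Embedding the node inside the object, rather than the object inside the node, is what lets one structure sit on several lists at once; it is also why damon_new_region() must initialize region->list before the region is linked anywhere.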
9 access frequency of each memory region. The information can be useful