#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

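/*
 * Illustrative only, not part of this header: a minimal userspace sketch of
 * how a region is opted in to merging (buf and len are assumed names; len
 * should be page-aligned).  madvise(MADV_MERGEABLE) reaches ksm_madvise()
 * below, and ksmd scans the region only while KSM is enabled through
 * /sys/kernel/mm/ksm/run:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 */
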
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

/* Called at fork(): a child of a KSM-registered mm registers with ksmd too. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

/* Called when an mm is torn down: drop it from ksmd's scan list if present. */
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page stores its stable_node in page->mapping, tagged with
 * PAGE_MAPPING_KSM; page_rmapping() masks the tag bits off again.
 */
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

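/*
 * Illustrative only: an approximate sketch of the calling pattern in
 * do_swap_page() (paraphrased, not copied from mm/memory.c; ret and the
 * out label stand for the caller's error handling).  The function returns
 * the original page when it is safe to reuse, a fresh copy when a
 * cross-anon_vma KSM page must be duplicated, or NULL if the copy cannot
 * be allocated:
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	if (!page) {
 *		ret = VM_FAULT_OOM;
 *		goto out;
 *	}
 */
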
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */