/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>

/**
 * page_is_file_lru - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a regular filesystem backed page cache page or a
 * lazily freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is
 * a normal anonymous page, a tmpfs page or an otherwise RAM or swap backed
 * page. Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_lru(struct page *page)
{
	return !PageSwapBacked(page);
}
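
/*
 * Example (illustrative sketch): reclaim-style code can use
 * page_is_file_lru() to bucket pages by LRU type, e.g.
 *
 *	if (page_is_file_lru(page))
 *		nr_file++;
 *	else
 *		nr_anon++;
 *
 * where nr_file/nr_anon are hypothetical counters local to the caller.
 */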

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
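
/*
 * Note: __update_lru_size() keeps two sets of counters in sync for every
 * LRU change: the per-node (and, with memcg, per-cgroup) NR_LRU_BASE
 * statistics via __mod_lruvec_state(), and the per-zone NR_ZONE_LRU_BASE
 * statistics via __mod_zone_page_state(). Callers pass a positive
 * nr_pages when adding pages and a negative one when removing them, e.g.
 *
 *	__update_lru_size(lruvec, LRU_INACTIVE_ANON, zid, -nr);
 *
 * The __-prefixed stat helpers are the non-atomic variants, so callers
 * are expected to hold the LRU lock with interrupts disabled.
 */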

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
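
/*
 * Note: update_lru_size() is the variant the add/del helpers below call.
 * With CONFIG_MEMCG it additionally adjusts the per-memcg per-zone LRU
 * size cache via mem_cgroup_update_lru_size(); without memcg that cache
 * does not exist and the node/zone counters above are sufficient.
 */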

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}
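
/*
 * Example (illustrative sketch): a typical caller sets the page flags
 * first and derives the target list from the page itself:
 *
 *	SetPageLRU(page);
 *	add_page_to_lru_list(page, lruvec, page_lru(page));
 *
 * The LRU lock must already be held; which lock that is depends on the
 * kernel baseline (pgdat->lru_lock on older kernels, lruvec->lru_lock
 * on newer ones). These helpers do not take it themselves.
 */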

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}
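
/*
 * Example (illustrative sketch): rotating a page to the tail of its
 * current list, e.g. so that reclaim revisits it last:
 *
 *	enum lru_list lru = page_lru(page);
 *
 *	del_page_from_lru_list(page, lruvec, lru);
 *	add_page_to_lru_list_tail(page, lruvec, lru);
 *
 * Both calls must happen in the same LRU lock critical section so the
 * page never becomes unreachable while off the list.
 */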

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
#ifdef CONFIG_MEM_PURGEABLE
	if (PagePurgeable(page))
		return LRU_INACTIVE_PURGEABLE;
#endif

	if (page_is_file_lru(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}
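
/*
 * Note: the base type exists for index arithmetic. In the mainline enum
 * layout LRU_ACTIVE is 1 and each active list directly follows its
 * inactive counterpart, so callers can compute
 *
 *	lru = page_lru_base_type(page) + LRU_ACTIVE;
 *
 * to get LRU_ACTIVE_ANON/LRU_ACTIVE_FILE. (CONFIG_MEM_PURGEABLE is a
 * vendor extension; whether LRU_INACTIVE_PURGEABLE follows the same
 * arithmetic depends on that patchset's enum layout.)
 */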

/**
 * page_off_lru - which LRU list was the page on? Also clears its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}
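
/*
 * Note: the non-atomic __ClearPage*() variants are safe here because
 * page_off_lru() runs when the page is being deleted from the LRU for
 * the last time (e.g. on the final release path), so no other user can
 * be updating the page flags concurrently.
 */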

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
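
/*
 * Example (illustrative sketch): page_lru() is typically paired with the
 * add/del helpers so that flag changes and list placement stay coherent,
 * e.g. deactivating a page:
 *
 *	del_page_from_lru_list(page, lruvec, page_lru(page));
 *	ClearPageActive(page);
 *	add_page_to_lru_list(page, lruvec, page_lru(page));
 *
 * i.e. recompute the target list after every flag update.
 */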

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}
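
/*
 * Example (illustrative sketch, following the locking rule documented
 * above): a reader that wants to keep using the name after dropping
 * mmap_lock must pin it first:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	...
 *	anon_vma_name_put(anon_name);
 *
 * Both helpers tolerate a NULL anon_name, so unnamed VMAs need no
 * special-casing.
 */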

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}
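
/*
 * Note: kref is backed by refcount_t, which saturates instead of
 * overflowing. anon_vma_name_reuse() therefore stops sharing a name
 * whose count is nearing REFCOUNT_MAX and falls back to allocating a
 * fresh copy. anon_vma_name_alloc() may return NULL on allocation
 * failure, which callers such as dup_anon_vma_name() simply treat as
 * "no name".
 */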

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name() because it generates a warning if
	 * mmap_lock is not held, which might be the case here.
	 */
	if (!vma->vm_file)
		anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}
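
/*
 * Example (illustrative sketch): names can be shared or duplicated, so
 * comparisons go through anon_vma_name_eq() rather than raw pointer
 * equality, e.g. when deciding whether two adjacent VMAs may merge:
 *
 *	if (anon_vma_name_eq(anon_vma_name(vma1), anon_vma_name(vma2)))
 *		...the VMAs agree on the name (or both are unnamed)...
 *
 * where vma1/vma2 are hypothetical adjacent VMAs. NULL means "no name",
 * and two NULLs compare equal via the pointer check.
 */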

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

#endif /* LINUX_MM_INLINE_H */