/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */

#define __def_gfpflag_names						\
	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"},	\
	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
	{(unsigned long)GFP_USER,		"GFP_USER"},		\
	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
	{(unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
	{(unsigned long)GFP_DMA,		"GFP_DMA"},		\
	{(unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
	{(unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
	{(unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
	{(unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
	{(unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
	{(unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
	{(unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
	{(unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
	{(unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
	{(unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
	{(unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
	{(unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
	{(unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}

#ifdef CONFIG_KASAN_HW_TAGS
#define __def_gfpflag_names_kasan ,						\
	{(unsigned long)__GFP_SKIP_ZERO,	   "__GFP_SKIP_ZERO"},		\
	{(unsigned long)__GFP_SKIP_KASAN_POISON,   "__GFP_SKIP_KASAN_POISON"},	\
	{(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
#else
#define __def_gfpflag_names_kasan
#endif

#define show_gfp_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_gfpflag_names __def_gfpflag_names_kasan			\
	) : "none"
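
/*
 * show_gfp_flags() is intended for use inside a tracepoint's TP_printk()
 * format string; a sketch of a typical caller (the event and field names
 * are not defined in this file) would be:
 *
 *	TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * At report time __print_flags() walks the table above in order, printing
 * every entry whose mask is fully set and clearing those bits, which is
 * why the compound GFP_* masks are listed before the individual __GFP_*
 * bits; all printed names are joined with '|'.
 */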
__print_flags(flags, "|", \ 65 __def_gfpflag_names __def_gfpflag_names_kasan \ 66 ) : "none" 67 68 #ifdef CONFIG_MMU 69 #define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string} 70 #else 71 #define IF_HAVE_PG_MLOCK(flag,string) 72 #endif 73 74 #ifdef CONFIG_ARCH_USES_PG_UNCACHED 75 #define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string} 76 #else 77 #define IF_HAVE_PG_UNCACHED(flag,string) 78 #endif 79 80 #ifdef CONFIG_MEMORY_FAILURE 81 #define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string} 82 #else 83 #define IF_HAVE_PG_HWPOISON(flag,string) 84 #endif 85 86 #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) 87 #define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string} 88 #else 89 #define IF_HAVE_PG_IDLE(flag,string) 90 #endif 91 92 #ifdef CONFIG_64BIT 93 #define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string} 94 #define IF_HAVE_PG_OEM_RESERVED(flag,string) ,{1UL << flag, string} 95 #else 96 #define IF_HAVE_PG_ARCH_2(flag,string) 97 #define IF_HAVE_PG_OEM_RESERVED(flag,string) 98 #endif 99 100 #ifdef CONFIG_KASAN_HW_TAGS 101 #define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string} 102 #else 103 #define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) 104 #endif 105 106 #define __def_pageflag_names \ 107 {1UL << PG_locked, "locked" }, \ 108 {1UL << PG_waiters, "waiters" }, \ 109 {1UL << PG_error, "error" }, \ 110 {1UL << PG_referenced, "referenced" }, \ 111 {1UL << PG_uptodate, "uptodate" }, \ 112 {1UL << PG_dirty, "dirty" }, \ 113 {1UL << PG_lru, "lru" }, \ 114 {1UL << PG_active, "active" }, \ 115 {1UL << PG_workingset, "workingset" }, \ 116 {1UL << PG_slab, "slab" }, \ 117 {1UL << PG_owner_priv_1, "owner_priv_1" }, \ 118 {1UL << PG_arch_1, "arch_1" }, \ 119 {1UL << PG_reserved, "reserved" }, \ 120 {1UL << PG_private, "private" }, \ 121 {1UL << PG_private_2, "private_2" }, \ 122 {1UL << PG_writeback, "writeback" }, \ 123 {1UL << PG_head, "head" }, \ 124 {1UL << PG_mappedtodisk, "mappedtodisk" }, \ 125 {1UL << PG_reclaim, "reclaim" }, \ 126 {1UL << PG_swapbacked, "swapbacked" }, \ 127 {1UL << PG_unevictable, "unevictable" } \ 128 IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \ 129 IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ 130 IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ 131 IF_HAVE_PG_IDLE(PG_young, "young" ) \ 132 IF_HAVE_PG_IDLE(PG_idle, "idle" ) \ 133 IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \ 134 IF_HAVE_PG_OEM_RESERVED(PG_oem_reserved,"oem_reserved" ) \ 135 IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") 136 137 #define show_page_flags(flags) \ 138 (flags) ? 
__print_flags(flags, "|", \ 139 __def_pageflag_names \ 140 ) : "none" 141 142 #if defined(CONFIG_X86) 143 #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } 144 #elif defined(CONFIG_PPC) 145 #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } 146 #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) 147 #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } 148 #elif !defined(CONFIG_MMU) 149 #define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } 150 #else 151 #define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" } 152 #endif 153 154 #ifdef CONFIG_MEM_SOFT_DIRTY 155 #define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name }, 156 #else 157 #define IF_HAVE_VM_SOFTDIRTY(flag,name) 158 #endif 159 160 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 161 # define IF_HAVE_UFFD_MINOR(flag, name) {flag, name}, 162 #else 163 # define IF_HAVE_UFFD_MINOR(flag, name) 164 #endif 165 166 #define __def_vmaflag_names \ 167 {VM_READ, "read" }, \ 168 {VM_WRITE, "write" }, \ 169 {VM_EXEC, "exec" }, \ 170 {VM_SHARED, "shared" }, \ 171 {VM_MAYREAD, "mayread" }, \ 172 {VM_MAYWRITE, "maywrite" }, \ 173 {VM_MAYEXEC, "mayexec" }, \ 174 {VM_MAYSHARE, "mayshare" }, \ 175 {VM_GROWSDOWN, "growsdown" }, \ 176 {VM_UFFD_MISSING, "uffd_missing" }, \ 177 IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ 178 {VM_PFNMAP, "pfnmap" }, \ 179 {VM_UFFD_WP, "uffd_wp" }, \ 180 {VM_LOCKED, "locked" }, \ 181 {VM_IO, "io" }, \ 182 {VM_SEQ_READ, "seqread" }, \ 183 {VM_RAND_READ, "randread" }, \ 184 {VM_DONTCOPY, "dontcopy" }, \ 185 {VM_DONTEXPAND, "dontexpand" }, \ 186 {VM_LOCKONFAULT, "lockonfault" }, \ 187 {VM_ACCOUNT, "account" }, \ 188 {VM_NORESERVE, "noreserve" }, \ 189 {VM_HUGETLB, "hugetlb" }, \ 190 {VM_SYNC, "sync" }, \ 191 __VM_ARCH_SPECIFIC_1 , \ 192 {VM_WIPEONFORK, "wipeonfork" }, \ 193 {VM_DONTDUMP, "dontdump" }, \ 194 IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ 195 {VM_MIXEDMAP, "mixedmap" }, \ 196 {VM_HUGEPAGE, "hugepage" }, \ 197 {VM_NOHUGEPAGE, "nohugepage" }, \ 198 {VM_MERGEABLE, "mergeable" } \ 199 200 #define show_vma_flags(flags) \ 201 (flags) ? __print_flags(flags, "|", \ 202 __def_vmaflag_names \ 203 ) : "none" 204 205 #ifdef CONFIG_COMPACTION 206 #define COMPACTION_STATUS \ 207 EM( COMPACT_SKIPPED, "skipped") \ 208 EM( COMPACT_DEFERRED, "deferred") \ 209 EM( COMPACT_CONTINUE, "continue") \ 210 EM( COMPACT_SUCCESS, "success") \ 211 EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ 212 EM( COMPACT_COMPLETE, "complete") \ 213 EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ 214 EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ 215 EMe(COMPACT_CONTENDED, "contended") 216 217 /* High-level compaction status feedback */ 218 #define COMPACTION_FAILED 1 219 #define COMPACTION_WITHDRAWN 2 220 #define COMPACTION_PROGRESS 3 221 222 #define compact_result_to_feedback(result) \ 223 ({ \ 224 enum compact_result __result = result; \ 225 (compaction_failed(__result)) ? COMPACTION_FAILED : \ 226 (compaction_withdrawn(__result)) ? 

#define COMPACTION_FEEDBACK		\
	EM(COMPACTION_FAILED,		"failed")	\
	EM(COMPACTION_WITHDRAWN,	"withdrawn")	\
	EMe(COMPACTION_PROGRESS,	"progress")

#define COMPACTION_PRIORITY						\
	EM(COMPACT_PRIO_SYNC_FULL,	"COMPACT_PRIO_SYNC_FULL")	\
	EM(COMPACT_PRIO_SYNC_LIGHT,	"COMPACT_PRIO_SYNC_LIGHT")	\
	EMe(COMPACT_PRIO_ASYNC,		"COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE						\
	IFDEF_ZONE_DMA(		EM (ZONE_DMA,	 "DMA"))	\
	IFDEF_ZONE_DMA32(	EM (ZONE_DMA32,	 "DMA32"))	\
				EM (ZONE_NORMAL, "Normal")	\
	IFDEF_ZONE_HIGHMEM(	EM (ZONE_HIGHMEM,"HighMem"))	\
				EMe(ZONE_MOVABLE,"Movable")

#define LRU_NAMES						\
	EM (LRU_INACTIVE_ANON,	"inactive_anon")		\
	EM (LRU_ACTIVE_ANON,	"active_anon")			\
	EM (LRU_INACTIVE_FILE,	"inactive_file")		\
	EM (LRU_ACTIVE_FILE,	"active_file")			\
	EMe(LRU_UNEVICTABLE,	"unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* The COMPACTION_FEEDBACK values are #defines, not enums, so they are not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}
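
/*
 * With EM()/EMe() now expanding to {value, "string"} pairs, the tables
 * above can be fed to __print_symbolic(); an illustrative (not defined
 * here) TP_printk() might read:
 *
 *	TP_printk("zone=%s lru=%s",
 *		  __print_symbolic(__entry->zone_idx, ZONE_TYPE),
 *		  __print_symbolic(__entry->lru, LRU_NAMES))
 *
 * The earlier TRACE_DEFINE_ENUM() pass exports the enum values themselves,
 * so userspace trace tools can resolve the enum names that end up embedded
 * in the event format strings.
 */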