1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #include <linux/node.h> 3 #include <linux/mmzone.h> 4 #include <linux/compaction.h> 5 /* 6 * The order of these masks is important. Matching masks will be seen 7 * first and the left over flags will end up showing by themselves. 8 * 9 * For example, if we have GFP_KERNEL before GFP_USER we wil get: 10 * 11 * GFP_KERNEL|GFP_HARDWALL 12 * 13 * Thus most bits set go first. 14 */ 15 16 /* These define the values that are enums (the bits) */ 17 #define TRACE_GFP_FLAGS_GENERAL \ 18 TRACE_GFP_EM(DMA) \ 19 TRACE_GFP_EM(HIGHMEM) \ 20 TRACE_GFP_EM(DMA32) \ 21 TRACE_GFP_EM(MOVABLE) \ 22 TRACE_GFP_EM(RECLAIMABLE) \ 23 TRACE_GFP_EM(HIGH) \ 24 TRACE_GFP_EM(IO) \ 25 TRACE_GFP_EM(FS) \ 26 TRACE_GFP_EM(ZERO) \ 27 TRACE_GFP_EM(DIRECT_RECLAIM) \ 28 TRACE_GFP_EM(KSWAPD_RECLAIM) \ 29 TRACE_GFP_EM(WRITE) \ 30 TRACE_GFP_EM(NOWARN) \ 31 TRACE_GFP_EM(RETRY_MAYFAIL) \ 32 TRACE_GFP_EM(NOFAIL) \ 33 TRACE_GFP_EM(NORETRY) \ 34 TRACE_GFP_EM(MEMALLOC) \ 35 TRACE_GFP_EM(COMP) \ 36 TRACE_GFP_EM(NOMEMALLOC) \ 37 TRACE_GFP_EM(HARDWALL) \ 38 TRACE_GFP_EM(THISNODE) \ 39 TRACE_GFP_EM(ACCOUNT) \ 40 TRACE_GFP_EM(ZEROTAGS) 41 42 #ifdef CONFIG_KASAN_HW_TAGS 43 # define TRACE_GFP_FLAGS_KASAN \ 44 TRACE_GFP_EM(SKIP_ZERO) \ 45 TRACE_GFP_EM(SKIP_KASAN) 46 #else 47 # define TRACE_GFP_FLAGS_KASAN 48 #endif 49 50 #ifdef CONFIG_LOCKDEP 51 # define TRACE_GFP_FLAGS_LOCKDEP \ 52 TRACE_GFP_EM(NOLOCKDEP) 53 #else 54 # define TRACE_GFP_FLAGS_LOCKDEP 55 #endif 56 57 #ifdef CONFIG_SLAB_OBJ_EXT 58 # define TRACE_GFP_FLAGS_SLAB \ 59 TRACE_GFP_EM(NO_OBJ_EXT) 60 #else 61 # define TRACE_GFP_FLAGS_SLAB 62 #endif 63 64 #define TRACE_GFP_FLAGS \ 65 TRACE_GFP_FLAGS_GENERAL \ 66 TRACE_GFP_FLAGS_KASAN \ 67 TRACE_GFP_FLAGS_LOCKDEP \ 68 TRACE_GFP_FLAGS_SLAB 69 70 #undef TRACE_GFP_EM 71 #define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT); 72 73 TRACE_GFP_FLAGS 74 75 /* Just in case these are ever used */ 76 TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT); 77 TRACE_DEFINE_ENUM(___GFP_LAST_BIT); 78 79 #define gfpflag_string(flag) {(__force unsigned long)flag, #flag} 80 81 #define __def_gfpflag_names \ 82 gfpflag_string(GFP_TRANSHUGE), \ 83 gfpflag_string(GFP_TRANSHUGE_LIGHT), \ 84 gfpflag_string(GFP_HIGHUSER_MOVABLE), \ 85 gfpflag_string(GFP_HIGHUSER), \ 86 gfpflag_string(GFP_USER), \ 87 gfpflag_string(GFP_KERNEL_ACCOUNT), \ 88 gfpflag_string(GFP_KERNEL), \ 89 gfpflag_string(GFP_NOFS), \ 90 gfpflag_string(GFP_ATOMIC), \ 91 gfpflag_string(GFP_NOIO), \ 92 gfpflag_string(GFP_NOWAIT), \ 93 gfpflag_string(GFP_DMA), \ 94 gfpflag_string(__GFP_HIGHMEM), \ 95 gfpflag_string(GFP_DMA32), \ 96 gfpflag_string(__GFP_HIGH), \ 97 gfpflag_string(__GFP_IO), \ 98 gfpflag_string(__GFP_FS), \ 99 gfpflag_string(__GFP_NOWARN), \ 100 gfpflag_string(__GFP_RETRY_MAYFAIL), \ 101 gfpflag_string(__GFP_NOFAIL), \ 102 gfpflag_string(__GFP_NORETRY), \ 103 gfpflag_string(__GFP_COMP), \ 104 gfpflag_string(__GFP_ZERO), \ 105 gfpflag_string(__GFP_NOMEMALLOC), \ 106 gfpflag_string(__GFP_MEMALLOC), \ 107 gfpflag_string(__GFP_HARDWALL), \ 108 gfpflag_string(__GFP_THISNODE), \ 109 gfpflag_string(__GFP_RECLAIMABLE), \ 110 gfpflag_string(__GFP_MOVABLE), \ 111 gfpflag_string(__GFP_ACCOUNT), \ 112 gfpflag_string(__GFP_WRITE), \ 113 gfpflag_string(__GFP_RECLAIM), \ 114 gfpflag_string(__GFP_DIRECT_RECLAIM), \ 115 gfpflag_string(__GFP_KSWAPD_RECLAIM), \ 116 gfpflag_string(__GFP_ZEROTAGS), \ 117 gfpflag_string(__GFP_CMA) 118 119 #ifdef CONFIG_KASAN_HW_TAGS 120 #define __def_gfpflag_names_kasan , \ 121 gfpflag_string(__GFP_SKIP_ZERO), \ 122 gfpflag_string(__GFP_SKIP_KASAN) 123 #else 124 #define __def_gfpflag_names_kasan 125 #endif 126 127 #define show_gfp_flags(flags) \ 128 (flags) ? __print_flags(flags, "|", \ 129 __def_gfpflag_names __def_gfpflag_names_kasan \ 130 ) : "none" 131 132 #ifdef CONFIG_MMU 133 #define IF_HAVE_PG_MLOCK(_name) ,{1UL << PG_##_name, __stringify(_name)} 134 #else 135 #define IF_HAVE_PG_MLOCK(_name) 136 #endif 137 138 #ifdef CONFIG_MEMORY_FAILURE 139 #define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)} 140 #else 141 #define IF_HAVE_PG_HWPOISON(_name) 142 #endif 143 144 #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) 145 #define IF_HAVE_PG_IDLE(_name) ,{1UL << PG_##_name, __stringify(_name)} 146 #else 147 #define IF_HAVE_PG_IDLE(_name) 148 #endif 149 150 #ifdef CONFIG_ARCH_USES_PG_ARCH_2 151 #define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)} 152 #else 153 #define IF_HAVE_PG_ARCH_2(_name) 154 #endif 155 156 #ifdef CONFIG_ARCH_USES_PG_ARCH_3 157 #define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)} 158 #else 159 #define IF_HAVE_PG_ARCH_3(_name) 160 #endif 161 162 #ifdef CONFIG_64BIT 163 #define IF_HAVE_PG_OEM_RESERVED(_name) ,{1UL << PG_##_name, __stringify(_name)} 164 #else 165 #define IF_HAVE_PG_OEM_RESERVED(_name) 166 #endif 167 168 #define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) } 169 170 #define __def_pageflag_names \ 171 DEF_PAGEFLAG_NAME(locked), \ 172 DEF_PAGEFLAG_NAME(waiters), \ 173 DEF_PAGEFLAG_NAME(error), \ 174 DEF_PAGEFLAG_NAME(referenced), \ 175 DEF_PAGEFLAG_NAME(uptodate), \ 176 DEF_PAGEFLAG_NAME(dirty), \ 177 DEF_PAGEFLAG_NAME(lru), \ 178 DEF_PAGEFLAG_NAME(active), \ 179 DEF_PAGEFLAG_NAME(workingset), \ 180 DEF_PAGEFLAG_NAME(owner_priv_1), \ 181 DEF_PAGEFLAG_NAME(owner_2), \ 182 DEF_PAGEFLAG_NAME(arch_1), \ 183 DEF_PAGEFLAG_NAME(reserved), \ 184 DEF_PAGEFLAG_NAME(private), \ 185 DEF_PAGEFLAG_NAME(private_2), \ 186 DEF_PAGEFLAG_NAME(writeback), \ 187 DEF_PAGEFLAG_NAME(head), \ 188 DEF_PAGEFLAG_NAME(reclaim), \ 189 DEF_PAGEFLAG_NAME(swapbacked), \ 190 DEF_PAGEFLAG_NAME(unevictable), \ 191 DEF_PAGEFLAG_NAME(dropbehind) \ 192 IF_HAVE_PG_MLOCK(mlocked) \ 193 IF_HAVE_PG_HWPOISON(hwpoison) \ 194 IF_HAVE_PG_IDLE(idle) \ 195 IF_HAVE_PG_IDLE(young) \ 196 IF_HAVE_PG_ARCH_2(arch_2) \ 197 IF_HAVE_PG_ARCH_3(arch_3) \ 198 IF_HAVE_PG_OEM_RESERVED(oem_reserved_1) \ 199 IF_HAVE_PG_OEM_RESERVED(oem_reserved_2) \ 200 IF_HAVE_PG_OEM_RESERVED(oem_reserved_3) \ 201 IF_HAVE_PG_OEM_RESERVED(oem_reserved_4) 202 203 #define show_page_flags(flags) \ 204 (flags) ? __print_flags(flags, "|", \ 205 __def_pageflag_names \ 206 ) : "none" 207 208 #if defined(CONFIG_X86) 209 #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } 210 #elif defined(CONFIG_PPC64) 211 #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } 212 #elif defined(CONFIG_PARISC) 213 #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } 214 #elif !defined(CONFIG_MMU) 215 #define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } 216 #else 217 #define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" } 218 #endif 219 220 #ifdef CONFIG_MEM_SOFT_DIRTY 221 #define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name }, 222 #else 223 #define IF_HAVE_VM_SOFTDIRTY(flag,name) 224 #endif 225 226 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 227 # define IF_HAVE_UFFD_MINOR(flag, name) {flag, name}, 228 #else 229 # define IF_HAVE_UFFD_MINOR(flag, name) 230 #endif 231 232 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32) 233 # define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name}, 234 #else 235 # define IF_HAVE_VM_DROPPABLE(flag, name) 236 #endif 237 238 #define __def_vmaflag_names \ 239 {VM_READ, "read" }, \ 240 {VM_WRITE, "write" }, \ 241 {VM_EXEC, "exec" }, \ 242 {VM_SHARED, "shared" }, \ 243 {VM_MAYREAD, "mayread" }, \ 244 {VM_MAYWRITE, "maywrite" }, \ 245 {VM_MAYEXEC, "mayexec" }, \ 246 {VM_MAYSHARE, "mayshare" }, \ 247 {VM_GROWSDOWN, "growsdown" }, \ 248 {VM_UFFD_MISSING, "uffd_missing" }, \ 249 IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ 250 {VM_PFNMAP, "pfnmap" }, \ 251 {VM_UFFD_WP, "uffd_wp" }, \ 252 {VM_LOCKED, "locked" }, \ 253 {VM_IO, "io" }, \ 254 {VM_SEQ_READ, "seqread" }, \ 255 {VM_RAND_READ, "randread" }, \ 256 {VM_DONTCOPY, "dontcopy" }, \ 257 {VM_DONTEXPAND, "dontexpand" }, \ 258 {VM_LOCKONFAULT, "lockonfault" }, \ 259 {VM_ACCOUNT, "account" }, \ 260 {VM_NORESERVE, "noreserve" }, \ 261 {VM_HUGETLB, "hugetlb" }, \ 262 {VM_SYNC, "sync" }, \ 263 __VM_ARCH_SPECIFIC_1 , \ 264 {VM_WIPEONFORK, "wipeonfork" }, \ 265 {VM_DONTDUMP, "dontdump" }, \ 266 IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ 267 {VM_MIXEDMAP, "mixedmap" }, \ 268 {VM_HUGEPAGE, "hugepage" }, \ 269 {VM_NOHUGEPAGE, "nohugepage" }, \ 270 IF_HAVE_VM_DROPPABLE(VM_DROPPABLE, "droppable" ) \ 271 {VM_MERGEABLE, "mergeable" } \ 272 273 #define show_vma_flags(flags) \ 274 (flags) ? __print_flags(flags, "|", \ 275 __def_vmaflag_names \ 276 ) : "none" 277 278 #ifdef CONFIG_COMPACTION 279 #define COMPACTION_STATUS \ 280 EM( COMPACT_SKIPPED, "skipped") \ 281 EM( COMPACT_DEFERRED, "deferred") \ 282 EM( COMPACT_CONTINUE, "continue") \ 283 EM( COMPACT_SUCCESS, "success") \ 284 EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ 285 EM( COMPACT_COMPLETE, "complete") \ 286 EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ 287 EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ 288 EMe(COMPACT_CONTENDED, "contended") 289 290 /* High-level compaction status feedback */ 291 #define COMPACTION_FAILED 1 292 #define COMPACTION_WITHDRAWN 2 293 #define COMPACTION_PROGRESS 3 294 295 #define compact_result_to_feedback(result) \ 296 ({ \ 297 enum compact_result __result = result; \ 298 (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \ 299 (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \ 300 }) 301 302 #define COMPACTION_FEEDBACK \ 303 EM(COMPACTION_FAILED, "failed") \ 304 EM(COMPACTION_WITHDRAWN, "withdrawn") \ 305 EMe(COMPACTION_PROGRESS, "progress") 306 307 #define COMPACTION_PRIORITY \ 308 EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \ 309 EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \ 310 EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC") 311 #else 312 #define COMPACTION_STATUS 313 #define COMPACTION_PRIORITY 314 #define COMPACTION_FEEDBACK 315 #endif 316 317 #ifdef CONFIG_ZONE_DMA 318 #define IFDEF_ZONE_DMA(X) X 319 #else 320 #define IFDEF_ZONE_DMA(X) 321 #endif 322 323 #ifdef CONFIG_ZONE_DMA32 324 #define IFDEF_ZONE_DMA32(X) X 325 #else 326 #define IFDEF_ZONE_DMA32(X) 327 #endif 328 329 #ifdef CONFIG_HIGHMEM 330 #define IFDEF_ZONE_HIGHMEM(X) X 331 #else 332 #define IFDEF_ZONE_HIGHMEM(X) 333 #endif 334 335 #define ZONE_TYPE \ 336 IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ 337 IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ 338 EM (ZONE_NORMAL, "Normal") \ 339 IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ 340 EMe(ZONE_MOVABLE,"Movable") 341 342 #define LRU_NAMES \ 343 EM (LRU_INACTIVE_ANON, "inactive_anon") \ 344 EM (LRU_ACTIVE_ANON, "active_anon") \ 345 EM (LRU_INACTIVE_FILE, "inactive_file") \ 346 EM (LRU_ACTIVE_FILE, "active_file") \ 347 EMe(LRU_UNEVICTABLE, "unevictable") 348 349 /* 350 * First define the enums in the above macros to be exported to userspace 351 * via TRACE_DEFINE_ENUM(). 352 */ 353 #undef EM 354 #undef EMe 355 #define EM(a, b) TRACE_DEFINE_ENUM(a); 356 #define EMe(a, b) TRACE_DEFINE_ENUM(a); 357 358 COMPACTION_STATUS 359 COMPACTION_PRIORITY 360 /* COMPACTION_FEEDBACK are defines not enums. Not needed here. */ 361 ZONE_TYPE 362 LRU_NAMES 363 364 /* 365 * Now redefine the EM() and EMe() macros to map the enums to the strings 366 * that will be printed in the output. 367 */ 368 #undef EM 369 #undef EMe 370 #define EM(a, b) {a, b}, 371 #define EMe(a, b) {a, b} 372