# SPDX-License-Identifier: GPL-2.0-only

menu "Memory Management options"

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. FLATMEM is the most efficient
	  memory model in terms of performance and resource consumption
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on !SPARSEMEM || FLATMEM_MANUAL

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when sparse_init() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_FAST_GUP
	depends on MMU
	bool

# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
	bool

# Keep arch NUMA mapping infrastructure post-init.
config NUMA_KEEP_MEMINFO
	bool

config MEMORY_ISOLATION
	bool

#
# Only set on architectures that have completely implemented the memory hotplug
# feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

config ARCH_ENABLE_MEMORY_HOTPLUG
	bool

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
	bool "Allow for memory hot-add"
	select MEMORY_ISOLATION
	depends on SPARSEMEM || X86_64_ACPI_NUMA
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
	depends on 64BIT || BROKEN
	select NUMA_KEEP_MEMINFO if NUMA

config MEMORY_HOTPLUG_SPARSE
	def_bool y
	depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default memory hotplug onlining policy
	  (/sys/devices/system/memory/auto_online_blocks), which determines
	  what happens to newly added memory regions. The policy can always
	  be changed at runtime.
	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.

config ARCH_ENABLE_MEMORY_HOTREMOVE
	bool

config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

config MHP_MEMMAP_ON_MEMORY
	def_bool y
	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# SPARC32 allocates multiple pte tables within a single page, and therefore
# a per-page lock leads to problems when multiple tables need to be locked
# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "999999" if SPARC32
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be
	  used within a guest, thus imposing performance penalties associated
	  with the reduced number of transparent huge pages that could be used
	  by the guest workload. Allowing compaction and migration of pages
	  enlisted as part of memory balloon devices avoids this scenario
	  and helps improve memory defragmentation.
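
# Note: MEMORY_HOTPLUG_DEFAULT_ONLINE above only seeds the boot-time onlining
# policy; it can be inspected and changed at runtime through sysfs, and
# individual memory blocks can still be onlined by hand. A minimal sketch
# (paths as documented in Documentation/admin-guide/mm/memory-hotplug.rst;
# the block number below is purely illustrative):
#
#   cat /sys/devices/system/memory/auto_online_blocks        # "offline" or "online"
#   echo online > /sys/devices/system/memory/auto_online_blocks
#   echo online > /sys/devices/system/memory/memory32/state  # online one block manually
#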

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component that can form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it; if so, we would be really interested to hear about it at
	  linux-mm@kvack.org.

#
# support for free page reporting
config PAGE_REPORTING
	bool "Free page reporting"
	def_bool n
	help
	  Free page reporting allows for the incremental acquisition of
	  free pages from the buddy allocator for the purpose of reporting
	  those pages to another entity, such as a hypervisor, so that the
	  memory can be freed within the host for other uses.

#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	def_bool y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems, to put pages nearer
	  to the processors accessing them. The second is when allocating
	  huge pages, as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
	bool

config ARCH_ENABLE_THP_MIGRATION
	bool

config HUGETLB_PAGE_SIZE_VARIABLE
	def_bool n
	help
	  Allows the pageblock_order value to be dynamic instead of just standard
	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
	  on a platform.

config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && HIGHMEM
	help
	  Enable bounce buffers for devices that cannot access the full range of
	  memory available to the CPU. Enabled by default when HIGHMEM is
	  selected, but you may say n to override this.

config VIRT_TO_BUS
	bool
	help
	  An architecture should select this if it implements the
	  deprecated interface virt_to_bus(). All new architectures
	  should probably not select this.

config MMU_NOTIFIER
	bool
	select SRCU
	select INTERVAL_TREE

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	select XXHASH
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable. When it finds pages of identical content, it replaces
	  the many instances by a single page with that content, thus
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/vm/ksm.rst for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
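
# As a hedged usage sketch for the KSM option above (not part of the option
# itself): an application opts its memory in with madvise(), and the scanner
# is then started and monitored via sysfs, e.g.:
#
#   madvise(addr, length, MADV_MERGEABLE);      /* in the application */
#   echo 1 > /sys/kernel/mm/ksm/run             # start the KSM scanner
#   cat /sys/kernel/mm/ksm/pages_sharing        # rough estimate of savings
#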

config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation. Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ia64, ppc64 and x86 users with lots of address space
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO, or this
	  protection can be disabled by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.

config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires. To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of that tunable. The default
	  of 1 says that all excess pages should be trimmed.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.

config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepages allows the kernel to use huge pages and
	  huge TLB entries transparently for applications whenever possible.
	  This feature can improve computing performance for certain
	  applications by speeding up page faults during memory
	  allocation, by reducing the number of TLB misses and by speeding
	  up the pagetable walking.

	  On memory-constrained embedded systems, you may want to say N.

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
		help
		  Enabling Transparent Hugepage always can increase the
		  memory footprint of applications without a guaranteed
		  benefit, but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
		help
		  Enabling Transparent Hugepage madvise will only provide a
		  performance benefit to applications using
		  madvise(MADV_HUGEPAGE), but it won't risk increasing the
		  memory footprint of applications without a guaranteed
		  benefit.
endchoice

config ARCH_WANTS_THP_SWAP
	def_bool n

config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, the swap cluster backing a transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP
	bool
	default y

config CLEANCACHE
	bool "Enable cleancache driver to cache clean pages if tmem is present"
	help
	  Cleancache can be thought of as a page-granularity victim cache
	  for clean pages that the kernel's pageframe replacement algorithm
	  (PFRA) would like to keep around, but can't since there isn't enough
	  memory. So when the PFRA "evicts" a page, it first attempts to use
	  cleancache code to put the data contained in that page into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. And when a cleancache-enabled
	  filesystem wishes to access a page in a file on disk, it first
	  checks cleancache to see if it already contains it; if it does,
	  the page is copied into the kernel and a disk access is avoided.
	  When a transcendent memory driver is available (such as zcache or
	  Xen transcendent memory), a significant I/O reduction
	  may be achieved. When none is available, all cleancache calls
	  are reduced to a single pointer-compare-against-NULL resulting
	  in a negligible performance hit.

	  If unsure, say Y to enable cleancache.

config FRONTSWAP
	bool "Enable frontswap to cache swap pages if tmem is present"
	depends on SWAP
	help
	  Frontswap is so named because it can be thought of as the opposite
	  of a "backing" store for a swap device. The data is stored into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. When space in transcendent memory is available,
	  a significant swap I/O reduction may be achieved. When none is
	  available, all frontswap calls are reduced to a single pointer-
	  compare-against-NULL resulting in a negligible performance hit
	  and swap data is stored as normal on the matching swap device.

	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it.
	  This way, the kernel can use the memory for
	  pagecache and, when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA. This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_SYSFS
	bool "CMA information through sysfs interface"
	depends on CMA && SYSFS
	help
	  This option exposes some sysfs attributes to get information
	  from CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 19 if NUMA
	default 7
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  for use as device-private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7" on UMA and "19" on NUMA.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory change tracking by introducing a
	  soft-dirty bit on PTEs. This bit is set when someone writes
	  into a page, just like the regular dirty bit, but unlike the
	  latter it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim. While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

choice
	prompt "Compressed cache for swap pages default compressor"
	depends on ZSWAP
	default ZSWAP_COMPRESSOR_DEFAULT_LZO
	help
	  Selects the default compression algorithm for the compressed cache
	  for swap pages.

	  For an overview of what kind of performance can be expected from
	  a particular compression algorithm, please refer to the benchmarks
	  available at the following LWN page:
	  https://lwn.net/Articles/751795/

	  If in doubt, select 'LZO'.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.compressor=' option.

config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	bool "Deflate"
	select CRYPTO_DEFLATE
	help
	  Use the Deflate algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZO
	bool "LZO"
	select CRYPTO_LZO
	help
	  Use the LZO algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_842
	bool "842"
	select CRYPTO_842
	help
	  Use the 842 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4
	bool "LZ4"
	select CRYPTO_LZ4
	help
	  Use the LZ4 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	bool "LZ4HC"
	select CRYPTO_LZ4HC
	help
	  Use the LZ4HC algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	bool "zstd"
	select CRYPTO_ZSTD
	help
	  Use the zstd algorithm as the default compression algorithm.
endchoice

config ZSWAP_COMPRESSOR_DEFAULT
	string
	depends on ZSWAP
	default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
	default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
	default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
	default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	default ""

choice
	prompt "Compressed cache for swap pages default allocator"
	depends on ZSWAP
	default ZSWAP_ZPOOL_DEFAULT_ZBUD
	help
	  Selects the default allocator for the compressed cache for
	  swap pages.
	  The default is 'zbud' for compatibility; however, please read
	  the description of each allocator below before making your
	  choice.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.zpool=' option.

config ZSWAP_ZPOOL_DEFAULT_ZBUD
	bool "zbud"
	select ZBUD
	help
	  Use the zbud allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	bool "z3fold"
	select Z3FOLD
	help
	  Use the z3fold allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	bool "zsmalloc"
	select ZSMALLOC
	help
	  Use the zsmalloc allocator as the default allocator.
endchoice

config ZSWAP_ZPOOL_DEFAULT
	string
	depends on ZSWAP
	default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
	default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	default ""

config ZSWAP_DEFAULT_ON
	bool "Enable the compressed cache for swap pages by default"
	depends on ZSWAP
	help
	  If selected, the compressed cache for swap pages will be enabled
	  at boot, otherwise it will be disabled.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.enabled=' option.

config ZPOOL
	tristate "Common API for compressed memory storage"
	help
	  Compressed memory storage API. This allows using either zbud,
	  z3fold or zsmalloc.

config ZBUD
	tristate "Low (Up to 2x) density storage for compressed pages"
	depends on ZPOOL
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "Up to 3x density storage for compressed pages"
	depends on ZPOOL
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.
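
# The zswap defaults chosen above (ZSWAP_DEFAULT_ON, ZSWAP_COMPRESSOR_DEFAULT,
# ZSWAP_ZPOOL_DEFAULT) only seed the zswap module parameters; they can be
# overridden on the kernel command line or at runtime. A hedged sketch, with
# illustrative values:
#
#   zswap.enabled=1 zswap.compressor=zstd zswap.zpool=z3fold    # kernel command line
#   echo lz4 > /sys/module/zswap/parameters/compressor          # change at runtime
#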

config ZSMALLOC
	tristate "Memory allocator for compressed pages"
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages. zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation. However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc(). This handle must be mapped in order to
	  access the allocated space.

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

config GENERIC_EARLY_IOREMAP
	bool

config STACK_MAX_DEFAULT_SIZE_MB
	int "Default maximum user stack size for 32-bit processes (MB)"
	default 100
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch) and the RLIMIT_STACK hard limit is unlimited.

	  A sane initial value is 100 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	select PADATA
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of the memmap at boot and then initialise the rest in
	  parallel. This has a potential performance impact on tasks running
	  early in the lifetime of the system until these kthreads finish the
	  initialisation.

config PAGE_IDLE_FLAG
	bool
	select PAGE_EXTENSION if !64BIT
	help
	  This adds PG_idle and PG_young flags to 'struct page'. PTE Accessed
	  bit writers can set the state of the bit in the flags so that PTE
	  Accessed bit readers may avoid disturbance.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_IDLE_FLAG
	help
	  This feature allows estimating the number of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.

config ARCH_HAS_CACHE_LINE_SIZE
	bool

config ARCH_HAS_PTE_DEVMAP
	bool

config ARCH_HAS_ZONE_DMA_SET
	bool

config ZONE_DMA
	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
	default y if ARM64 || X86

config ZONE_DMA32
	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
	depends on !X86_32
	default y if ARM64

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_PTE_DEVMAP
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap.
	  This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses, which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

config DEV_PAGEMAP_OPS
	bool

#
# Helpers to mirror range of the CPU page tables of a process into device page
# tables.
#
config HMM_MIRROR
	bool
	depends on MMU

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ZONE_DEVICE
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config VMAP_PFN
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_TEST
	bool "Enable infrastructure for get_user_pages()-related unit tests"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
	  to make ioctl calls that can launch kernel-based unit tests for
	  the get_user_pages*() and pin_user_pages*() family of API calls.

	  These tests include benchmark testing of the _fast variants of
	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
	  the non-_fast variants.

	  There is also a sub-test that allows running dump_page() on any
	  of up to eight pages (selected by command line args) within the
	  range of user-space addresses. These pages are either pinned via
	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
	  by other command line arguments.

	  See tools/testing/selftests/vm/gup_test.c

comment "GUP_TEST needs to have DEBUG_FS enabled"
	depends on !GUP_TEST && !DEBUG_FS

config GUP_GET_PTE_LOW_HIGH
	bool

config READ_ONLY_THP_FOR_FS
	bool "Read-only THP for filesystems (EXPERIMENTAL)"
	depends on TRANSPARENT_HUGEPAGE && SHMEM
	help
	  Allow khugepaged to put read-only file-backed pages in THP.

	  This is marked experimental because it is a new feature. Write
	  support of file THPs will be developed in the next few release
	  cycles.

config ARCH_HAS_PTE_SPECIAL
	bool

#
# Some architectures require a special hugepage directory format to support
# multiple hugepage sizes. For example, commit a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc. This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
	bool

config MAPPING_DIRTY_HELPERS
	bool

config KMAP_LOCAL
	bool

config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
	bool

# struct io_mapping based helper. Selected by drivers that need it.
config IO_MAPPING
	bool

config SECRETMEM
	def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED

# Some architectures want callbacks for all IO mappings in order to
# track the physical addresses that get used as devices.
config ARCH_HAS_IOREMAP_PHYS_HOOKS
	bool

config ANON_VMA_NAME
	bool "Anonymous VMA name support"
	depends on PROC_FS && ADVISE_SYSCALLS && MMU
	help
	  Allow naming anonymous virtual memory areas.

	  This feature allows assigning names to virtual memory areas. Assigned
	  names can later be retrieved from /proc/pid/maps and /proc/pid/smaps
	  and help identify individual anonymous memory areas.
	  Assigning a name to an anonymous virtual memory area might prevent
	  that area from being merged with adjacent virtual memory areas due to
	  the difference in their names.

# multi-gen LRU {
config LRU_GEN
	bool "Multi-Gen LRU"
	depends on MMU
	# make sure page->flags has enough spare bits
	depends on !MAXSMP && (64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP)
	help
	  A high performance LRU implementation for memory overcommit. See
	  Documentation/admin-guide/mm/multigen_lru.rst for details.

config LRU_GEN_ENABLED
	bool "Enable by default"
	depends on LRU_GEN
	help
	  This option enables the multi-gen LRU by default.

config LRU_GEN_STATS
	bool "Full stats for debugging"
	depends on LRU_GEN
	help
	  Do not enable this option unless you plan to look at historical stats
	  from evicted generations for debugging purposes.

	  This option has a per-memcg and per-node memory overhead.
# }

source "mm/damon/Kconfig"

config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
	def_bool n

config SPECULATIVE_PAGE_FAULT
	bool "Speculative page faults"
	default y
	depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT && MMU && SMP && !NUMA
	help
	  Try to handle user space page faults without holding the mmap lock.

	  Instead of blocking writers through the use of the mmap lock,
	  the page fault handler merely verifies, at the end of the page
	  fault, that no writers have been running concurrently with it.

	  In high concurrency situations, the speculative fault handler
	  gains a throughput advantage by avoiding having to update the
	  mmap lock reader count.

	  If the check fails due to a concurrent writer, or due to hitting
	  an unsupported case, the fault handler falls back to classical
	  processing using the mmap read lock.

endmenu