# linux/mm/Kconfig
config SELECT_MEMORY_MODEL
        def_bool y
        depends on ARCH_SELECT_MEMORY_MODEL

choice
        prompt "Memory model"
        depends on SELECT_MEMORY_MODEL
        default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
        default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
        default FLATMEM_MANUAL

config FLATMEM_MANUAL
        bool "Flat Memory"
        depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
        help
          This option allows you to change some of the ways that
          Linux manages its memory internally.  Most users will
          only have one option here: FLATMEM.  This is normal
          and a correct option.

          Some users of more advanced features like NUMA and
          memory hotplug may have different options here.
          DISCONTIGMEM is a more mature, better tested system,
          but is incompatible with memory hotplug and may suffer
          decreased performance over SPARSEMEM.  If unsure between
          "Sparse Memory" and "Discontiguous Memory", choose
          "Discontiguous Memory".

          If unsure, choose this option (Flat Memory) over any other.

config DISCONTIGMEM_MANUAL
        bool "Discontiguous Memory"
        depends on ARCH_DISCONTIGMEM_ENABLE
        help
          This option provides enhanced support for discontiguous
          memory systems, over FLATMEM.  These systems have holes
          in their physical address spaces, and this option provides
          more efficient handling of these holes.  However, the vast
          majority of hardware has quite flat address spaces, and
          can have degraded performance from the extra overhead that
          this option imposes.

          Many NUMA configurations will have this as the only option.

          If unsure, choose "Flat Memory" over this option.

config SPARSEMEM_MANUAL
        bool "Sparse Memory"
        depends on ARCH_SPARSEMEM_ENABLE
        help
          This will be the only option for some systems, including
          memory hotplug systems.  This is normal.

          For many other systems, this will be an alternative to
          "Discontiguous Memory".  This option provides some potential
          performance benefits, along with decreased code complexity,
          but it is newer, and more experimental.

          If unsure, choose "Discontiguous Memory" or "Flat Memory"
          over this option.

endchoice

config DISCONTIGMEM
        def_bool y
        depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL

config SPARSEMEM
        def_bool y
        depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
        def_bool y
        depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL

config FLAT_NODE_MEM_MAP
        def_bool y
        depends on !SPARSEMEM

#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory.  This variable allows
# those dependencies to exist individually.
#
config NEED_MULTIPLE_NODES
        def_bool y
        depends on DISCONTIGMEM || NUMA

config HAVE_MEMORY_PRESENT
        def_bool y
        depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called.  If this cannot
# be done on your architecture, select this option.  However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
        bool

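#
# Illustration (not from this file): an architecture that cannot do the
# boot-time allocation described above would select SPARSEMEM_STATIC from
# its own Kconfig, roughly like the hypothetical fragment below.
#
#   config MY_ARCH                          # hypothetical arch symbol
#           ...
#           select ARCH_SPARSEMEM_ENABLE
#           select SPARSEMEM_STATIC if SPARSEMEM
#
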
#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
        def_bool y
        depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
        bool

config SPARSEMEM_VMEMMAP
        bool "Sparse Memory virtual memmap"
        depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
        default y
        help
         SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
         pfn_to_page and page_to_pfn operations.  This is the most
         efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_PHYS_MAP
        bool

config HAVE_FAST_GUP
        depends on MMU
        bool

config ARCH_DISCARD_MEMBLOCK
        bool

# Keep arch NUMA mapping infrastructure post-init.
config NUMA_KEEP_MEMINFO
        bool

config MEMORY_ISOLATION
        bool

#
# Only set on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
        def_bool n

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
        bool "Allow for memory hot-add"
        select MEMORY_ISOLATION
        depends on SPARSEMEM || X86_64_ACPI_NUMA
        depends on ARCH_ENABLE_MEMORY_HOTPLUG
        select NUMA_KEEP_MEMINFO if NUMA

config MEMORY_HOTPLUG_SPARSE
        def_bool y
        depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
        bool "Online the newly added memory blocks by default"
        default n
        depends on MEMORY_HOTPLUG
        help
          This option sets the default value of the memory hotplug onlining
          policy (/sys/devices/system/memory/auto_online_blocks), which
          determines what happens to newly added memory regions. The policy
          can always be changed at runtime.
          See Documentation/memory-hotplug.txt for more information.

          Say Y here if you want all hot-plugged memory blocks to appear in
          'online' state by default.
          Say N here if you want the default policy to keep all hot-plugged
          memory blocks in 'offline' state.

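#
# Runtime illustration (path per the help text above; example values only):
#
#   $ cat /sys/devices/system/memory/auto_online_blocks
#   offline
#   $ echo online > /sys/devices/system/memory/auto_online_blocks
#
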
config MEMORY_HOTREMOVE
        bool "Allow for memory hot remove"
        select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
        depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
        depends on MIGRATION

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
        int
        default "999999" if !MMU
        default "999999" if ARM && !CPU_CACHE_VIPT
        default "999999" if PARISC && !PA20
        default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
        bool

#
# support for memory balloon
config MEMORY_BALLOON
        bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
        bool "Allow for balloon memory compaction/migration"
        def_bool y
        depends on COMPACTION && MEMORY_BALLOON
        help
          Memory fragmentation introduced by ballooning might significantly
          reduce the number of 2MB contiguous memory blocks that can be
          used within a guest, thus imposing performance penalties associated
          with the reduced number of transparent huge pages that could be used
          by the guest workload. Allowing compaction and migration of memory
          pages enlisted as being part of memory balloon devices avoids this
          scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
        bool "Allow for memory compaction"
        def_bool y
        select MIGRATION
        depends on MMU
        help
          Compaction is the only memory management component to form
          high order (larger physically contiguous) memory blocks
          reliably. The page allocator relies on compaction heavily and
          the lack of the feature can lead to unexpected OOM killer
          invocations for high order memory requests. You shouldn't
          disable this option unless there really is a strong reason for
          it and then we would be really interested to hear about that at
          linux-mm@kvack.org.

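#
# Runtime illustration (example only): compaction of all zones can also be
# triggered manually through procfs.
#
#   $ echo 1 > /proc/sys/vm/compact_memory
#
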
#
# support for free page reporting
config PAGE_REPORTING
        bool "Free page reporting"
        def_bool n
        help
          Free page reporting allows for the incremental acquisition of
          free pages from the buddy allocator for the purpose of reporting
          those pages to another entity, such as a hypervisor, so that the
          memory can be freed within the host for other uses.

#
# support for page migration
#
config MIGRATION
        bool "Page migration"
        def_bool y
        depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
          two situations. The first is on NUMA systems to put pages nearer
          to the processors accessing them. The second is when allocating huge
          pages as migration can relocate pages to satisfy a huge page
          allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
        bool

config ARCH_ENABLE_THP_MIGRATION
        bool

config CONTIG_ALLOC
       def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PHYS_ADDR_T_64BIT
        def_bool 64BIT

config BOUNCE
        bool "Enable bounce buffers"
        default y
        depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
        help
          Enable bounce buffers for devices that cannot access
          the full range of memory available to the CPU. Enabled
          by default when ZONE_DMA or HIGHMEM is selected, but you
          may say n to override this.

config NR_QUICK
        int
        depends on QUICKLIST
        default "1"

config VIRT_TO_BUS
        bool
        help
          An architecture should select this if it implements the
          deprecated interface virt_to_bus().  All new architectures
          should probably not select this.

config MMU_NOTIFIER
        bool
        select SRCU
        select INTERVAL_TREE

config KSM
        bool "Enable KSM for page merging"
        depends on MMU
        help
          Enable Kernel Samepage Merging: KSM periodically scans those areas
          of an application's address space that an app has advised may be
          mergeable.  When it finds pages of identical content, it replaces
          the many instances by a single page with that content, so
          saving memory until one or another app needs to modify the content.
          Recommended for use with KVM, or with other duplicative applications.
          See Documentation/vm/ksm.rst for more information: KSM is inactive
          until a program has madvised that an area is MADV_MERGEABLE, and
          root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).

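#
# Runtime illustration (interfaces per the help text above; example values
# only): a process marks a region with madvise(addr, len, MADV_MERGEABLE),
# then root starts the scanner.
#
#   $ echo 1 > /sys/kernel/mm/ksm/run          # start ksmd scanning
#   $ cat /sys/kernel/mm/ksm/pages_sharing     # pages currently deduplicated
#
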
config DEFAULT_MMAP_MIN_ADDR
        int "Low address space to protect from user allocation"
        depends on MMU
        default 4096
        help
          This is the portion of low virtual memory which should be protected
          from userspace allocation.  Keeping a user from writing to low pages
          can help reduce the impact of kernel NULL pointer bugs.

          For most ia64, ppc64 and x86 users with lots of address space
          a value of 65536 is reasonable and should cause no problems.
          On arm and other archs it should not be higher than 32768.
          Programs which use vm86 functionality or have some need to map
          this low address space will need CAP_SYS_RAWIO or disable this
          protection by setting the value to 0.

          This value can be changed after boot using the
          /proc/sys/vm/mmap_min_addr tunable.

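#
# Runtime illustration (tunable per the help text above; example values only):
#
#   $ sysctl vm.mmap_min_addr
#   vm.mmap_min_addr = 4096
#   $ sysctl -w vm.mmap_min_addr=65536
#
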
config ARCH_SUPPORTS_MEMORY_FAILURE
        bool

config MEMORY_FAILURE
        depends on MMU
        depends on ARCH_SUPPORTS_MEMORY_FAILURE
        bool "Enable recovery from hardware memory errors"
        select MEMORY_ISOLATION
        select RAS
        help
          Enables code to recover from some memory failures on systems
          with MCA recovery. This allows a system to continue running
          even when some of its memory has uncorrected errors. This requires
          special hardware support and typically ECC memory.

config HWPOISON_INJECT
        tristate "HWPoison pages injector"
        depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
        select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
        int "Turn on mmap() excess space trimming before booting"
        depends on !MMU
        default 1
        help
          The NOMMU mmap() frequently needs to allocate large contiguous chunks
          of memory on which to store mappings, but it can only ask the system
          allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
          more than it requires.  To deal with this, mmap() is able to trim off
          the excess and return it to the allocator.

          If trimming is enabled, the excess is trimmed off and returned to the
          system allocator, which can cause extra fragmentation, particularly
          if there are a lot of transient processes.

          If trimming is disabled, the excess is kept, but not used, which for
          long-term mappings means that the space is wasted.

          Trimming can be dynamically controlled through a sysctl option
          (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
          excess pages there must be before trimming should occur, or zero if
          no trimming is to occur.

          This option specifies the initial value of that sysctl.  The default
          of 1 says that all excess pages should be trimmed.

          See Documentation/nommu-mmap.txt for more information.

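#
# Runtime illustration (sysctl per the help text above; example values only):
#
#   $ cat /proc/sys/vm/nr_trim_pages
#   1
#   $ echo 0 > /proc/sys/vm/nr_trim_pages    # disable trimming entirely
#
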
config TRANSPARENT_HUGEPAGE
        bool "Transparent Hugepage Support"
        depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select COMPACTION
        select XARRAY_MULTI
        help
          Transparent Hugepages allows the kernel to use huge pages and
          huge TLB entries transparently for applications whenever possible.
          This feature can improve computing performance for certain
          applications by speeding up page faults during memory
          allocation, by reducing the number of TLB misses and by speeding
          up pagetable walks.

          If memory is constrained on an embedded system, you may want to say N.

choice
        prompt "Transparent Hugepage Support sysfs defaults"
        depends on TRANSPARENT_HUGEPAGE
        default TRANSPARENT_HUGEPAGE_ALWAYS
        help
          Selects the sysfs defaults for Transparent Hugepage Support.

        config TRANSPARENT_HUGEPAGE_ALWAYS
                bool "always"
        help
          Enabling Transparent Hugepage always can increase the
          memory footprint of applications without a guaranteed
          benefit, but it will work automatically for all applications.

        config TRANSPARENT_HUGEPAGE_MADVISE
                bool "madvise"
        help
          Enabling Transparent Hugepage madvise will only provide a
          performance benefit to applications using
          madvise(MADV_HUGEPAGE), but it won't risk increasing the
          memory footprint of applications without a guaranteed
          benefit.
endchoice

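#
# Runtime illustration (interfaces per the help texts above; example values
# only): the sysfs default chosen here can be changed at runtime, and an
# application can request THP for a range with
# madvise(addr, len, MADV_HUGEPAGE).
#
#   $ cat /sys/kernel/mm/transparent_hugepage/enabled
#   [always] madvise never
#   $ echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
#
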
config ARCH_WANTS_THP_SWAP
       def_bool n

config THP_SWAP
        def_bool y
        depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
        help
          Swap transparent huge pages in one piece, without splitting.
          XXX: For now, the swap cluster backing a transparent huge page
          will be split after swapout.

          For selection by architectures with reasonable THP sizes.

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
        depends on !SMP
        bool
        default y

config CLEANCACHE
        bool "Enable cleancache driver to cache clean pages if tmem is present"
        default n
        help
          Cleancache can be thought of as a page-granularity victim cache
          for clean pages that the kernel's pageframe replacement algorithm
          (PFRA) would like to keep around, but can't since there isn't enough
          memory.  So when the PFRA "evicts" a page, it first attempts to use
          cleancache code to put the data contained in that page into
          "transcendent memory", memory that is not directly accessible or
          addressable by the kernel and is of unknown and possibly
          time-varying size.  And when a cleancache-enabled
          filesystem wishes to access a page in a file on disk, it first
          checks cleancache to see if it already contains it; if it does,
          the page is copied into the kernel and a disk access is avoided.
          When a transcendent memory driver is available (such as zcache or
          Xen transcendent memory), a significant I/O reduction
          may be achieved.  When none is available, all cleancache calls
          are reduced to a single pointer-compare-against-NULL resulting
          in a negligible performance hit.

          If unsure, say Y to enable cleancache.

config FRONTSWAP
        bool "Enable frontswap to cache swap pages if tmem is present"
        depends on SWAP
        default n
        help
          Frontswap is so named because it can be thought of as the opposite
          of a "backing" store for a swap device.  The data is stored into
          "transcendent memory", memory that is not directly accessible or
          addressable by the kernel and is of unknown and possibly
          time-varying size.  When space in transcendent memory is available,
          a significant swap I/O reduction may be achieved.  When none is
          available, all frontswap calls are reduced to a single pointer-
          compare-against-NULL resulting in a negligible performance hit
          and swap data is stored as normal on the matching swap device.

          If unsure, say Y to enable frontswap.

config CMA
        bool "Contiguous Memory Allocator"
        depends on MMU
        select MIGRATION
        select MEMORY_ISOLATION
        help
          This enables the Contiguous Memory Allocator which allows other
          subsystems to allocate big physically-contiguous blocks of memory.
          CMA reserves a region of memory and allows only movable pages to
          be allocated from it. This way, the kernel can use the memory for
          pagecache, and when a subsystem requests a contiguous area, the
          allocated pages are migrated away to serve the contiguous request.

          If unsure, say "n".

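#
# Runtime illustration (example values only): a default CMA area can be
# reserved at boot with the cma= kernel parameter, and the reservation is
# visible in /proc/meminfo.
#
#   # kernel command line fragment
#   cma=64M
#
#   $ grep -i cma /proc/meminfo
#   CmaTotal:          65536 kB
#   CmaFree:           65200 kB
#
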
config CMA_DEBUG
        bool "CMA debug messages (DEVELOPMENT)"
        depends on DEBUG_KERNEL && CMA
        help
          Turns on debug messages in CMA.  This produces KERN_DEBUG
          messages for every CMA call as well as various messages while
          processing calls such as dma_alloc_from_contiguous().
          This option does not affect warning and error messages.

config CMA_DEBUGFS
        bool "CMA debugfs interface"
        depends on CMA && DEBUG_FS
        help
          Turns on the DebugFS interface for CMA.

config CMA_AREAS
        int "Maximum count of the CMA areas"
        depends on CMA
        default 7
        help
          CMA allows creating CMA areas for a particular purpose, mainly
          used as device-private areas. This parameter sets the maximum
          number of CMA areas in the system.

          If unsure, leave the default value "7".

config MEM_SOFT_DIRTY
        bool "Track memory changes"
        depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
        select PROC_PAGE_MONITOR
        help
          This option enables tracking of memory changes by introducing a
          soft-dirty bit on PTEs. This bit is set when someone writes
          into a page, just like the regular dirty bit, but unlike the latter
          it can be cleared by hand.

          See Documentation/admin-guide/mm/soft-dirty.rst for more details.

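#
# Runtime illustration (interfaces per Documentation/admin-guide/mm/soft-dirty.rst;
# example values only): clear the soft-dirty bits for a task, then later read
# bit 55 of its /proc/<pid>/pagemap entries to see which pages were written.
#
#   $ echo 4 > /proc/1234/clear_refs    # clear soft-dirty bits for pid 1234
#
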
config ZSWAP
        bool "Compressed cache for swap pages (EXPERIMENTAL)"
        depends on FRONTSWAP && CRYPTO=y
        select CRYPTO_LZO
        select ZPOOL
        default n
        help
          A lightweight compressed cache for swap pages.  It takes
          pages that are in the process of being swapped out and attempts to
          compress them into a dynamically allocated RAM-based memory pool.
          This can result in a significant I/O reduction on the swap device and,
          in the case where decompressing from RAM is faster than swap device
          reads, can also improve workload performance.

          This is marked experimental because it is a new feature (as of
          v3.11) that interacts heavily with memory reclaim.  While these
          interactions don't cause any known issues on simple memory setups,
          they have not been fully explored on the large set of potential
          configurations and workloads that exist.

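#
# Runtime illustration (example only): zswap built with this option still has
# to be enabled, either on the kernel command line or via its module parameter.
#
#   # kernel command line fragment
#   zswap.enabled=1
#
#   $ echo 1 > /sys/module/zswap/parameters/enabled
#
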
config ZPOOL
        tristate "Common API for compressed memory storage"
        default n
        help
          Compressed memory storage API.  This allows using either zbud or
          zsmalloc.

config ZBUD
        tristate "Low (Up to 2x) density storage for compressed pages"
        default n
        help
          A special purpose allocator for storing compressed pages.
          It is designed to store up to two compressed pages per physical
          page.  While this design limits storage density, it has simple and
          deterministic reclaim properties that make it preferable to a higher
          density approach when reclaim will be used.

config Z3FOLD
        tristate "Up to 3x density storage for compressed pages"
        depends on ZPOOL
        default n
        help
          A special purpose allocator for storing compressed pages.
          It is designed to store up to three compressed pages per physical
          page. It is a ZBUD derivative so the simplicity and determinism are
          still there.

config ZSMALLOC
        tristate "Memory allocator for compressed pages"
        depends on MMU
        default n
        help
          zsmalloc is a slab-based memory allocator designed to store
          compressed RAM pages.  zsmalloc uses virtual memory mapping
          in order to reduce fragmentation.  However, this results in a
          non-standard allocator interface where a handle, not a pointer, is
          returned by an alloc().  This handle must be mapped in order to
          access the allocated space.

config PGTABLE_MAPPING
        bool "Use page table mapping to access object in zsmalloc"
        depends on ZSMALLOC=y
        help
          By default, zsmalloc uses a copy-based object mapping method to
          access allocations that span two pages. However, if a particular
          architecture (e.g. ARM) performs VM mapping faster than copying,
          then you should select this. This causes zsmalloc to use page table
          mapping rather than copying for object mapping.

          You can check the speed with the zsmalloc benchmark:
          https://github.com/spartacus06/zsmapbench

config ZSMALLOC_STAT
        bool "Export zsmalloc statistics"
        depends on ZSMALLOC
        select DEBUG_FS
        help
          This option enables code in zsmalloc to collect various
          statistics about what's happening in zsmalloc and exports that
          information to userspace via debugfs.
          If unsure, say N.

config GENERIC_EARLY_IOREMAP
        bool

config MAX_STACK_SIZE_MB
        int "Maximum user stack size for 32-bit processes (MB)"
        default 80
        range 8 2048
        depends on STACK_GROWSUP && (!64BIT || COMPAT)
        help
          This is the maximum stack size in Megabytes in the VM layout of 32-bit
          user processes when the stack grows upwards (currently only on parisc
          arch). The stack will be located at the highest memory address minus
          the given value, unless the RLIMIT_STACK hard limit is changed to a
          smaller value in which case that is used.

          A sane initial value is 80 MB.

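#
# Runtime illustration (example values only): the RLIMIT_STACK limit, which
# overrides this value when it is smaller, can be inspected and lowered from
# a shell.
#
#   $ ulimit -H -s           # hard stack limit, in kilobytes
#   unlimited
#   $ ulimit -s 65536        # lower the soft stack limit to 64 MB
#
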
config DEFERRED_STRUCT_PAGE_INIT
        bool "Defer initialisation of struct pages to kthreads"
        default n
        depends on SPARSEMEM
        depends on !NEED_PER_CPU_KM
        depends on 64BIT
        select PADATA
        help
          Ordinarily all struct pages are initialised during early boot in a
          single thread. On very large machines this can take a considerable
          amount of time. If this option is set, large machines will bring up
          a subset of memmap at boot and then initialise the rest in parallel.
          This has a potential performance impact on tasks running early in the
          lifetime of the system until these kthreads finish the
          initialisation.

config IDLE_PAGE_TRACKING
        bool "Enable idle page tracking"
        depends on SYSFS && MMU
        select PAGE_EXTENSION if !64BIT
        help
          This feature allows estimating the number of user pages that have
          not been touched during a given period of time. This information can
          be useful to tune memory cgroup limits and/or for job placement
          within a compute cluster.

          See Documentation/admin-guide/mm/idle_page_tracking.rst for
          more details.

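#
# Runtime illustration (rough sketch; interface per
# Documentation/admin-guide/mm/idle_page_tracking.rst): userspace sets idle
# bits in /sys/kernel/mm/page_idle/bitmap and reads them back later; pages
# still marked idle were not touched in between.  The file is accessed in
# 8-byte chunks, one bit per page frame.
#
#   $ dd if=/sys/kernel/mm/page_idle/bitmap bs=8 count=1 2>/dev/null | xxd
#
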
# arch_add_memory() comprehends device memory
config ARCH_HAS_ZONE_DEVICE
        bool

config ZONE_DEVICE
        bool "Device memory (pmem, HMM, etc...) hotplug support"
        depends on MEMORY_HOTPLUG
        depends on MEMORY_HOTREMOVE
        depends on SPARSEMEM_VMEMMAP
        depends on ARCH_HAS_ZONE_DEVICE
        select XARRAY_MULTI
        help
          Device memory hotplug support allows for establishing pmem,
          or other device driver discovered memory regions, in the
          memmap. This allows pfn_to_page() lookups of otherwise
          "device-physical" addresses which is needed for using a DAX
          mapping in an O_DIRECT operation, among other things.

          If FS_DAX is enabled, then say Y.

config DEV_PAGEMAP_OPS
        bool

config HMM_MIRROR
        bool "HMM mirror CPU page table into a device page table"
        depends on (X86_64 || PPC64)
        depends on MMU && 64BIT
        select MMU_NOTIFIER
        select MIGRATE_VMA_HELPER
        help
          Select HMM_MIRROR if you want to mirror a range of the CPU page table of a
          process into a device page table. Here, mirror means "keep synchronized".
          Prerequisites: the device must provide the ability to write-protect its
          page tables (at PAGE_SIZE granularity), and must be able to recover from
          the resulting potential page faults.

config DEVICE_PRIVATE
        bool "Unaddressable device memory (GPU memory, ...)"
        depends on ARCH_HAS_HMM
        select HMM
        select DEV_PAGEMAP_OPS
        help
          Allows creation of struct pages to represent unaddressable device
          memory; i.e., memory that is only accessible from the device (or
          group of devices). You likely also want to select HMM_MIRROR.

config VMAP_PFN
        bool

config FRAME_VECTOR
        bool

config ARCH_USES_HIGH_VMA_FLAGS
        bool

config ARCH_HAS_PKEYS
        bool

config PERCPU_STATS
        bool "Collect percpu memory statistics"
        default n
        help
          This feature collects and exposes statistics via debugfs. The
          information includes global and per chunk statistics, which can
          be used to help understand percpu memory usage.

config GUP_BENCHMARK
        bool "Enable infrastructure for get_user_pages_fast() benchmarking"
        default n
        help
          Provides /sys/kernel/debug/gup_benchmark that helps with testing
          performance of get_user_pages_fast().

          See tools/testing/selftests/vm/gup_benchmark.c

config GUP_GET_PTE_LOW_HIGH
        bool

config READ_ONLY_THP_FOR_FS
        bool "Read-only THP for filesystems (EXPERIMENTAL)"
        depends on TRANSPARENT_HUGEPAGE && SHMEM
        help
          Allow khugepaged to put read-only file-backed pages in THP.

          This is marked experimental because it is a new feature. Write
          support of file THPs will be developed in the next few release
          cycles.

config ARCH_HAS_PTE_SPECIAL
        bool

config MAPPING_DIRTY_HELPERS
        bool

#
# Some architectures require a special hugepage directory format to
# support multiple hugepage sizes. For example, commit a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc.  This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
        bool