355 files changed, 12721 insertions, 8831 deletions
@@ -1895,6 +1895,7 @@ S: Czech Republic N: Seth Jennings E: sjenning@redhat.com D: Creation and maintenance of zswap +D: Creation and maintenace of the zbud allocator N: Jeremy Kerr D: Maintainer of SPU File System @@ -3803,6 +3804,7 @@ N: Dan Streetman E: ddstreet@ieee.org D: Maintenance and development of zswap D: Creation and maintenance of the zpool API +D: Maintenace of the zbud allocator N: Drew Sullivan E: drew@ss.org @@ -4330,6 +4332,7 @@ S: England N: Vitaly Wool E: vitaly.wool@konsulko.com D: Maintenance and development of zswap +D: Maintenance and development of z3fold N: Chris Wright E: chrisw@sous-sol.org diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 1ef69e0271f9..36c57de0a10a 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -22,14 +22,6 @@ Description: device. The reset operation frees all the memory associated with this device. -What: /sys/block/zram<id>/max_comp_streams -Date: February 2014 -Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> -Description: - The max_comp_streams file is read-write and specifies the - number of backend's zcomp_strm compression streams (number of - concurrent compress operations). - What: /sys/block/zram<id>/comp_algorithm Date: February 2014 Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma index dfd755201142..aaf2a5d8b13b 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-cma +++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma @@ -29,3 +29,16 @@ Date: Feb 2024 Contact: Anshuman Khandual <anshuman.khandual@arm.com> Description: the number of pages CMA API succeeded to release + +What: /sys/kernel/mm/cma/<cma-heap-name>/total_pages +Date: Jun 2024 +Contact: Frank van der Linden <fvdl@google.com> +Description: + The size of the CMA area in pages. + +What: /sys/kernel/mm/cma/<cma-heap-name>/available_pages +Date: Jun 2024 +Contact: Frank van der Linden <fvdl@google.com> +Description: + The number of pages in the CMA area that are still + available for CMA allocation. diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index b057eddefbfc..293197f180ad 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -91,6 +91,36 @@ Description: Writing a value to this file sets the update interval of the DAMON context in microseconds as the value. Reading this file returns the value. +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/access_bp +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Writing a value to this file sets the monitoring intervals + auto-tuning target DAMON-observed access events ratio within + the given time interval (aggrs in same directory), in bp + (1/10,000). Reading this file returns the value. + +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/aggrs +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Writing a value to this file sets the time interval to achieve + the monitoring intervals auto-tuning target DAMON-observed + access events ratio (access_bp in same directory) within. + Reading this file returns the value. 
+ +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/min_sample_us +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Writing a value to this file sets the minimum value of + auto-tuned sampling interval in microseconds. Reading this + file returns the value. + +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/max_sample_us +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Writing a value to this file sets the maximum value of + auto-tuned sampling interval in microseconds. Reading this + file returns the value. + What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/nr_regions/min WDate: Mar 2022 @@ -345,6 +375,20 @@ Description: If 'addr' is written to the 'type' file, writing to or reading from this file sets or gets the end address of the address range for the filter. +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/min +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: If 'hugepage_size' is written to the 'type' file, writing to + or reading from this file sets or gets the minimum size of the + hugepage for the filter. + +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/max +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: If 'hugepage_size' is written to the 'type' file, writing to + or reading from this file sets or gets the maximum size of the + hugepage for the filter. + What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/target_idx Date: Dec 2022 Contact: SeongJae Park <sj@kernel.org> @@ -365,6 +409,22 @@ Description: Writing 'Y' or 'N' to this file sets whether to allow or reject applying the scheme's action to the memory that satisfies the 'type' and the 'matching' of the directory. +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/core_filters +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Directory for DAMON core layer-handled DAMOS filters. Files + under this directory works same to those of + /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters + directory. + +What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/ops_filters +Date: Feb 2025 +Contact: SeongJae Park <sj@kernel.org> +Description: Directory for DAMON operations set layer-handled DAMOS filters. + Files under this directory works same to those of + /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters + directory. + What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/nr_tried Date: Mar 2022 Contact: SeongJae Park <sj@kernel.org> diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index 1ef5784c1b84..53faeed7c190 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -971,6 +971,16 @@ unfortunately any spinlock in a ``SLAB_TYPESAFE_BY_RCU`` object must be initialized after each and every call to kmem_cache_alloc(), which renders reference-free spinlock acquisition completely unsafe. Therefore, when using ``SLAB_TYPESAFE_BY_RCU``, make proper use of a reference counter. +If using refcount_t, the specialized refcount_{add|inc}_not_zero_acquire() +and refcount_set_release() APIs should be used to ensure correct operation +ordering when verifying object identity and when initializing newly +allocated objects. 
Acquire fence in refcount_{add|inc}_not_zero_acquire() +ensures that identity checks happen *after* reference count is taken. +refcount_set_release() should be called after a newly allocated object is +fully initialized and release fence ensures that new values are visible +*before* refcount can be successfully taken by other users. Once +refcount_set_release() is called, the object should be considered visible +by other tasks. (Those willing to initialize their locks in a kmem_cache constructor may also use locking, including cache-friendly sequence locking.) diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst index 1576fb93f06c..9bdb30901a93 100644 --- a/Documentation/admin-guide/blockdev/zram.rst +++ b/Documentation/admin-guide/blockdev/zram.rst @@ -54,7 +54,7 @@ The list of possible return codes: If you use 'echo', the returned value is set by the 'echo' utility, and, in general case, something like:: - echo 3 > /sys/block/zram0/max_comp_streams + echo foo > /sys/block/zram0/comp_algorithm if [ $? -ne 0 ]; then handle_error fi @@ -73,21 +73,7 @@ This creates 4 devices: /dev/zram{0,1,2,3} num_devices parameter is optional and tells zram how many devices should be pre-created. Default: 1. -2) Set max number of compression streams -======================================== - -Regardless of the value passed to this attribute, ZRAM will always -allocate multiple compression streams - one per online CPU - thus -allowing several concurrent compression operations. The number of -allocated compression streams goes down when some of the CPUs -become offline. There is no single-compression-stream mode anymore, -unless you are running a UP system or have only 1 CPU online. - -To find out how many streams are currently available:: - - cat /sys/block/zram0/max_comp_streams - -3) Select compression algorithm +2) Select compression algorithm =============================== Using comp_algorithm device attribute one can see available and @@ -107,7 +93,7 @@ Examples:: For the time being, the `comp_algorithm` content shows only compression algorithms that are supported by zram. -4) Set compression algorithm parameters: Optional +3) Set compression algorithm parameters: Optional ================================================= Compression algorithms may support specific parameters which can be @@ -138,7 +124,7 @@ better the compression ratio, it even can take negatives values for some algorithms), for other algorithms `level` is acceleration level (the higher the value the lower the compression ratio). -5) Set Disksize +4) Set Disksize =============== Set disk size by writing the value to sysfs node 'disksize'. @@ -158,7 +144,7 @@ There is little point creating a zram of greater than twice the size of memory since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. -6) Set memory limit: Optional +5) Set memory limit: Optional ============================= Set memory limit by writing the value to sysfs node 'mem_limit'. 
@@ -177,7 +163,7 @@ Examples:: # To disable memory limit echo 0 > /sys/block/zram0/mem_limit -7) Activate +6) Activate =========== :: @@ -188,7 +174,7 @@ Examples:: mkfs.ext4 /dev/zram1 mount /dev/zram1 /tmp -8) Add/remove zram devices +7) Add/remove zram devices ========================== zram provides a control interface, which enables dynamic (on-demand) device @@ -208,7 +194,7 @@ execute:: echo X > /sys/class/zram-control/hot_remove -9) Stats +8) Stats ======== Per-device statistics are exported as various nodes under /sys/block/zram<id>/ @@ -228,8 +214,6 @@ mem_limit WO specifies the maximum amount of memory ZRAM can writeback_limit WO specifies the maximum amount of write IO zram can write out to backing device as 4KB unit writeback_limit_enable RW show and set writeback_limit feature -max_comp_streams RW the number of possible concurrent compress - operations comp_algorithm RW show and change the compression algorithm algorithm_params WO setup compression algorithm parameters compact WO trigger memory compaction @@ -310,7 +294,7 @@ a single line of text and contains the following stats separated by whitespace: Unit: 4K bytes ============== ============================================================= -10) Deactivate +9) Deactivate ============== :: @@ -318,7 +302,7 @@ a single line of text and contains the following stats separated by whitespace: swapoff /dev/zram0 umount /dev/zram1 -11) Reset +10) Reset ========= Write any positive value to 'reset' sysfs node:: diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 02b8206a3594..d6b1db8cc7eb 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -610,6 +610,10 @@ memory.stat file includes following statistics: 'rss + mapped_file" will give you resident set size of cgroup. + Note that some kernel configurations might account complete larger + allocations (e.g., THP) towards 'rss' and 'mapped_file', even if + only some, but not all that memory is mapped. + (Note: file and shmem may be shared among other cgroups. In that case, mapped_file is accounted only when the memory cgroup is owner of page cache.) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index f293a13b42ed..1a16ce68a4d7 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1445,7 +1445,10 @@ The following nested keys are defined. anon Amount of memory used in anonymous mappings such as - brk(), sbrk(), and mmap(MAP_ANONYMOUS) + brk(), sbrk(), and mmap(MAP_ANONYMOUS). Note that + some kernel configurations might account complete larger + allocations (e.g., THP) if only some, but not all the + memory of such an allocation is mapped anymore. file Amount of memory used to cache filesystem data, @@ -1488,7 +1491,10 @@ The following nested keys are defined. Amount of application memory swapped out to zswap. file_mapped - Amount of cached filesystem data mapped with mmap() + Amount of cached filesystem data mapped with mmap(). Note + that some kernel configurations might account complete + larger allocations (e.g., THP) if only some, but not + not all the memory of such an allocation is mapped. file_dirty Amount of cached filesystem data that was modified but @@ -1560,6 +1566,12 @@ The following nested keys are defined. 
workingset_nodereclaim Number of times a shadow node has been reclaimed + pswpin (npn) + Number of pages swapped into memory + + pswpout (npn) + Number of pages swapped out of memory + pgscan (npn) Amount of scanned pages (in an inactive LRU list) @@ -1575,6 +1587,9 @@ The following nested keys are defined. pgscan_khugepaged (npn) Amount of scanned pages by khugepaged (in an inactive LRU list) + pgscan_proactive (npn) + Amount of scanned pages proactively (in an inactive LRU list) + pgsteal_kswapd (npn) Amount of reclaimed pages by kswapd @@ -1584,6 +1599,9 @@ The following nested keys are defined. pgsteal_khugepaged (npn) Amount of reclaimed pages by khugepaged + pgsteal_proactive (npn) + Amount of reclaimed pages proactively + pgfault (npn) Total number of page faults incurred @@ -1661,6 +1679,9 @@ The following nested keys are defined. pgdemote_khugepaged Number of pages demoted by khugepaged. + pgdemote_proactive + Number of pages demoted by proactively. + hugetlb Amount of memory used by hugetlb pages. This metric only shows up if hugetlb usage is accounted for in memory.current (i.e. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 3435a062a208..559f4fe51824 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1866,7 +1866,7 @@ hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET registers. Default set by CONFIG_HPET_MMAP_DEFAULT. - hugepages= [HW] Number of HugeTLB pages to allocate at boot. + hugepages= [HW,EARLY] Number of HugeTLB pages to allocate at boot. If this follows hugepagesz (below), it specifies the number of pages of hugepagesz to be allocated. If this is the first HugeTLB parameter on the command @@ -1878,15 +1878,24 @@ <node>:<integer>[,<node>:<integer>] hugepagesz= - [HW] The size of the HugeTLB pages. This is used in - conjunction with hugepages (above) to allocate huge - pages of a specific size at boot. The pair - hugepagesz=X hugepages=Y can be specified once for - each supported huge page size. Huge page sizes are - architecture dependent. See also + [HW,EARLY] The size of the HugeTLB pages. This is + used in conjunction with hugepages (above) to + allocate huge pages of a specific size at boot. The + pair hugepagesz=X hugepages=Y can be specified once + for each supported huge page size. Huge page sizes + are architecture dependent. See also Documentation/admin-guide/mm/hugetlbpage.rst. Format: size[KMG] + hugepage_alloc_threads= + [HW] The number of threads that should be used to + allocate hugepages during boot. This option can be + used to improve system bootup time when allocating + a large amount of huge pages. + The default value is 25% of the available hardware threads. + + Note that this parameter only applies to non-gigantic huge pages. + hugetlb_cma= [HW,CMA,EARLY] The size of a CMA area used for allocation of gigantic hugepages. Or using node format, the size of a CMA area per node can be specified. @@ -1897,6 +1906,13 @@ hugepages using the CMA allocator. If enabled, the boot-time allocation of gigantic hugepages is skipped. + hugetlb_cma_only= + [HW,CMA,EARLY] When allocating new HugeTLB pages, only + try to allocate from the CMA areas. + + This option does nothing if hugetlb_cma= is not also + specified. + hugetlb_free_vmemmap= [KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP enabled. 
diff --git a/Documentation/admin-guide/mm/cma_debugfs.rst b/Documentation/admin-guide/mm/cma_debugfs.rst index 7367e6294ef6..4120e9cb0cd5 100644 --- a/Documentation/admin-guide/mm/cma_debugfs.rst +++ b/Documentation/admin-guide/mm/cma_debugfs.rst @@ -12,10 +12,16 @@ its CMA name like below: The structure of the files created under that directory is as follows: - - [RO] base_pfn: The base PFN (Page Frame Number) of the zone. + - [RO] base_pfn: The base PFN (Page Frame Number) of the CMA area. + This is the same as ranges/0/base_pfn. - [RO] count: Amount of memory in the CMA area. - [RO] order_per_bit: Order of pages represented by one bit. - - [RO] bitmap: The bitmap of page states in the zone. + - [RO] bitmap: The bitmap of allocated pages in the area. + This is the same as ranges/0/base_pfn. + - [RO] ranges/N/base_pfn: The base PFN of contiguous range N + in the CMA area. + - [RO] ranges/N/bitmap: The bit map of allocated pages in + range N in the CMA area. - [WO] alloc: Allocate N pages from that CMA area. For example:: echo 5 > <debugfs>/cma/<cma_name>/alloc diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 47a44bd348ab..ced2013db3df 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -64,6 +64,7 @@ comma (","). │ │ │ │ :ref:`0 <sysfs_context>`/avail_operations,operations │ │ │ │ │ :ref:`monitoring_attrs <sysfs_monitoring_attrs>`/ │ │ │ │ │ │ intervals/sample_us,aggr_us,update_us + │ │ │ │ │ │ │ intervals_goal/access_bp,aggrs,min_sample_us,max_sample_us │ │ │ │ │ │ nr_regions/min,max │ │ │ │ │ :ref:`targets <sysfs_targets>`/nr_targets │ │ │ │ │ │ :ref:`0 <sysfs_target>`/pid_target @@ -82,8 +83,8 @@ comma (","). │ │ │ │ │ │ │ │ :ref:`goals <sysfs_schemes_quota_goals>`/nr_goals │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value │ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low - │ │ │ │ │ │ │ :ref:`filters <sysfs_filters>`/nr_filters - │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx + │ │ │ │ │ │ │ :ref:`{core_,ops_,}filters <sysfs_filters>`/nr_filters + │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max │ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions <sysfs_schemes_tried_regions>`/total_bytes │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed @@ -132,6 +133,11 @@ Users can write below commands for the kdamond to the ``state`` file. - ``off``: Stop running. - ``commit``: Read the user inputs in the sysfs files except ``state`` file again. +- ``update_tuned_intervals``: Update the contents of ``sample_us`` and + ``aggr_us`` files of the kdamond with the auto-tuning applied ``sampling + interval`` and ``aggregation interval`` for the files. Please refer to + :ref:`intervals_goal section <damon_usage_sysfs_monitoring_intervals_goal>` + for more details. - ``commit_schemes_quota_goals``: Read the DAMON-based operation schemes' :ref:`quota goals <sysfs_schemes_quota_goals>`. - ``update_schemes_stats``: Update the contents of stats files for each @@ -213,6 +219,25 @@ writing to and rading from the files. For more details about the intervals and monitoring regions range, please refer to the Design document (:doc:`/mm/damon/design`). +.. 
_damon_usage_sysfs_monitoring_intervals_goal: + +contexts/<N>/monitoring_attrs/intervals/intervals_goal/ +------------------------------------------------------- + +Under the ``intervals`` directory, one directory for automated tuning of +``sample_us`` and ``aggr_us``, namely ``intervals_goal`` directory also exists. +Under the directory, four files for the auto-tuning control, namely +``access_bp``, ``aggrs``, ``min_sample_us`` and ``max_sample_us`` exist. +Please refer to the :ref:`design document of the feature +<damon_design_monitoring_intervals_autotuning>` for the internal of the tuning +mechanism. Reading and writing the four files under ``intervals_goal`` +directory shows and updates the tuning parameters that described in the +:ref:design doc <damon_design_monitoring_intervals_autotuning>` with the same +names. The tuning starts with the user-set ``sample_us`` and ``aggr_us``. The +tuning-applied current values of the two intervals can be read from the +``sample_us`` and ``aggr_us`` files after writing ``update_tuned_intervals`` to +the ``state`` file. + .. _sysfs_targets: contexts/<N>/targets/ @@ -282,9 +307,10 @@ to ``N-1``. Each directory represents each DAMON-based operation scheme. schemes/<N>/ ------------ -In each scheme directory, five directories (``access_pattern``, ``quotas``, -``watermarks``, ``filters``, ``stats``, and ``tried_regions``) and three files -(``action``, ``target_nid`` and ``apply_interval``) exist. +In each scheme directory, seven directories (``access_pattern``, ``quotas``, +``watermarks``, ``core_filters``, ``ops_filters``, ``filters``, ``stats``, and +``tried_regions``) and three files (``action``, ``target_nid`` and +``apply_interval``) exist. The ``action`` file is for setting and getting the scheme's :ref:`action <damon_design_damos_action>`. The keywords that can be written to and read @@ -395,33 +421,43 @@ The ``interval`` should written in microseconds unit. .. _sysfs_filters: -schemes/<N>/filters/ --------------------- +schemes/<N>/{core\_,ops\_,}filters/ +----------------------------------- -The directory for the :ref:`filters <damon_design_damos_filters>` of the given +Directories for :ref:`filters <damon_design_damos_filters>` of the given DAMON-based operation scheme. -In the beginning, this directory has only one file, ``nr_filters``. Writing a +``core_filters`` and ``ops_filters`` directories are for the filters handled by +the DAMON core layer and operations set layer, respectively. ``filters`` +directory can be used for installing filters regardless of their handled +layers. Filters that requested by ``core_filters`` and ``ops_filters`` will be +installed before those of ``filters``. All three directories have same files. + +Use of ``filters`` directory can make expecting evaluation orders of given +filters with the files under directory bit confusing. Users are hence +recommended to use ``core_filters`` and ``ops_filters`` directories. The +``filters`` directory could be deprecated in future. + +In the beginning, the directory has only one file, ``nr_filters``. Writing a number (``N``) to the file creates the number of child directories named ``0`` to ``N-1``. Each directory represents each filter. The filters are evaluated in the numeric order. -Each filter directory contains seven files, namely ``type``, ``matching``, -``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. 
-To ``type`` file, you can write one of five special keywords: ``anon`` for -anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young -pages, ``addr`` for specific address range (an open-ended interval), or -``target`` for specific DAMON monitoring target filtering. Meaning of the -types are same to the description on the :ref:`design doc -<damon_design_damos_filters>`. - -In case of the memory cgroup filtering, you can specify the memory cgroup of -the interest by writing the path of the memory cgroup from the cgroups mount -point to ``memcg_path`` file. In case of the address range filtering, you can -specify the start and end address of the range to ``addr_start`` and -``addr_end`` files, respectively. For the DAMON monitoring target filtering, -you can specify the index of the target between the list of the DAMON context's -monitoring targets list to ``target_idx`` file. +Each filter directory contains nine files, namely ``type``, ``matching``, +``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, ``min``, ``max`` +and ``target_idx``. To ``type`` file, you can write the type of the filter. +Refer to :ref:`the design doc <damon_design_damos_filters>` for available type +names, their meaning and on what layer those are handled. + +For ``memcg`` type, you can specify the memory cgroup of the interest by +writing the path of the memory cgroup from the cgroups mount point to +``memcg_path`` file. For ``addr`` type, you can specify the start and end +address of the range (open-ended interval) to ``addr_start`` and ``addr_end`` +files, respectively. For ``hugepage_size`` type, you can specify the minimum +and maximum size of the range (closed interval) to ``min`` and ``max`` files, +respectively. For ``target`` type, you can specify the index of the target +between the list of the DAMON context's monitoring targets list to +``target_idx`` file. You can write ``Y`` or ``N`` to ``matching`` file to specify whether the filter is for memory that matches the ``type``. You can write ``Y`` or ``N`` to @@ -431,6 +467,7 @@ the ``type`` and ``matching`` should be allowed or not. For example, below restricts a DAMOS action to be applied to only non-anonymous pages of all memory cgroups except ``/having_care_already``.:: + # cd ops_filters/0/ # echo 2 > nr_filters # # disallow anonymous pages echo anon > 0/type diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst index f34a0d798d5b..67a941903fd2 100644 --- a/Documentation/admin-guide/mm/hugetlbpage.rst +++ b/Documentation/admin-guide/mm/hugetlbpage.rst @@ -145,7 +145,17 @@ hugepages It will allocate 1 2M hugepage on node0 and 2 2M hugepages on node1. If the node number is invalid, the parameter will be ignored. +hugepage_alloc_threads + Specify the number of threads that should be used to allocate hugepages + during boot. This parameter can be used to improve system bootup time + when allocating a large amount of huge pages. + The default value is 25% of the available hardware threads. + Example to use 8 allocation threads:: + + hugepage_alloc_threads=8 + + Note that this parameter only applies to non-gigantic huge pages. default_hugepagesz Specify the default huge page size. This parameter can only be specified once on the command line. 
default_hugepagesz can diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst index caba0f52dd36..afce291649dd 100644 --- a/Documentation/admin-guide/mm/pagemap.rst +++ b/Documentation/admin-guide/mm/pagemap.rst @@ -21,7 +21,8 @@ There are four components to pagemap: * Bit 56 page exclusively mapped (since 4.2) * Bit 57 pte is uffd-wp write-protected (since 5.13) (see Documentation/admin-guide/mm/userfaultfd.rst) - * Bits 58-60 zero + * Bit 58 pte is a guard region (since 6.15) (see madvise (2) man page) + * Bits 59-60 zero * Bit 61 page is file-page or shared-anon (since 3.5) * Bit 62 page swapped * Bit 63 page present @@ -37,12 +38,28 @@ There are four components to pagemap: precisely which pages are mapped (or in swap) and comparing mapped pages between processes. + Traditionally, bit 56 indicates that a page is mapped exactly once and bit + 56 is clear when a page is mapped multiple times, even when mapped in the + same process multiple times. In some kernel configurations, the semantics + for pages part of a larger allocation (e.g., THP) can differ: bit 56 is set + if all pages part of the corresponding large allocation are *certainly* + mapped in the same process, even if the page is mapped multiple times in that + process. Bit 56 is clear when any page page of the larger allocation + is *maybe* mapped in a different process. In some cases, a large allocation + might be treated as "maybe mapped by multiple processes" even though this + is no longer the case. + Efficient users of this interface will use ``/proc/pid/maps`` to determine which areas of memory are actually mapped and llseek to skip over unmapped regions. * ``/proc/kpagecount``. This file contains a 64-bit count of the number of - times each page is mapped, indexed by PFN. + times each page is mapped, indexed by PFN. Some kernel configurations do + not track the precise number of times a page part of a larger allocation + (e.g., THP) is mapped. In these configurations, the average number of + mappings per page in this larger allocation is returned instead. However, + if any page of the large allocation is mapped, the returned value will + be at least 1. The page-types tool in the tools/mm directory can be used to query the number of times a page is mapped. diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst index 3598dcd7dbe7..fd3370aa43fe 100644 --- a/Documentation/admin-guide/mm/zswap.rst +++ b/Documentation/admin-guide/mm/zswap.rst @@ -60,15 +60,13 @@ accessed. The compressed memory pool grows on demand and shrinks as compressed pages are freed. The pool is not preallocated. By default, a zpool of type selected in ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created, but it can be overridden at boot time by setting the ``zpool`` attribute, -e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs +e.g. ``zswap.zpool=zsmalloc``. It can also be changed at runtime using the sysfs ``zpool`` attribute, e.g.:: - echo zbud > /sys/module/zswap/parameters/zpool + echo zsmalloc > /sys/module/zswap/parameters/zpool -The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which -means the compression ratio will always be 2:1 or worse (because of half-full -zbud pages). The zsmalloc type zpool has a more complex compressed page -storage method, and it can achieve greater storage densities. +The zsmalloc type zpool has a complex compressed page storage method, and it +can achieve great storage densities. 
When a swap page is passed from swapout to zswap, zswap maintains a mapping of the swap entry, a combination of the swap type and swap offset, to the zpool diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index f48eaa98d22d..8290177b4f75 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm: - compact_memory - compaction_proactiveness - compact_unevictable_allowed +- defrag_mode - dirty_background_bytes - dirty_background_ratio - dirty_bytes @@ -145,6 +146,14 @@ On CONFIG_PREEMPT_RT the default value is 0 in order to avoid a page fault, due to compaction, which would block the task from becoming active until the fault is resolved. +defrag_mode +=========== + +When set to 1, the page allocator tries harder to avoid fragmentation +and maintain the ability to produce huge pages / higher-order pages. + +It is recommended to enable this right after boot, as fragmentation, +once it occurred, can be long-lasting or even permanent. dirty_background_bytes ====================== diff --git a/Documentation/arch/arm64/ptdump.rst b/Documentation/arch/arm64/ptdump.rst index 5dcfc5d7cddf..51eb902ba41a 100644 --- a/Documentation/arch/arm64/ptdump.rst +++ b/Documentation/arch/arm64/ptdump.rst @@ -22,8 +22,6 @@ offlining of memory being accessed by the ptdump code. In order to dump the kernel page tables, enable the following configurations and mount debugfs:: - CONFIG_GENERIC_PTDUMP=y - CONFIG_PTDUMP_CORE=y CONFIG_PTDUMP_DEBUGFS=y mount -t debugfs nodev /sys/kernel/debug diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst index 79a009ce11df..94e628c1eb49 100644 --- a/Documentation/core-api/refcount-vs-atomic.rst +++ b/Documentation/core-api/refcount-vs-atomic.rst @@ -86,7 +86,19 @@ Memory ordering guarantee changes: * none (both fully unordered) -case 2) - increment-based ops that return no value +case 2) - non-"Read/Modify/Write" (RMW) ops with release ordering +----------------------------------------------------------------- + +Function changes: + + * atomic_set_release() --> refcount_set_release() + +Memory ordering guarantee changes: + + * none (both provide RELEASE ordering) + + +case 3) - increment-based ops that return no value -------------------------------------------------- Function changes: @@ -98,7 +110,7 @@ Memory ordering guarantee changes: * none (both fully unordered) -case 3) - decrement-based RMW ops that return no value +case 4) - decrement-based RMW ops that return no value ------------------------------------------------------ Function changes: @@ -110,7 +122,7 @@ Memory ordering guarantee changes: * fully unordered --> RELEASE ordering -case 4) - increment-based RMW ops that return a value +case 5) - increment-based RMW ops that return a value ----------------------------------------------------- Function changes: @@ -126,7 +138,20 @@ Memory ordering guarantees changes: result of obtaining pointer to the object! 
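The whatisRCU.rst and refcount-vs-atomic.rst hunks above introduce acquire/release-ordered refcount helpers for ``SLAB_TYPESAFE_BY_RCU`` lookups. As an illustrative aside (not part of the patch), here is a minimal sketch of how the two sides might pair up; ``struct obj``, ``obj_cache``, ``table_find()`` and ``table_publish()`` are hypothetical stand-ins::

    #include <linux/types.h>
    #include <linux/refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            refcount_t ref;
            u64 key;
            /* ... other fields initialized before publication ... */
    };

    static struct kmem_cache *obj_cache;        /* created with SLAB_TYPESAFE_BY_RCU */
    static struct obj *table_find(u64 key);     /* hypothetical lookup */
    static void table_publish(struct obj *o);   /* hypothetical insert */

    /* Lookup side: pin a reference first, then verify identity.  The
     * acquire fence in refcount_inc_not_zero_acquire() keeps the o->key
     * check from being reordered before the refcount is taken. */
    static struct obj *obj_lookup(u64 key)
    {
            struct obj *o;

            rcu_read_lock();
            o = table_find(key);
            if (o && refcount_inc_not_zero_acquire(&o->ref)) {
                    if (o->key != key) {
                            /* Object was recycled; drop our reference. */
                            if (refcount_dec_and_test(&o->ref))
                                    kmem_cache_free(obj_cache, o);
                            o = NULL;
                    }
            } else {
                    o = NULL;
            }
            rcu_read_unlock();
            return o;
    }

    /* Allocation side: fully initialize, then make the object reachable.
     * The release fence in refcount_set_release() makes the new field
     * values visible before a lookup can observe a non-zero count. */
    static struct obj *obj_create(u64 key)
    {
            struct obj *o = kmem_cache_alloc(obj_cache, GFP_KERNEL);

            if (!o)
                    return NULL;
            o->key = key;
            refcount_set_release(&o->ref, 1);
            table_publish(o);
            return o;
    }

The ordering matters because a ``SLAB_TYPESAFE_BY_RCU`` slab may hand the same memory to a new owner while a lookup still holds an RCU-protected pointer to it, so identity can only be re-checked after the reference is pinned.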
-case 5) - generic dec/sub decrement-based RMW ops that return a value +case 6) - increment-based RMW ops with acquire ordering that return a value +--------------------------------------------------------------------------- + +Function changes: + + * atomic_inc_not_zero() --> refcount_inc_not_zero_acquire() + * no atomic counterpart --> refcount_add_not_zero_acquire() + +Memory ordering guarantees changes: + + * fully ordered --> ACQUIRE ordering on success + + +case 7) - generic dec/sub decrement-based RMW ops that return a value --------------------------------------------------------------------- Function changes: @@ -139,7 +164,7 @@ Memory ordering guarantees changes: * fully ordered --> RELEASE ordering + ACQUIRE ordering on success -case 6) other decrement-based RMW ops that return a value +case 8) other decrement-based RMW ops that return a value --------------------------------------------------------- Function changes: @@ -154,7 +179,7 @@ Memory ordering guarantees changes: .. note:: atomic_add_unless() only provides full order on success. -case 7) - lock-based RMW +case 9) - lock-based RMW ------------------------ Function changes: diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index f6a3eef4fe7f..c6c91cbd0c3c 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -489,7 +489,19 @@ Storing ``NULL`` into any index of a multi-index entry will set the entry at every index to ``NULL`` and dissolve the tie. A multi-index entry can be split into entries occupying smaller ranges by calling xas_split_alloc() without the xa_lock held, followed by taking the lock -and calling xas_split(). +and calling xas_split() or calling xas_try_split() with xa_lock. The +difference between xas_split_alloc()+xas_split() and xas_try_alloc() is +that xas_split_alloc() + xas_split() split the entry from the original +order to the new order in one shot uniformly, whereas xas_try_split() +iteratively splits the entry containing the index non-uniformly. +For example, to split an order-9 entry, which takes 2^(9-6)=8 slots, +assuming ``XA_CHUNK_SHIFT`` is 6, xas_split_alloc() + xas_split() need +8 xa_node. xas_try_split() splits the order-9 entry into +2 order-8 entries, then split one order-8 entry, based on the given index, +to 2 order-7 entries, ..., and split one order-1 entry to 2 order-0 entries. +When splitting the order-6 entry and a new xa_node is needed, xas_try_split() +will try to allocate one if possible. As a result, xas_try_split() would only +need 1 xa_node instead of 8. Functions and structures ======================== diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst index 719e90f1988e..08dd5e254cc5 100644 --- a/Documentation/filesystems/dax.rst +++ b/Documentation/filesystems/dax.rst @@ -207,7 +207,6 @@ implement direct_access. These block devices may be used for inspiration: - brd: RAM backed block device driver -- dcssblk: s390 dcss block device driver - pmem: NVDIMM persistent memory driver diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 09f0aed5a08b..f97692b31a2d 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -502,9 +502,25 @@ process, its PSS will be 1500. "Pss_Dirty" is the portion of PSS which consists of dirty pages. ("Pss_Clean" is not included, but it can be calculated by subtracting "Pss_Dirty" from "Pss".) 
-Note that even a page which is part of a MAP_SHARED mapping, but has only -a single pte mapped, i.e. is currently used by only one process, is accounted -as private and not as shared. +Traditionally, a page is accounted as "private" if it is mapped exactly once, +and a page is accounted as "shared" when mapped multiple times, even when +mapped in the same process multiple times. Note that this accounting is +independent of MAP_SHARED. + +In some kernel configurations, the semantics of pages part of a larger +allocation (e.g., THP) can differ: a page is accounted as "private" if all +pages part of the corresponding large allocation are *certainly* mapped in the +same process, even if the page is mapped multiple times in that process. A +page is accounted as "shared" if any page page of the larger allocation +is *maybe* mapped in a different process. In some cases, a large allocation +might be treated as "maybe mapped by multiple processes" even though this +is no longer the case. + +Some kernel configurations do not track the precise number of times a page part +of a larger allocation is mapped. In this case, when calculating the PSS, the +average number of mappings per page in this larger allocation might be used +as an approximation for the number of mappings of a page. The PSS calculation +will be imprecise in this case. "Referenced" indicates the amount of memory currently marked as referenced or accessed. @@ -686,6 +702,11 @@ Where: node locality page counters (N0 == node0, N1 == node1, ...) and the kernel page size, in KB, that is backing the mapping up. +Note that some kernel configurations do not track the precise number of times +a page part of a larger allocation (e.g., THP) is mapped. In these +configurations, "mapmax" might corresponds to the average number of mappings +per page in such a larger allocation instead. + 1.2 Kernel data --------------- @@ -1060,6 +1081,8 @@ Example output. You may not have all of these fields. FilePmdMapped: 0 kB CmaTotal: 0 kB CmaFree: 0 kB + Unaccepted: 0 kB + Balloon: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 @@ -1132,9 +1155,15 @@ Dirty Writeback Memory which is actively being written back to the disk AnonPages - Non-file backed pages mapped into userspace page tables + Non-file backed pages mapped into userspace page tables. Note that + some kernel configurations might consider all pages part of a + larger allocation (e.g., THP) as "mapped", as soon as a single + page is mapped. Mapped - files which have been mmapped, such as libraries + files which have been mmapped, such as libraries. Note that some + kernel configurations might consider all pages part of a larger + allocation (e.g., THP) as "mapped", as soon as a single page is + mapped. Shmem Total memory used by shared memory (shmem) and tmpfs KReclaimable @@ -1228,6 +1257,10 @@ CmaTotal Memory reserved for the Contiguous Memory Allocator (CMA) CmaFree Free remaining memory in the CMA reserves +Unaccepted + Memory that has not been accepted by the guest +Balloon + Memory returned to Host by VM Balloon Drivers HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp, Hugepagesize, Hugetlb See Documentation/admin-guide/mm/hugetlbpage.rst. 
DirectMap4k, DirectMap2M, DirectMap1G diff --git a/Documentation/mm/balance.rst b/Documentation/mm/balance.rst index abaa78561c31..c4962c89a7f5 100644 --- a/Documentation/mm/balance.rst +++ b/Documentation/mm/balance.rst @@ -81,7 +81,7 @@ Page stealing from process memory and shm is done if stealing the page would alleviate memory pressure on any zone in the page's node that has fallen below its watermark. -watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These +watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These are per-zone fields, used to determine when a zone needs to be balanced. When the number of pages falls below watermark[WMARK_MIN], the hysteric field low_on_memory gets set. This stays set till the number of free pages becomes diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index e28c6a1b40ae..f12d33749329 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -313,6 +313,10 @@ sufficient for the given purpose, it shouldn't be unnecessarily further lowered. It is recommended to be set proportional to ``aggregation interval``. By default, the ratio is set as ``1/20``, and it is still recommended. +Based on the manual tuning guide, DAMON provides more intuitive knob-based +intervals auto tuning mechanism. Please refer to :ref:`the design document of +the feature <damon_design_monitoring_intervals_autotuning>` for detail. + Refer to below documents for an example tuning based on the above guide. .. toctree:: @@ -321,6 +325,52 @@ Refer to below documents for an example tuning based on the above guide. monitoring_intervals_tuning_example +.. _damon_design_monitoring_intervals_autotuning: + +Monitoring Intervals Auto-tuning +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DAMON provides automatic tuning of the ``sampling interval`` and ``aggregation +interval`` based on the :ref:`the tuning guide idea +<damon_design_monitoring_params_tuning_guide>`. The tuning mechanism allows +users to set the aimed amount of access events to observe via DAMON within +given time interval. The target can be specified by the user as a ratio of +DAMON-observed access events to the theoretical maximum amount of the events +(``access_bp``) that measured within a given number of aggregations +(``aggrs``). + +The DAMON-observed access events are calculated in byte granularity based on +DAMON :ref:`region assumption <damon_design_region_based_sampling>`. For +example, if a region of size ``X`` bytes of ``Y`` ``nr_accesses`` is found, it +means ``X * Y`` access events are observed by DAMON. Theoretical maximum +access events for the region is calculated in same way, but replacing ``Y`` +with theoretical maximum ``nr_accesses``, which can be calculated as +``aggregation interval / sampling interval``. + +The mechanism calculates the ratio of access events for ``aggrs`` aggregations, +and increases or decrease the ``sampleing interval`` and ``aggregation +interval`` in same ratio, if the observed access ratio is lower or higher than +the target, respectively. The ratio of the intervals change is decided in +proportion to the distance between current samples ratio and the target ratio. + +The user can further set the minimum and maximum ``sampling interval`` that can +be set by the tuning mechanism using two parameters (``min_sample_us`` and +``max_sample_us``). 
Because the tuning mechanism changes ``sampling interval`` +and ``aggregation interval`` in same ratio always, the minimum and maximum +``aggregation interval`` after each of the tuning changes can automatically set +together. + +The tuning is turned off by default, and need to be set explicitly by the user. +As a rule of thumbs and the Parreto principle, 4% access samples ratio target +is recommended. Note that Parreto principle (80/20 rule) has applied twice. +That is, assumes 4% (20% of 20%) DAMON-observed access events ratio (source) +to capture 64% (80% multipled by 80%) real access events (outcomes). + +To know how user-space can use this feature via :ref:`DAMON sysfs interface +<sysfs_interface>`, refer to :ref:`intervals_goal <sysfs_scheme>` part of +the documentation. + + .. _damon_design_damos: Operation Schemes @@ -569,11 +619,22 @@ number of filters for each scheme. Each filter specifies - whether it is to allow (include) or reject (exclude) applying the scheme's action to the memory (``allow``). -When multiple filters are installed, each filter is evaluated in the installed -order. If a part of memory is matched to one of the filter, next filters are -ignored. If the memory passes through the filters evaluation stage because it -is not matched to any of the filters, applying the scheme's action to it is -allowed, same to the behavior when no filter exists. +For efficient handling of filters, some types of filters are handled by the +core layer, while others are handled by operations set. In the latter case, +hence, support of the filter types depends on the DAMON operations set. In +case of the core layer-handled filters, the memory regions that excluded by the +filter are not counted as the scheme has tried to the region. In contrast, if +a memory regions is filtered by an operations set layer-handled filter, it is +counted as the scheme has tried. This difference affects the statistics. + +When multiple filters are installed, the group of filters that handled by the +core layer are evaluated first. After that, the group of filters that handled +by the operations layer are evaluated. Filters in each of the groups are +evaluated in the installed order. If a part of memory is matched to one of the +filter, next filters are ignored. If the part passes through the filters +evaluation stage because it is not matched to any of the filters, applying the +scheme's action to it depends on the last filter's allowance type. If the last +filter was for allowing, the part of memory will be rejected, and vice versa. For example, let's assume 1) a filter for allowing anonymous pages and 2) another filter for rejecting young pages are installed in the order. If a page @@ -585,39 +646,29 @@ second reject-filter blocks it. If the page is neither anonymous nor young, the page will pass through the filters evaluation stage since there is no matching filter, and the action will be applied to the page. -Note that the action can equally be applied to memory that either explicitly -filter-allowed or filters evaluation stage passed. It means that installing -allow-filters at the end of the list makes no practical change but only -filters-checking overhead. - -For efficient handling of filters, some types of filters are handled by the -core layer, while others are handled by operations set. In the latter case, -hence, support of the filter types depends on the DAMON operations set. 
In -case of the core layer-handled filters, the memory regions that excluded by the -filter are not counted as the scheme has tried to the region. In contrast, if -a memory regions is filtered by an operations set layer-handled filter, it is -counted as the scheme has tried. This difference affects the statistics. - Below ``type`` of filters are currently supported. -- anonymous page - - Applied to pages that containing data that not stored in files. - - Handled by operations set layer. Supported by only ``paddr`` set. -- memory cgroup - - Applied to pages that belonging to a given cgroup. - - Handled by operations set layer. Supported by only ``paddr`` set. -- young page - - Applied to pages that are accessed after the last access check from the - scheme. - - Handled by operations set layer. Supported by only ``paddr`` set. -- address range - - Applied to pages that belonging to a given address range. - - Handled by the core logic. -- DAMON monitoring target - - Applied to pages that belonging to a given DAMON monitoring target. - - Handled by the core logic. - -To know how user-space can set the watermarks via :ref:`DAMON sysfs interface +- Core layer handled + - addr + - Applied to pages that belonging to a given address range. + - target + - Applied to pages that belonging to a given DAMON monitoring target. +- Operations layer handled, supported by only ``paddr`` operations set. + - anon + - Applied to pages that containing data that not stored in files. + - active + - Applied to active pages. + - memcg + - Applied to pages that belonging to a given cgroup. + - young + - Applied to pages that are accessed after the last access check from the + scheme. + - hugepage_size + - Applied to pages that managed in a given size range. + - unmapped + - Applied to pages that unmapped. + +To know how user-space can set the filters via :ref:`DAMON sysfs interface <sysfs_interface>`, refer to :ref:`filters <sysfs_filters>` part of the documentation. diff --git a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst index 334a854efb40..7207cbed591f 100644 --- a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst +++ b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst @@ -36,7 +36,7 @@ Then, list the DAMON-found regions of different access patterns, sorted by the "access temperature". "Access temperature" is a metric representing the access-hotness of a region. It is calculated as a weighted sum of the access frequency and the age of the region. If the access frequency is 0 %, the -temperature is multipled by minus one. That is, if a region is not accessed, +temperature is multiplied by minus one. That is, if a region is not accessed, it gets minus temperature and it gets lower as not accessed for longer time. The sorting is in temperature-ascendint order, so the region at the top of the list is the coldest, and the one at the bottom is the hottest one. :: @@ -58,11 +58,11 @@ list is the coldest, and the one at the bottom is the hottest one. :: The list shows not seemingly hot regions, and only minimum access pattern diversity. Every region has zero access frequency. The number of region is 10, which is the default ``min_nr_regions value``. Size of each region is also -nearly idential. We can suspect this is because “adaptive regions adjustment” +nearly identical. We can suspect this is because “adaptive regions adjustment” mechanism was not well working. 
As the guide suggested, we can get relative hotness of regions using ``age`` as the recency information. That would be better than nothing, but given the fact that the longest age is only about 6 -seconds while we waited about ten minuts, it is unclear how useful this will +seconds while we waited about ten minutes, it is unclear how useful this will be. The temperature ranges to total size of regions of each range histogram @@ -190,7 +190,7 @@ for sampling and aggregation intervals, respectively). :: The number of regions having different access patterns has significantly increased. Size of each region is also more varied. Total size of non-zero access frequency regions is also significantly increased. Maybe this is already -good enough to make some meaningful memory management efficieny changes. +good enough to make some meaningful memory management efficiency changes. 800ms/16s intervals: Another bias ================================= diff --git a/Documentation/mm/hmm.rst b/Documentation/mm/hmm.rst index f6d53c37a2ca..7d61b7a8b65b 100644 --- a/Documentation/mm/hmm.rst +++ b/Documentation/mm/hmm.rst @@ -400,7 +400,7 @@ Exclusive access memory Some devices have features such as atomic PTE bits that can be used to implement atomic access to system memory. To support atomic operations to a shared virtual memory page such a device needs access to that page which is exclusive of any -userspace access from the CPU. The ``make_device_exclusive_range()`` function +userspace access from the CPU. The ``make_device_exclusive()`` function can be used to make a memory range inaccessible from userspace. This replaces all mappings for pages in the given range with special swap diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst index 0be1c7503a01..d3ada3e45e10 100644 --- a/Documentation/mm/index.rst +++ b/Documentation/mm/index.rst @@ -62,5 +62,4 @@ documentation, or deleted if it has served its purpose. unevictable-lru vmalloced-kernel-stacks vmemmap_dedup - z3fold zsmalloc diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst index 71fd4a6acf42..d3ac106e6b14 100644 --- a/Documentation/mm/physical_memory.rst +++ b/Documentation/mm/physical_memory.rst @@ -338,10 +338,272 @@ Statistics Zones ===== +As we have mentioned, each zone in memory is described by a ``struct zone`` +which is an element of the ``node_zones`` array of the node it belongs to. +``struct zone`` is the core data structure of the page allocator. A zone +represents a range of physical memory and may have holes. + +The page allocator uses the GFP flags, see :ref:`mm-api-gfp-flags`, specified by +a memory allocation to determine the highest zone in a node from which the +memory allocation can allocate memory. The page allocator first allocates memory +from that zone, if the page allocator can't allocate the requested amount of +memory from the zone, it will allocate memory from the next lower zone in the +node, the process continues up to and including the lowest zone. For example, if +a node contains ``ZONE_DMA32``, ``ZONE_NORMAL`` and ``ZONE_MOVABLE`` and the +highest zone of a memory allocation is ``ZONE_MOVABLE``, the order of the zones +from which the page allocator allocates memory is ``ZONE_MOVABLE`` > +``ZONE_NORMAL`` > ``ZONE_DMA32``. + +At runtime, free pages in a zone are in the Per-CPU Pagesets (PCP) or free areas +of the zone. The Per-CPU Pagesets are a vital mechanism in the kernel's memory +management system. 
By handling most frequent allocations and frees locally on +each CPU, the Per-CPU Pagesets improve performance and scalability, especially +on systems with many cores. The page allocator in the kernel employs a two-step +strategy for memory allocation, starting with the Per-CPU Pagesets before +falling back to the buddy allocator. Pages are transferred between the Per-CPU +Pagesets and the global free areas (managed by the buddy allocator) in batches. +This minimizes the overhead of frequent interactions with the global buddy +allocator. + +Architecture specific code calls free_area_init() to initializes zones. + +Zone structure +-------------- +The zones structure ``struct zone`` is defined in ``include/linux/mmzone.h``. +Here we briefly describe fields of this structure: -.. admonition:: Stub +General +~~~~~~~ - This section is incomplete. Please list and describe the appropriate fields. +``_watermark`` + The watermarks for this zone. When the amount of free pages in a zone is below + the min watermark, boosting is ignored, an allocation may trigger direct + reclaim and direct compaction, it is also used to throttle direct reclaim. + When the amount of free pages in a zone is below the low watermark, kswapd is + woken up. When the amount of free pages in a zone is above the high watermark, + kswapd stops reclaiming (a zone is balanced) when the + ``NUMA_BALANCING_MEMORY_TIERING`` bit of ``sysctl_numa_balancing_mode`` is not + set. The promo watermark is used for memory tiering and NUMA balancing. When + the amount of free pages in a zone is above the promo watermark, kswapd stops + reclaiming when the ``NUMA_BALANCING_MEMORY_TIERING`` bit of + ``sysctl_numa_balancing_mode`` is set. The watermarks are set by + ``__setup_per_zone_wmarks()``. The min watermark is calculated according to + ``vm.min_free_kbytes`` sysctl. The other three watermarks are set according + to the distance between two watermarks. The distance itself is calculated + taking ``vm.watermark_scale_factor`` sysctl into account. + +``watermark_boost`` + The number of pages which are used to boost watermarks to increase reclaim + pressure to reduce the likelihood of future fallbacks and wake kswapd now + as the node may be balanced overall and kswapd will not wake naturally. + +``nr_reserved_highatomic`` + The number of pages which are reserved for high-order atomic allocations. + +``nr_free_highatomic`` + The number of free pages in reserved highatomic pageblocks + +``lowmem_reserve`` + The array of the amounts of the memory reserved in this zone for memory + allocations. For example, if the highest zone a memory allocation can + allocate memory from is ``ZONE_MOVABLE``, the amount of memory reserved in + this zone for this allocation is ``lowmem_reserve[ZONE_MOVABLE]`` when + attempting to allocate memory from this zone. This is a mechanism the page + allocator uses to prevent allocations which could use ``highmem`` from using + too much ``lowmem``. For some specialised workloads on ``highmem`` machines, + it is dangerous for the kernel to allow process memory to be allocated from + the ``lowmem`` zone. This is because that memory could then be pinned via the + ``mlock()`` system call, or by unavailability of swapspace. + ``vm.lowmem_reserve_ratio`` sysctl determines how aggressive the kernel is in + defending these lower zones. This array is recalculated by + ``setup_per_zone_lowmem_reserve()`` at runtime if ``vm.lowmem_reserve_ratio`` + sysctl changes. + +``node`` + The index of the node this zone belongs to. 
Available only when + ``CONFIG_NUMA`` is enabled because there is only one zone in a UMA system. + +``zone_pgdat`` + Pointer to the ``struct pglist_data`` of the node this zone belongs to. + +``per_cpu_pageset`` + Pointer to the Per-CPU Pagesets (PCP) allocated and initialized by + ``setup_zone_pageset()``. By handling most frequent allocations and frees + locally on each CPU, PCP improves performance and scalability on systems with + many cores. + +``pageset_high_min`` + Copied to the ``high_min`` of the Per-CPU Pagesets for faster access. + +``pageset_high_max`` + Copied to the ``high_max`` of the Per-CPU Pagesets for faster access. + +``pageset_batch`` + Copied to the ``batch`` of the Per-CPU Pagesets for faster access. The + ``batch``, ``high_min`` and ``high_max`` of the Per-CPU Pagesets are used to + calculate the number of elements the Per-CPU Pagesets obtain from the buddy + allocator under a single hold of the lock for efficiency. They are also used + to decide if the Per-CPU Pagesets return pages to the buddy allocator in page + free process. + +``pageblock_flags`` + The pointer to the flags for the pageblocks in the zone (see + ``include/linux/pageblock-flags.h`` for flags list). The memory is allocated + in ``setup_usemap()``. Each pageblock occupies ``NR_PAGEBLOCK_BITS`` bits. + Defined only when ``CONFIG_FLATMEM`` is enabled. The flags is stored in + ``mem_section`` when ``CONFIG_SPARSEMEM`` is enabled. + +``zone_start_pfn`` + The start pfn of the zone. It is initialized by + ``calculate_node_totalpages()``. + +``managed_pages`` + The present pages managed by the buddy system, which is calculated as: + ``managed_pages`` = ``present_pages`` - ``reserved_pages``, ``reserved_pages`` + includes pages allocated by the memblock allocator. It should be used by page + allocator and vm scanner to calculate all kinds of watermarks and thresholds. + It is accessed using ``atomic_long_xxx()`` functions. It is initialized in + ``free_area_init_core()`` and then is reinitialized when memblock allocator + frees pages into buddy system. + +``spanned_pages`` + The total pages spanned by the zone, including holes, which is calculated as: + ``spanned_pages`` = ``zone_end_pfn`` - ``zone_start_pfn``. It is initialized + by ``calculate_node_totalpages()``. + +``present_pages`` + The physical pages existing within the zone, which is calculated as: + ``present_pages`` = ``spanned_pages`` - ``absent_pages`` (pages in holes). It + may be used by memory hotplug or memory power management logic to figure out + unmanaged pages by checking (``present_pages`` - ``managed_pages``). Write + access to ``present_pages`` at runtime should be protected by + ``mem_hotplug_begin/done()``. Any reader who can't tolerant drift of + ``present_pages`` should use ``get_online_mems()`` to get a stable value. It + is initialized by ``calculate_node_totalpages()``. + +``present_early_pages`` + The present pages existing within the zone located on memory available since + early boot, excluding hotplugged memory. Defined only when + ``CONFIG_MEMORY_HOTPLUG`` is enabled and initialized by + ``calculate_node_totalpages()``. + +``cma_pages`` + The pages reserved for CMA use. These pages behave like ``ZONE_MOVABLE`` when + they are not used for CMA. Defined only when ``CONFIG_CMA`` is enabled. + +``name`` + The name of the zone. It is a pointer to the corresponding element of + the ``zone_names`` array. + +``nr_isolate_pageblock`` + Number of isolated pageblocks. 
It is used to solve incorrect freepage counting + problem due to racy retrieving migratetype of pageblock. Protected by + ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled. + +``span_seqlock`` + The seqlock to protect ``zone_start_pfn`` and ``spanned_pages``. It is a + seqlock because it has to be read outside of ``zone->lock``, and it is done in + the main allocator path. However, the seqlock is written quite infrequently. + Defined only when ``CONFIG_MEMORY_HOTPLUG`` is enabled. + +``initialized`` + The flag indicating if the zone is initialized. Set by + ``init_currently_empty_zone()`` during boot. + +``free_area`` + The array of free areas, where each element corresponds to a specific order + which is a power of two. The buddy allocator uses this structure to manage + free memory efficiently. When allocating, it tries to find the smallest + sufficient block, if the smallest sufficient block is larger than the + requested size, it will be recursively split into the next smaller blocks + until the required size is reached. When a page is freed, it may be merged + with its buddy to form a larger block. It is initialized by + ``zone_init_free_lists()``. + +``unaccepted_pages`` + The list of pages to be accepted. All pages on the list are ``MAX_PAGE_ORDER``. + Defined only when ``CONFIG_UNACCEPTED_MEMORY`` is enabled. + +``flags`` + The zone flags. The least three bits are used and defined by + ``enum zone_flags``. ``ZONE_BOOSTED_WATERMARK`` (bit 0): zone recently boosted + watermarks. Cleared when kswapd is woken. ``ZONE_RECLAIM_ACTIVE`` (bit 1): + kswapd may be scanning the zone. ``ZONE_BELOW_HIGH`` (bit 2): zone is below + high watermark. + +``lock`` + The main lock that protects the internal data structures of the page allocator + specific to the zone, especially protects ``free_area``. + +``percpu_drift_mark`` + When free pages are below this point, additional steps are taken when reading + the number of free pages to avoid per-cpu counter drift allowing watermarks + to be breached. It is updated in ``refresh_zone_stat_thresholds()``. + +Compaction control +~~~~~~~~~~~~~~~~~~ + +``compact_cached_free_pfn`` + The PFN where compaction free scanner should start in the next scan. + +``compact_cached_migrate_pfn`` + The PFNs where compaction migration scanner should start in the next scan. + This array has two elements: the first one is used in ``MIGRATE_ASYNC`` mode, + and the other one is used in ``MIGRATE_SYNC`` mode. + +``compact_init_migrate_pfn`` + The initial migration PFN which is initialized to 0 at boot time, and to the + first pageblock with migratable pages in the zone after a full compaction + finishes. It is used to check if a scan is a whole zone scan or not. + +``compact_init_free_pfn`` + The initial free PFN which is initialized to 0 at boot time and to the last + pageblock with free ``MIGRATE_MOVABLE`` pages in the zone. It is used to check + if it is the start of a scan. + +``compact_considered`` + The number of compactions attempted since last failure. It is reset in + ``defer_compaction()`` when a compaction fails to result in a page allocation + success. It is increased by 1 in ``compaction_deferred()`` when a compaction + should be skipped. ``compaction_deferred()`` is called before + ``compact_zone()`` is called, ``compaction_defer_reset()`` is called when + ``compact_zone()`` returns ``COMPACT_SUCCESS``, ``defer_compaction()`` is + called when ``compact_zone()`` returns ``COMPACT_PARTIAL_SKIPPED`` or + ``COMPACT_COMPLETE``. 
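``compact_considered`` works together with ``compact_defer_shift`` and
``compact_order_failed`` (described next) as an exponential-backoff heuristic
for skipping compaction attempts after repeated failures. The following
self-contained sketch models that interplay using only the field descriptions
given here; the ``zone_compact_model`` struct, the ``*_model`` function names
and the ``COMPACT_MAX_DEFER_SHIFT`` value of 6 are illustrative assumptions,
not the upstream ``mm/compaction.c`` code::

  #include <stdbool.h>

  /* Value assumed for illustration; the real constant lives in the kernel. */
  #define COMPACT_MAX_DEFER_SHIFT 6

  struct zone_compact_model {
      unsigned int compact_considered;   /* attempts since the last failure */
      unsigned int compact_defer_shift;  /* skip 1 << shift attempts */
      int compact_order_failed;          /* minimum order that failed */
  };

  /* After a compaction run fails to help an allocation of 'order'. */
  static void defer_compaction_model(struct zone_compact_model *z, int order)
  {
      z->compact_considered = 0;
      if (z->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
          z->compact_defer_shift++;
      if (order < z->compact_order_failed)
          z->compact_order_failed = order;
  }

  /* Before a compaction run: should this attempt be skipped? */
  static bool compaction_deferred_model(struct zone_compact_model *z, int order)
  {
      unsigned int limit = 1U << z->compact_defer_shift;

      if (order < z->compact_order_failed)
          return false;   /* this order has not failed recently */
      if (++z->compact_considered >= limit)
          return false;   /* backoff window exhausted, try again */
      return true;        /* still inside the backoff window, skip */
  }

  /* After a compaction run succeeds for an allocation of 'order'. */
  static void compaction_defer_reset_model(struct zone_compact_model *z, int order)
  {
      z->compact_considered = 0;
      z->compact_defer_shift = 0;
      if (order >= z->compact_order_failed)
          z->compact_order_failed = order + 1;
  }

Each failure widens the skip window (1, 2, 4, ... attempts), and a success
collapses it again, which is the behaviour the field descriptions above and
below spell out.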
+ +``compact_defer_shift`` + The number of compactions skipped before trying again is + ``1<<compact_defer_shift``. It is increased by 1 in ``defer_compaction()``. + It is reset in ``compaction_defer_reset()`` when a direct compaction results + in a page allocation success. Its maximum value is ``COMPACT_MAX_DEFER_SHIFT``. + +``compact_order_failed`` + The minimum compaction failed order. It is set in ``compaction_defer_reset()`` + when a compaction succeeds and in ``defer_compaction()`` when a compaction + fails to result in a page allocation success. + +``compact_blockskip_flush`` + Set to true when compaction migration scanner and free scanner meet, which + means the ``PB_migrate_skip`` bits should be cleared. + +``contiguous`` + Set to true when the zone is contiguous (in other words, no hole). + +Statistics +~~~~~~~~~~ + +``vm_stat`` + VM statistics for the zone. The items tracked are defined by + ``enum zone_stat_item``. + +``vm_numa_event`` + VM NUMA event statistics for the zone. The items tracked are defined by + ``enum numa_stat_item``. + +``per_cpu_zonestats`` + Per-CPU VM statistics for the zone. It records VM statistics and VM NUMA event + statistics on a per-CPU basis. It reduces updates to the global ``vm_stat`` + and ``vm_numa_event`` fields of the zone to improve performance. .. _pages: diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst index 81417fa2ed20..e6756e78b476 100644 --- a/Documentation/mm/process_addrs.rst +++ b/Documentation/mm/process_addrs.rst @@ -716,9 +716,14 @@ calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`, before releasing the RCU lock via :c:func:`!rcu_read_unlock`. -VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for -their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it -via :c:func:`!vma_end_read`. +In cases when the user already holds mmap read lock, :c:func:`!vma_start_read_locked` +and :c:func:`!vma_start_read_locked_nested` can be used. These functions do not +fail due to lock contention but the caller should still check their return values +in case they fail for other reasons. + +VMA read locks increment :c:member:`!vma.vm_refcnt` reference counter for their +duration and the caller of :c:func:`!lock_vma_under_rcu` must drop it via +:c:func:`!vma_end_read`. VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances where a VMA is about to be modified, unlike :c:func:`!vma_start_read` the lock is always @@ -726,9 +731,9 @@ acquired. An mmap write lock **must** be held for the duration of the VMA write lock, releasing or downgrading the mmap write lock also releases the VMA write lock so there is no :c:func:`!vma_end_write` function. -Note that a semaphore write lock is not held across a VMA lock. Rather, a -sequence number is used for serialisation, and the write semaphore is only -acquired at the point of write lock to update this. +Note that when write-locking a VMA lock, the :c:member:`!vma.vm_refcnt` is temporarily +modified so that readers can detect the presense of a writer. The reference counter is +restored once the vma sequence number used for serialisation is updated. This ensures the semantics we require - VMA write locks provide exclusive write access to the VMA. @@ -738,7 +743,7 @@ Implementation details The VMA lock mechanism is designed to be a lightweight means of avoiding the use of the heavily contended mmap lock. 
It is implemented using a combination of a -read/write semaphore and sequence numbers belonging to the containing +reference counter and sequence numbers belonging to the containing :c:struct:`!struct mm_struct` and the VMA. Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic @@ -779,28 +784,31 @@ release of any VMA locks on its release makes sense, as you would never want to keep VMAs locked across entirely separate write operations. It also maintains correct lock ordering. -Each time a VMA read lock is acquired, we acquire a read lock on the -:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that -the sequence count of the VMA does not match that of the mm. +Each time a VMA read lock is acquired, we increment :c:member:`!vma.vm_refcnt` +reference counter and check that the sequence count of the VMA does not match +that of the mm. -If it does, the read lock fails. If it does not, we hold the lock, excluding -writers, but permitting other readers, who will also obtain this lock under RCU. +If it does, the read lock fails and :c:member:`!vma.vm_refcnt` is dropped. +If it does not, we keep the reference counter raised, excluding writers, but +permitting other readers, who can also obtain this lock under RCU. Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu` are also RCU safe, so the whole read lock operation is guaranteed to function correctly. -On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock` -read/write semaphore, before setting the VMA's sequence number under this lock, -also simultaneously holding the mmap write lock. +On the write side, we set a bit in :c:member:`!vma.vm_refcnt` which can't be +modified by readers and wait for all readers to drop their reference count. +Once there are no readers, the VMA's sequence number is set to match that of +the mm. During this entire operation mmap write lock is held. This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep until these are finished and mutual exclusion is achieved. -After setting the VMA's sequence number, the lock is released, avoiding -complexity with a long-term held write lock. +After setting the VMA's sequence number, the bit in :c:member:`!vma.vm_refcnt` +indicating a writer is cleared. From this point on, VMA's sequence number will +indicate VMA's write-locked state until mmap write lock is dropped or downgraded. -This clever combination of a read/write semaphore and sequence count allows for +This clever combination of a reference counter and sequence count allows for fast RCU-based per-VMA lock acquisition (especially on page fault, though utilised elsewhere) with minimal complexity around lock ordering. diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index a2cd8800d527..0e7f8e4cd2e3 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -116,14 +116,27 @@ pages: succeeds on tail pages. - map/unmap of a PMD entry for the whole THP increment/decrement - folio->_entire_mapcount, increment/decrement folio->_large_mapcount - and also increment/decrement folio->_nr_pages_mapped by ENTIRELY_MAPPED - when _entire_mapcount goes from -1 to 0 or 0 to -1. + folio->_entire_mapcount and folio->_large_mapcount. + + We also maintain the two slots for tracking MM owners (MM ID and + corresponding mapcount), and the current status ("maybe mapped shared" vs. + "mapped exclusively"). 
+ + With CONFIG_PAGE_MAPCOUNT, we also increment/decrement + folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount goes + from -1 to 0 or 0 to -1. - map/unmap of individual pages with PTE entry increment/decrement - page->_mapcount, increment/decrement folio->_large_mapcount and also - increment/decrement folio->_nr_pages_mapped when page->_mapcount goes - from -1 to 0 or 0 to -1 as this counts the number of pages mapped by PTE. + folio->_large_mapcount. + + We also maintain the two slots for tracking MM owners (MM ID and + corresponding mapcount), and the current status ("maybe mapped shared" vs. + "mapped exclusively"). + + With CONFIG_PAGE_MAPCOUNT, we also increment/decrement + page->_mapcount and increment/decrement folio->_nr_pages_mapped when + page->_mapcount goes from -1 to 0 or 0 to -1 as this counts the number + of pages mapped by PTE. split_huge_page internally has to distribute the refcounts in the head page to the tail pages before clearing all PG_head/tail bits from the page @@ -151,8 +164,8 @@ clear where references should go after split: it will stay on the head page. Note that split_huge_pmd() doesn't have any limitations on refcounting: pmd can be split at any point and never fails. -Partial unmap and deferred_split_folio() -======================================== +Partial unmap and deferred_split_folio() (anon THP only) +======================================================== Unmapping part of THP (with munmap() or other way) is not going to free memory immediately. Instead, we detect that a subpage of THP is not in use @@ -167,3 +180,13 @@ a THP crosses a VMA boundary. The function deferred_split_folio() is used to queue a folio for splitting. The splitting itself will happen when we get memory pressure via shrinker interface. + +With CONFIG_PAGE_MAPCOUNT, we reliably detect partial mappings based on +folio->_nr_pages_mapped. + +With CONFIG_NO_PAGE_MAPCOUNT, we detect partial mappings based on the +average per-page mapcount in a THP: if the average is < 1, an anon THP is +certainly partially mapped. As long as only a single process maps a THP, +this detection is reliable. With long-running child processes, there can +be scenarios where partial mappings can currently not be detected, and +might need asynchronous detection during memory reclaim in the future. diff --git a/Documentation/mm/z3fold.rst b/Documentation/mm/z3fold.rst deleted file mode 100644 index 25b5935d06c7..000000000000 --- a/Documentation/mm/z3fold.rst +++ /dev/null @@ -1,28 +0,0 @@ -====== -z3fold -====== - -z3fold is a special purpose allocator for storing compressed pages. -It is designed to store up to three compressed pages per physical page. -It is a zbud derivative which allows for higher compression -ratio keeping the simplicity and determinism of its predecessor. - -The main differences between z3fold and zbud are: - -* unlike zbud, z3fold allows for up to PAGE_SIZE allocations -* z3fold can hold up to 3 compressed pages in its page -* z3fold doesn't export any API itself and is thus intended to be used - via the zpool API. - -To keep the determinism and simplicity, z3fold, just like zbud, always -stores an integral number of compressed pages per page, but it can store -up to 3 pages unlike zbud which can store at most 2. Therefore the -compression ratio goes to around 2.7x while zbud's one is around 1.7x. - -Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not -return a dereferenceable pointer. 
Instead, it returns an unsigned long -handle which encodes actual location of the allocated object. - -Keeping effective compression ratio close to zsmalloc's, z3fold doesn't -depend on MMU enabled and provides more predictable reclaim behavior -which makes it a better fit for small and response-critical systems. diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst index 76902835e68e..d2bbecd78e14 100644 --- a/Documentation/mm/zsmalloc.rst +++ b/Documentation/mm/zsmalloc.rst @@ -27,9 +27,8 @@ Instead, it returns an opaque handle (unsigned long) which encodes actual location of the allocated object. The reason for this indirection is that zsmalloc does not keep zspages permanently mapped since that would cause issues on 32-bit systems where the VA region for kernel space mappings -is very small. So, before using the allocating memory, the object has to -be mapped using zs_map_object() to get a usable pointer and subsequently -unmapped using zs_unmap_object(). +is very small. So, using the allocated memory should be done through the +proper handle-based APIs. stat ==== diff --git a/Documentation/translations/zh_CN/mm/hmm.rst b/Documentation/translations/zh_CN/mm/hmm.rst index 0669f947d0bc..22c210f4e94f 100644 --- a/Documentation/translations/zh_CN/mm/hmm.rst +++ b/Documentation/translations/zh_CN/mm/hmm.rst @@ -326,7 +326,7 @@ devm_memunmap_pages() 和 devm_release_mem_region() 当资源可以绑定到 ``s 一些设备具有诸如原子PTE位的功能,可以用来实现对系统内存的原子访问。为了支持对一 个共享的虚拟内存页的原子操作,这样的设备需要对该页的访问是排他的,而不是来自CPU -的任何用户空间访问。 ``make_device_exclusive_range()`` 函数可以用来使一 +的任何用户空间访问。 ``make_device_exclusive()`` 函数可以用来使一 个内存范围不能从用户空间访问。 这将用特殊的交换条目替换给定范围内的所有页的映射。任何试图访问交换条目的行为都会 diff --git a/Documentation/translations/zh_CN/mm/index.rst b/Documentation/translations/zh_CN/mm/index.rst index c8726bce8f74..a71116be058f 100644 --- a/Documentation/translations/zh_CN/mm/index.rst +++ b/Documentation/translations/zh_CN/mm/index.rst @@ -58,7 +58,6 @@ Linux内存管理文档 remap_file_pages split_page_table_lock vmalloced-kernel-stacks - z3fold zsmalloc TODOLIST: diff --git a/Documentation/translations/zh_CN/mm/z3fold.rst b/Documentation/translations/zh_CN/mm/z3fold.rst deleted file mode 100644 index 9569a6d88270..000000000000 --- a/Documentation/translations/zh_CN/mm/z3fold.rst +++ /dev/null @@ -1,31 +0,0 @@ -:Original: Documentation/mm/z3fold.rst - -:翻译: - - 司延腾 Yanteng Si <siyanteng@loongson.cn> - -:校译: - - -====== -z3fold -====== - -z3fold是一个专门用于存储压缩页的分配器。它被设计为每个物理页最多可以存储三个压缩页。 -它是zbud的衍生物,允许更高的压缩率,保持其前辈的简单性和确定性。 - -z3fold和zbud的主要区别是: - -* 与zbud不同的是,z3fold允许最大的PAGE_SIZE分配。 -* z3fold在其页面中最多可以容纳3个压缩页面 -* z3fold本身没有输出任何API,因此打算通过zpool的API来使用 - -为了保持确定性和简单性,z3fold,就像zbud一样,总是在每页存储一个整数的压缩页,但是 -它最多可以存储3页,不像zbud最多可以存储2页。因此压缩率达到2.7倍左右,而zbud的压缩 -率是1.7倍左右。 - -不像zbud(但也像zsmalloc),z3fold_alloc()那样不返回一个可重复引用的指针。相反,它 -返回一个无符号长句柄,它编码了被分配对象的实际位置。 - -保持有效的压缩率接近于zsmalloc,z3fold不依赖于MMU的启用,并提供更可预测的回收行 -为,这使得它更适合于小型和反应迅速的系统。 diff --git a/MAINTAINERS b/MAINTAINERS index d5dfb9186962..59e33095aeb6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10933,6 +10933,8 @@ F: fs/hugetlbfs/ F: include/linux/hugetlb.h F: include/trace/events/hugetlbfs.h F: mm/hugetlb.c +F: mm/hugetlb_cma.c +F: mm/hugetlb_cma.h F: mm/hugetlb_vmemmap.c F: mm/hugetlb_vmemmap.h F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c @@ -14177,8 +14179,8 @@ F: include/linux/maple_tree.h F: include/trace/events/maple_tree.h F: lib/maple_tree.c F: lib/test_maple_tree.c -F: tools/testing/radix-tree/linux/maple_tree.h F: tools/testing/radix-tree/maple.c +F: tools/testing/shared/linux/maple_tree.h 
MARDUK (CREATOR CI40) DEVICE TREE SUPPORT M: Rahul Bedarkar <rahulbedarkar89@gmail.com> @@ -24292,6 +24294,7 @@ F: drivers/hwmon/tmp513.c TMPFS (SHMEM FILESYSTEM) M: Hugh Dickins <hughd@google.com> +R: Baolin Wang <baolin.wang@linux.alibaba.com> L: linux-mm@kvack.org S: Maintained F: include/linux/shmem_fs.h @@ -25650,7 +25653,6 @@ F: tools/testing/vsock/ VMALLOC M: Andrew Morton <akpm@linux-foundation.org> R: Uladzislau Rezki <urezki@gmail.com> -R: Christoph Hellwig <hch@infradead.org> L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org @@ -26539,13 +26541,6 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* -Z3FOLD COMPRESSED PAGE ALLOCATOR -M: Vitaly Wool <vitaly.wool@konsulko.com> -R: Miaohe Lin <linmiaohe@huawei.com> -L: linux-mm@kvack.org -S: Maintained -F: mm/z3fold.c - Z8530 DRIVER FOR AX.25 M: Joerg Reuter <jreuter@yaina.de> L: linux-hams@vger.kernel.org @@ -26556,13 +26551,6 @@ F: Documentation/networking/device_drivers/hamradio/z8530drv.rst F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h -ZBUD COMPRESSED PAGE ALLOCATOR -M: Seth Jennings <sjenning@redhat.com> -M: Dan Streetman <ddstreet@ieee.org> -L: linux-mm@kvack.org -S: Maintained -F: mm/zbud.c - ZD1211RW WIRELESS DRIVER L: linux-wireless@vger.kernel.org S: Orphan diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 61c2198b1359..2d491b8cdab9 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -273,14 +273,6 @@ srm_paging_stop (void) } #endif -void __init -mem_init(void) -{ - set_max_mapnr(max_low_pfn); - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - memblock_free_all(); -} - static const pgprot_t protection_map[16] = { [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR), diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 6a71b23f1383..a73cc94f806e 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -150,41 +150,18 @@ void __init setup_arch_memory(void) */ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn; - high_memory = (void *)(min_high_pfn << PAGE_SHIFT); - arch_pfn_offset = min(min_low_pfn, min_high_pfn); kmap_init(); - -#else /* CONFIG_HIGHMEM */ - /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */ - max_mapnr = max_low_pfn - min_low_pfn; - #endif /* CONFIG_HIGHMEM */ free_area_init(max_zone_pfn); } -static void __init highmem_init(void) +void __init arch_mm_preinit(void) { #ifdef CONFIG_HIGHMEM - unsigned long tmp; - memblock_phys_free(high_mem_start, high_mem_sz); - for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); #endif -} - -/* - * mem_init - initializes memory - * - * Frees up bootmem - * Calculates and displays memory available/used - */ -void __init mem_init(void) -{ - memblock_free_all(); - highmem_init(); BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE); BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE); diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index b07004d53267..fd8897a0e52c 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -32,7 +32,7 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size) return (void __iomem *)(u32)paddr; return ioremap_prot(paddr, size, - pgprot_val(pgprot_noncached(PAGE_KERNEL))); + pgprot_noncached(PAGE_KERNEL)); } EXPORT_SYMBOL(ioremap); @@ -44,10 +44,8 @@ EXPORT_SYMBOL(ioremap); * might need finer access control (R/W/X) */ void __iomem *ioremap_prot(phys_addr_t paddr, size_t size, - unsigned long flags) + pgprot_t prot) { - pgprot_t prot = __pgprot(flags); - /* force 
uncached */ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot)); } diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index be91e376df79..6b986ef6042f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -19,14 +19,13 @@ extern struct page *empty_zero_page; #define ZERO_PAGE(vaddr) (empty_zero_page) #endif -#ifndef CONFIG_MMU - #include <asm-generic/pgtable-nopud.h> + +#ifndef CONFIG_MMU #include <asm/pgtable-nommu.h> #else -#include <asm-generic/pgtable-nopud.h> #include <asm/page.h> #include <asm/pgtable-hwdef.h> diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 5345d218899a..54bdca025c9f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -237,56 +237,17 @@ static inline void poison_init_mem(void *s, size_t count) *p++ = 0xe7fddef0; } -static void __init free_highpages(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn; - phys_addr_t range_start, range_end; - u64 i; - - /* set highmem page free */ - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, - &range_start, &range_end, NULL) { - unsigned long start = PFN_UP(range_start); - unsigned long end = PFN_DOWN(range_end); - - /* Ignore complete lowmem entries */ - if (end <= max_low) - continue; - - /* Truncate partial highmem entries */ - if (start < max_low) - start = max_low; - - for (; start < end; start++) - free_highmem_page(pfn_to_page(start)); - } -#endif -} - -/* - * mem_init() marks the free areas in the mem_map and tells us how much - * memory is free. This is done after various parts of the system have - * claimed their memory after the kernel image. - */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { #ifdef CONFIG_ARM_LPAE swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE); #endif - set_max_mapnr(pfn_to_page(max_pfn) - mem_map); - - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); + memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif - free_highpages(); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
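A pattern that repeats across the architecture hunks above and below is the
new ``ioremap_prot()`` prototype: it now takes a ``pgprot_t`` instead of raw
``unsigned long`` protection bits, so callers pass ``PAGE_KERNEL``-style
values directly (or wrap bit masks in ``__pgprot()``) and the ``pgprot_val()``
conversions disappear from the call sites. A minimal caller-side sketch of the
new convention, mirroring the arc ``ioremap()`` hunk above; the
``map_device_window()`` helper is hypothetical and only illustrates the call
form::

  #include <linux/io.h>
  #include <linux/pgtable.h>

  /* Hypothetical driver helper, shown only to illustrate the new call form. */
  static void __iomem *map_device_window(phys_addr_t base, size_t len)
  {
      /* Old form: ioremap_prot(base, len, pgprot_val(pgprot_noncached(PAGE_KERNEL))) */
      return ioremap_prot(base, len, pgprot_noncached(PAGE_KERNEL));
  }

The arc, arm64, csky, loongarch, mips, parisc, powerpc and riscv hunks in this
series are largely mechanical conversions of this kind.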
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 62dc903ecc7f..748c34dc953c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -41,6 +41,7 @@ config ARM64 select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_HW_PTE_YOUNG @@ -157,7 +158,6 @@ config ARM64 select GENERIC_IRQ_SHOW_LEVEL select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP - select GENERIC_PTDUMP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 76ebbdc6ffdd..9b96840fb979 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -270,9 +270,9 @@ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook); #define _PAGE_IOREMAP PROT_DEVICE_nGnRE #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), PROT_NORMAL_NC) + ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC)) #define ioremap_np(addr, size) \ - ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE) + ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) /* * io{read,write}{16,32,64}be() macros @@ -293,7 +293,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size) if (pfn_is_map_memory(__phys_to_pfn(addr))) return (void __iomem *)__phys_to_virt(addr); - return ioremap_prot(addr, size, PROT_NORMAL); + return ioremap_prot(addr, size, __pgprot(PROT_NORMAL)); } /* diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 6cf4aae05219..b2931d1ae0fb 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -7,7 +7,7 @@ #include <linux/ptdump.h> -#ifdef CONFIG_PTDUMP_CORE +#ifdef CONFIG_PTDUMP #include <linux/mm_types.h> #include <linux/seq_file.h> @@ -70,6 +70,6 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info, #else static inline void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val) { } -#endif /* CONFIG_PTDUMP_CORE */ +#endif /* CONFIG_PTDUMP */ #endif /* __ASM_PTDUMP_H */ diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8104aee4f9a0..eba1a98657f1 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -322,13 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) return true; } -static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) -{ - __flush_tlb_page_nosync(mm, uaddr); -} - /* * If mprotect/munmap/etc occurs during TLB batched flushing, we need to * synchronise all the TLBI issued with a DSB to avoid the race mentioned in @@ -450,7 +443,7 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long start, return false; } -static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, +static inline void __flush_tlb_range_nosync(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) @@ -462,12 +455,12 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, pages = (end - start) >> PAGE_SHIFT; if (__flush_tlb_range_limit_excess(start, end, pages, stride)) { - flush_tlb_mm(vma->vm_mm); + flush_tlb_mm(mm); return; } dsb(ishst); - asid = ASID(vma->vm_mm); + asid = ASID(mm); if (last_level) __flush_tlb_range_op(vale1is, start, pages, 
stride, asid, @@ -476,7 +469,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true, lpa2_is_enabled()); - mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } static inline void __flush_tlb_range(struct vm_area_struct *vma, @@ -484,7 +477,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long stride, bool last_level, int tlb_level) { - __flush_tlb_range_nosync(vma, start, end, stride, + __flush_tlb_range_nosync(vma->vm_mm, start, end, stride, last_level, tlb_level); dsb(ish); } @@ -535,6 +528,12 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, unsigned long start, unsigned long end) +{ + __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3); +} #endif #endif diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index e6f66491fbe9..b9a66fc146c9 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -379,7 +379,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) prot = __acpi_get_writethrough_mem_attribute(); } } - return ioremap_prot(phys, size, pgprot_val(prot)); + return ioremap_prot(phys, size, prot); } /* diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index ead632ad01b4..096e45acadb2 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -71,8 +71,8 @@ config PTDUMP_STAGE2_DEBUGFS depends on KVM depends on DEBUG_KERNEL depends on DEBUG_FS - depends on GENERIC_PTDUMP - select PTDUMP_CORE + depends on ARCH_HAS_PTDUMP + select PTDUMP default n help Say Y here if you want to show the stage-2 kernel pagetables diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index fc92170a8f37..c26489cf96cd 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -5,7 +5,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o fixmap.o obj-$(CONFIG_ARM64_CONTPTE) += contpte.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 55107d27d3f8..bcac4f55f9c1 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -335,7 +335,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, * eliding the trailing DSB applies here. */ addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); - __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE, + __flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE, PAGE_SIZE, true, 3); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index ccdef53872a0..4b966d5709d2 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -309,8 +309,6 @@ void __init arm64_memblock_init(void) } early_init_fdt_scan_reserved_mem(); - - high_memory = __va(memblock_end_of_DRAM() - 1) + 1; } void __init bootmem_init(void) @@ -359,12 +357,7 @@ void __init bootmem_init(void) memblock_dump_all(); } -/* - * mem_init() marks the free areas in the mem_map and tells us how much memory - * is free. This is done after various parts of the system have claimed their - * memory after the kernel image. 
- */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { unsigned int flags = SWIOTLB_VERBOSE; bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit); @@ -388,9 +381,6 @@ void __init mem_init(void) swiotlb_init(swiotlb, flags); swiotlb_update_mem_attributes(); - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 6cc0b7e7eb03..10e246f11271 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -15,10 +15,9 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook) } void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t pgprot) { unsigned long last_addr = phys_addr + size - 1; - pgprot_t pgprot = __pgprot(prot); /* Don't allow outside PHYS_MASK */ if (last_addr & ~PHYS_MASK) diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h index ed53f0b47388..536d3bf32ff1 100644 --- a/arch/csky/include/asm/io.h +++ b/arch/csky/include/asm/io.h @@ -36,7 +36,7 @@ */ #define ioremap_wc(addr, size) \ ioremap_prot((addr), (size), \ - (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED) + __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)) #include <asm-generic/io.h> diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index fe715b707fd0..e0d6ca86ea8c 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -12,6 +12,45 @@ #include <asm/mmu_context.h> #include <asm/pgalloc.h> +#ifdef CONFIG_BLK_DEV_INITRD +static void __init setup_initrd(void) +{ + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_err("initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + + if (memblock_is_region_reserved(__pa(initrd_start), size)) { + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", + __pa(initrd_start), size); + goto disable; + } + + memblock_reserve(__pa(initrd_start), size); + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + + initrd_below_start_ok = 1; + + return; + +disable: + initrd_start = initrd_end = 0; + + pr_err(" - disabling initrd\n"); +} +#endif + static void __init csky_memblock_init(void) { unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); @@ -40,6 +79,10 @@ static void __init csky_memblock_init(void) max_low_pfn = min_low_pfn + sseg_size; } +#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; mmu_init(min_low_pfn, max_low_pfn); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index bde7cabd23df..573da66b2543 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -42,73 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); -#ifdef CONFIG_BLK_DEV_INITRD -static void __init setup_initrd(void) -{ - unsigned long size; - - if (initrd_start >= initrd_end) { - pr_err("initrd not found or empty"); - goto disable; - } - - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { - pr_err("initrd extends beyond end of memory"); - goto disable; - } - - size = initrd_end - initrd_start; - - if (memblock_is_region_reserved(__pa(initrd_start), size)) { - pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", - 
__pa(initrd_start), size); - goto disable; - } - - memblock_reserve(__pa(initrd_start), size); - - pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)(initrd_start), size); - - initrd_below_start_ok = 1; - - return; - -disable: - initrd_start = initrd_end = 0; - - pr_err(" - disabling initrd\n"); -} -#endif - -void __init mem_init(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET); -#else - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); -#endif - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - -#ifdef CONFIG_BLK_DEV_INITRD - setup_initrd(); -#endif - - memblock_free_all(); - -#ifdef CONFIG_HIGHMEM - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - /* FIXME not sure about */ - if (!memblock_is_reserved(tmp << PAGE_SHIFT)) - free_highmem_page(page); - } -#endif -} - void free_initmem(void) { free_initmem_default(-1); diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 3458f39ca2ac..34eb9d424b96 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -43,32 +43,6 @@ DEFINE_SPINLOCK(kmap_gen_lock); /* checkpatch says don't init this to 0. */ unsigned long long kmap_generation; -/* - * mem_init - initializes memory - * - * Frees up bootmem - * Fixes up more stuff for HIGHMEM - * Calculates and displays memory available/used - */ -void __init mem_init(void) -{ - /* No idea where this is actually declared. Seems to evade LXR. */ - memblock_free_all(); - - /* - * To-Do: someone somewhere should wipe out the bootmem map - * after we're done? - */ - - /* - * This can be moved to some more virtual-memory-specific - * initialization hook at some point. Set the init_mm - * descriptors "context" value to point to the initial - * kernel segment table's physical address. - */ - init_mm.context.ptbase = __pa(init_mm.pgd); -} - void sync_icache_dcache(pte_t pte) { unsigned long addr; @@ -104,10 +78,10 @@ static void __init paging_init(void) free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */ /* - * Start of high memory area. Will probably need something more - * fancy if we... get more fancy. + * Set the init_mm descriptors "context" value to point to the + * initial kernel segment table's physical address. 
*/ - high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT); + init_mm.context.ptbase = __pa(init_mm.pgd); } #ifndef DMA_RESERVE diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 3c240afe5aed..c9f564e1d4d9 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -109,8 +109,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y # CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index e77a56eaf906..eaff72b38dc8 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -23,9 +23,9 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size); #ifdef CONFIG_ARCH_IOREMAP static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - switch (prot_val & _CACHE_MASK) { + switch (pgprot_val(prot) & _CACHE_MASK) { case _CACHE_CC: return (void __iomem *)(unsigned long)(CACHE_BASE + offset); case _CACHE_SUC: @@ -38,7 +38,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, } #define ioremap(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) + ioremap_prot((offset), (size), PAGE_KERNEL_SUC) #define iounmap(addr) ((void)(addr)) @@ -55,10 +55,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, */ #define ioremap_wc(offset, size) \ ioremap_prot((offset), (size), \ - pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)) + wc_enabled ? 
PAGE_KERNEL_WUC : PAGE_KERNEL_SUC) #define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((offset), (size), PAGE_KERNEL) #define mmiowb() wmb() diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 84fe7f854820..30a72fd528c0 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -387,12 +387,6 @@ void __init paging_init(void) free_area_init(zones_size); } -void __init mem_init(void) -{ - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); -} - int pcibus_to_node(struct pci_bus *bus) { return dev_to_node(&bus->dev); diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index ca5aa5f46a9f..fdb7f73ad160 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -75,14 +75,6 @@ void __init paging_init(void) free_area_init(max_zone_pfns); } - -void __init mem_init(void) -{ - max_mapnr = max_low_pfn; - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - - memblock_free_all(); -} #endif /* !CONFIG_NUMA */ void __ref free_initmem(void) diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 8b11d0d545aa..488411af1b3f 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -121,7 +121,5 @@ static inline void init_pointer_tables(void) void __init mem_init(void) { - /* this will put all memory onto the freelists */ - memblock_free_all(); init_pointer_tables(); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 4520c5741579..65f0d1fb8a2a 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -52,19 +52,6 @@ static void __init highmem_init(void) map_page(PKMAP_BASE, 0, 0); /* XXX gross */ pkmap_page_table = virt_to_kpte(PKMAP_BASE); } - -static void __meminit highmem_setup(void) -{ - unsigned long pfn; - - for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { - struct page *page = pfn_to_page(pfn); - - /* FIXME not sure about */ - if (!memblock_is_reserved(pfn << PAGE_SHIFT)) - free_highmem_page(page); - } -} #endif /* CONFIG_HIGHMEM */ /* @@ -104,17 +91,13 @@ void __init setup_memory(void) * * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn - * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ - /* RAM is assumed contiguous */ - max_mapnr = memory_size >> PAGE_SHIFT; max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; - pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr); pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); @@ -124,14 +107,6 @@ void __init setup_memory(void) void __init mem_init(void) { - high_memory = (void *)__va(memory_start + lowmem_size - 1); - - /* this will put all memory onto the freelists */ - memblock_free_all(); -#ifdef CONFIG_HIGHMEM - highmem_setup(); -#endif - mem_init_done = 1; } diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 78c6573f91f2..980187a83053 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -115,7 +115,7 @@ static inline unsigned long isa_virt_to_bus(volatile void *address) } void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val); + pgprot_t prot); void iounmap(const volatile void __iomem 
*addr); /* @@ -130,7 +130,7 @@ void iounmap(const volatile void __iomem *addr); * address. */ #define ioremap(offset, size) \ - ioremap_prot((offset), (size), _CACHE_UNCACHED) + ioremap_prot((offset), (size), __pgprot(_CACHE_UNCACHED)) /* * ioremap_cache - map bus memory into CPU space @@ -148,7 +148,7 @@ void iounmap(const volatile void __iomem *addr); * memory-like regions on I/O busses. */ #define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), _page_cachable_default) + ioremap_prot((offset), (size), __pgprot(_page_cachable_default)) /* * ioremap_wc - map bus memory into CPU space @@ -169,7 +169,7 @@ void iounmap(const volatile void __iomem *addr); * _CACHE_UNCACHED option (see cpu_probe() method). */ #define ioremap_wc(offset, size) \ - ioremap_prot((offset), (size), boot_cpu_data.writecombine) + ioremap_prot((offset), (size), __pgprot(boot_cpu_data.writecombine)) #if defined(CONFIG_CPU_CAVIUM_OCTEON) #define war_io_reorder_wmb() wmb() diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index 14226ea42036..602a21aee9d4 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -20,6 +20,4 @@ #define nid_to_addrbase(nid) 0 #endif -extern void setup_zero_pages(void); - #endif /* _ASM_MMZONE_H_ */ diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 8388400d052f..95d5f553ce19 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -164,13 +164,6 @@ void __init paging_init(void) free_area_init(zones_size); } -void __init mem_init(void) -{ - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); - memblock_free_all(); - setup_zero_pages(); /* This comes from node 0 */ -} - /* All PCI device belongs to logical Node-0 */ int pcibus_to_node(struct pci_bus *bus) { diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 4583d1a2a73e..a673d3d68254 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -59,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask); /* * Not static inline because used by IP27 special magic initialization code */ -void setup_zero_pages(void) +static void __init setup_zero_pages(void) { - unsigned int order, i; - struct page *page; + unsigned int order; if (cpu_has_vce) order = 3; else order = 0; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!empty_zero_page) - panic("Oh boy, that early out of memory?"); - - page = virt_to_page((void *)empty_zero_page); - split_page(page, order); - for (i = 0; i < (1 << order); i++, page++) - mark_page_reserved(page); + empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE); zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } @@ -423,17 +415,8 @@ void __init paging_init(void) " %ldk highmem ignored\n", (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; - - max_mapnr = max_low_pfn; - } else if (highend_pfn) { - max_mapnr = highend_pfn; - } else { - max_mapnr = max_low_pfn; } -#else - max_mapnr = max_low_pfn; #endif - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); free_area_init(max_zone_pfns); } @@ -442,26 +425,7 @@ void __init paging_init(void) static struct kcore_list kcore_kseg0; #endif -static inline void __init mem_init_free_highmem(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - if (cpu_has_dc_aliases) - return; - - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - if (!memblock_is_memory(PFN_PHYS(tmp))) - SetPageReserved(page); - else - 
free_highmem_page(page); - } -#endif -} - -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE @@ -470,9 +434,7 @@ void __init mem_init(void) BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT)); maar_init(); - memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ - mem_init_free_highmem(); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) @@ -482,6 +444,11 @@ void __init mem_init(void) 0x80000000 - 4, KCORE_TEXT); #endif } +#else /* CONFIG_NUMA */ +void __init arch_mm_preinit(void) +{ + setup_zero_pages(); /* This comes from node 0 */ +} #endif /* !CONFIG_NUMA */ void free_init_pages(const char *what, unsigned long begin, unsigned long end) diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index d8243d61ef32..c6c4576cd4a8 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -44,9 +44,9 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, * ioremap_prot gives the caller control over cache coherency attributes (CCA) */ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; unsigned long offset, pfn, last_pfn; struct vm_struct *area; phys_addr_t last_addr; diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c index 15e7820d6a5f..acc03ba20098 100644 --- a/arch/mips/mm/ioremap64.c +++ b/arch/mips/mm/ioremap64.c @@ -3,9 +3,9 @@ #include <ioremap.h> void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; u64 base = (flags == _CACHE_UNCACHED ? 
IO_BASE : UNCAC_BASE); void __iomem *addr; diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 1963313f55d8..2b3e46e2e607 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -406,8 +406,6 @@ void __init prom_meminit(void) } } -extern void setup_zero_pages(void); - void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0, }; @@ -416,10 +414,3 @@ void __init paging_init(void) zones_size[ZONE_NORMAL] = max_low_pfn; free_area_init(zones_size); } - -void __init mem_init(void) -{ - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); - memblock_free_all(); - setup_zero_pages(); /* This comes from node 0 */ -} diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index da122a5fa43b..2a40150142c3 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -149,6 +149,8 @@ void __init setup_arch(char **cmdline_p) memory_start = memblock_start_of_DRAM(); memory_end = memblock_end_of_DRAM(); + pr_debug("%s: start=%lx, end=%lx\n", __func__, memory_start, memory_end); + setup_initial_init_mm(_stext, _etext, _edata, _end); init_task.thread.kregs = &fake_regs; @@ -156,7 +158,6 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = boot_command_line; find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); - max_mapnr = max_low_pfn; memblock_reserve(__pa_symbol(_stext), _end - _stext); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index a2278485de19..94efa3de3933 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -51,7 +51,7 @@ void __init paging_init(void) pagetable_init(); pgd_current = swapper_pg_dir; - max_zone_pfn[ZONE_NORMAL] = max_mapnr; + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; /* pass the memory from the bootmem allocator to the main allocator */ free_area_init(max_zone_pfn); @@ -60,20 +60,6 @@ void __init paging_init(void) (unsigned long)empty_zero_page + PAGE_SIZE); } -void __init mem_init(void) -{ - unsigned long end_mem = memory_end; /* this must not include - kernel stack at top */ - - pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end); - - end_mem &= PAGE_MASK; - high_memory = __va(end_mem); - - /* this will put all memory onto the freelists */ - memblock_free_all(); -} - void __init mmu_init(void) { flush_tlb_all(); diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index d0cb1a0126f9..be1c2eb8bb94 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -193,15 +193,9 @@ void __init mem_init(void) { BUG_ON(!mem_map); - max_mapnr = max_low_pfn; - high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); - /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); - /* this will put all low memory onto the freelists */ - memblock_free_all(); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 325ae693c20e..f01ad3ad60b5 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -131,7 +131,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr) _PAGE_ACCESSED | _PAGE_NO_CACHE) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), _PAGE_IOREMAP) + ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP)) #define pci_iounmap pci_iounmap diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 61c0a2477072..14270715d754 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ 
-562,10 +562,6 @@ void __init mem_init(void) BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); #endif - high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(max_low_pfn); - memblock_free_all(); - #ifdef CONFIG_PA11 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index fd996472dfe7..0b65c4b3baee 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -14,7 +14,7 @@ #include <linux/mm.h> void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { #ifdef CONFIG_EISA unsigned long end = phys_addr + size - 1; @@ -41,6 +41,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, } } - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b5630f8ad436..b696fe3a325e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -148,6 +148,7 @@ config PPC select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API select ARCH_HAS_PREEMPT_LAZY + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 @@ -207,7 +208,6 @@ config PPC select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_PCI_IOMAP if PCI - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_VDSO_DATA_STORE diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 89da51d724fb..9bc2758a6a9a 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -77,4 +77,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index f0bba9c5f9c3..bb786694dd26 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -94,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift) return mmu_psize; } +#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc + +static inline bool arch_has_huge_bootmem_alloc(void) +{ + return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()); +} #endif diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 492e8855e00f..7a89754842d6 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -826,7 +826,7 @@ void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_cache(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((addr), (size), PAGE_KERNEL) #define iounmap iounmap diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index a08b0ede4e64..68d47c53876c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -957,8 +957,6 @@ void __init setup_arch(char **cmdline_p) /* Parse memory topology */ mem_topology_setup(); - /* Set max_mapnr before paging_init() */ - set_max_mapnr(max_pfn); high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* diff --git 
a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 0fe2f085c05a..8c1582b2987d 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -15,5 +15,5 @@ obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump/ +obj-$(CONFIG_PTDUMP) += ptdump/ obj-$(CONFIG_KASAN) += kasan/ diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6b043180220a..d3c1b749dcfc 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -113,6 +113,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) gpage_freearray[nr_gpages] = 0; list_add(&m->list, &huge_boot_pages[0]); m->hstate = hstate; + m->flags = 0; return 1; } diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d96bbc001e73..b6f3ae03ca9e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -41,6 +41,7 @@ #include <linux/libfdt.h> #include <linux/memremap.h> #include <linux/memory.h> +#include <linux/bootmem_info.h> #include <asm/pgalloc.h> #include <asm/page.h> @@ -386,10 +387,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, } #endif + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) { } +#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index 70b08bf3dd1f..4b4feba9873b 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -34,9 +34,9 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) return __ioremap_caller(addr, size, prot, caller); } -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot) { - pte_t pte = __pte(flags); + pte_t pte = __pte(pgprot_val(prot)); void *caller = __builtin_return_address(0); /* writeable implies dirty for kernel addresses */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 34806c858e54..38e582252aed 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -273,7 +273,7 @@ void __init paging_init(void) mark_nonram_nosave(); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* * book3s is limited to 16 page sizes due to encoding this in @@ -295,22 +295,6 @@ void __init mem_init(void) kasan_late_init(); - memblock_free_all(); - -#ifdef CONFIG_HIGHMEM - { - unsigned long pfn, highmem_mapnr; - - highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; - for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { - phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; - struct page *page = pfn_to_page(pfn); - if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr)) - free_highmem_page(page); - } - } -#endif /* CONFIG_HIGHMEM */ - #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 4a2520ec6d7f..61b37c9400b2 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu) static int __init setup_areas(struct spu *spu) { struct table {char* name; unsigned long addr; unsigned long size;}; - unsigned long shadow_flags = 
pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), shadow_flags); + sizeof(struct spe_shadow), + pgprot_noncached_wc(PAGE_KERNEL_RO)); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 10116f68569d..e776fb35667e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -45,6 +45,7 @@ config RISCV select ARCH_HAS_PMEM_API select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PREPARE_SYNC_CORE_CMD + select ARCH_HAS_PTDUMP if MMU select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU @@ -115,7 +116,6 @@ config RISCV select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PENDING_IRQ if SMP select GENERIC_PCI_IOMAP - select GENERIC_PTDUMP if MMU select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL if MMU && 64BIT diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 0257f4aa7ff4..a0e51840b9db 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -137,7 +137,7 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) #ifdef CONFIG_MMU #define arch_memremap_wb(addr, size, flags) \ - ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) + ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL))) #endif #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 72e559934952..ce0dd0fed764 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -60,8 +60,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, bool arch_tlbbatch_should_defer(struct mm_struct *mm); void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr); + struct mm_struct *mm, unsigned long start, unsigned long end); void arch_flush_tlb_batched_pending(struct mm_struct *mm); void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 2fd29695a788..3f6d5a6789e8 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -305,7 +305,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } } - return ioremap_prot(phys, size, pgprot_val(prot)); + return ioremap_prot(phys, size, prot); } #ifdef CONFIG_PCI diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index cbe4d775ef56..b916a68d324a 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -19,7 +19,7 @@ obj-y += context.o obj-y += pmem.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_KASAN) += kasan_init.o ifdef CONFIG_KASAN diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 15b2eda4c364..79b649f6de72 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -171,7 +171,7 @@ static void __init print_vm_layout(void) static void print_vm_layout(void) { } #endif /* CONFIG_DEBUG_VM */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit); #ifdef CONFIG_FLATMEM @@ -192,7 +192,6 @@ void __init mem_init(void) } swiotlb_init(swiotlb, SWIOTLB_VERBOSE); - memblock_free_all(); print_vm_layout(); } @@ -295,10 +294,8 @@ static void __init setup_bootmem(void) phys_ram_end = 
memblock_end_of_DRAM(); min_low_pfn = PFN_UP(phys_ram_base); max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); - high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); reserve_initrd_mem(); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 9b6e86ce3867..74dd9307fbf1 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -186,8 +186,7 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm) } void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) + struct mm_struct *mm, unsigned long start, unsigned long end) { cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 6412e39a795d..c809c486d136 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -92,6 +92,7 @@ config S390 select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_PREEMPT_LAZY + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME select ARCH_HAS_SET_DIRECT_MAP @@ -159,7 +160,6 @@ config S390 select GENERIC_CPU_VULNERABILITIES select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_VDSO_DATA_STORE diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 8b825e3578d8..6f2c9ce1b154 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -92,7 +92,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y CONFIG_SLUB_STATS=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 8392f8a5ad6d..f18a7d97ac21 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -86,7 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 251e0372ccbd..faddb9aef3b8 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -33,7 +33,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) + ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL)) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index f6c2db7a8669..9726b91fe7e4 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile @@ -9,6 +9,6 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o +obj-$(CONFIG_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PGSTE) += gmap.o obj-$(CONFIG_PFAULT) += pfault.o diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f4ac69506608..afa085e8186c 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -74,8 +74,6 @@ static void __init setup_zero_pages(void) { unsigned long total_pages = 
memblock_estimated_nr_free_pages(); unsigned int order; - struct page *page; - int i; /* Latest machines require a mapping granularity of 512KB */ order = 7; @@ -84,16 +82,7 @@ static void __init setup_zero_pages(void) while (order > 2 && (total_pages >> 10) < (1UL << order)) order--; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!empty_zero_page) - panic("Out of memory in setup_zero_pages"); - - page = virt_to_page((void *) empty_zero_page); - split_page(page, order); - for (i = 1 << order; i > 0; i--) { - mark_page_reserved(page); - page++; - } + empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE); zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } @@ -166,18 +155,13 @@ static void pv_init(void) swiotlb_update_mem_attributes(); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); cpumask_set_cpu(0, mm_cpumask(&init_mm)); - set_max_mapnr(max_low_pfn); - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - pv_init(); - /* this will put all low memory onto the freelists */ - memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ } @@ -239,16 +223,13 @@ struct s390_cma_mem_data { static int s390_cma_check_range(struct cma *cma, void *data) { struct s390_cma_mem_data *mem_data; - unsigned long start, end; mem_data = data; - start = cma_get_base(cma); - end = start + cma_get_size(cma); - if (end < mem_data->start) - return 0; - if (start >= mem_data->end) - return 0; - return -EBUSY; + + if (cma_intersects(cma, mem_data->start, mem_data->end)) + return -EBUSY; + + return 0; } static int s390_cma_mem_notifier(struct notifier_block *nb, diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 054240c6798f..5bbdc4190b8b 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -255,7 +255,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { /* * When PCI MIO instructions are unavailable the "physical" address @@ -265,7 +265,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, if (!static_branch_unlikely(&have_mio)) return (void __iomem *)phys_addr; - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 2c44b94f82fb..1b3f43c3ac46 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -58,7 +58,7 @@ static int __init landisk_devices_setup(void) /* open I/O area window */ paddrbase = virt_to_phys((void *)PA_AREA5_IO); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot)); + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); if (!cf_ide_base) { printk("allocate_cf_area : can't open CF I/O window!\n"); return -ENOMEM; diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c index 20d01b430f2a..e95bde207adb 100644 --- a/arch/sh/boards/mach-lboxre2/setup.c +++ b/arch/sh/boards/mach-lboxre2/setup.c @@ -53,7 +53,7 @@ static int __init lboxre2_devices_setup(void) paddrbase = virt_to_phys((void*)PA_AREA5_IO); psize = PAGE_SIZE; prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot)); + cf0_io_base = (u32)ioremap_prot(paddrbase, 
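The s390 hunk above drops an open-coded base/size overlap test in favour of cma_intersects(), which is added to the CMA API elsewhere in this series. A hedged sketch of the same check in isolation; the wrapper and its name are assumptions, the cma_intersects() call itself simply follows the usage above:

#include <linux/cma.h>

/* Illustrative sketch: reject an operation on [start, end) if it overlaps
 * any part of the given CMA area. */
static int example_range_is_free_of_cma(struct cma *cma, unsigned long start,
					unsigned long end)
{
	if (cma_intersects(cma, start, end))
		return -EBUSY;

	return 0;
}
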
psize, prot); if (!cf0_io_base) { printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); return -ENOMEM; diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c index 3901b6031ad5..5c9312f334d3 100644 --- a/arch/sh/boards/mach-sh03/setup.c +++ b/arch/sh/boards/mach-sh03/setup.c @@ -75,7 +75,7 @@ static int __init sh03_devices_setup(void) /* open I/O area window */ paddrbase = virt_to_phys((void *)PA_AREA5_IO); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot)); + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); if (!cf_ide_base) { printk("allocate_cf_area : can't open CF I/O window!\n"); return -ENOMEM; diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 0f663ebec700..6d282b253815 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -279,7 +279,7 @@ unsigned long long poke_real_address_q(unsigned long long addr, #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE) #define ioremap_cache(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((addr), (size), PAGE_KERNEL) #endif /* CONFIG_MMU */ #include <asm-generic/io.h> diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 289a2fecebef..99e302eeeec1 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -290,7 +290,6 @@ void __init paging_init(void) */ max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = __MEMORY_START >> PAGE_SHIFT; - set_max_mapnr(max_low_pfn - min_low_pfn); nodes_clear(node_online_map); @@ -331,15 +330,6 @@ unsigned int mem_init_done = 0; void __init mem_init(void) { - pg_data_t *pgdat; - - high_memory = NULL; - for_each_online_pgdat(pgdat) - high_memory = max_t(void *, high_memory, - __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); - - memblock_free_all(); - /* Set this up early, so we can take care of the zero page */ cpu_cache_init(); diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 33d20f34560f..5bbde53fb32d 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -73,10 +73,9 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) #endif /* CONFIG_29BIT */ void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t pgprot) { void __iomem *mapped; - pgprot_t pgprot = __pgprot(prot); mapped = __ioremap_trapped(phys_addr, size); if (mapped) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2b7f358762c1..dc28f2c4eee3 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -936,7 +936,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { - arch_enter_lazy_mmu_mode(); for (;;) { __set_pte_at(mm, addr, ptep, pte, 0); if (--nr == 0) @@ -945,7 +944,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_val(pte) += PAGE_SIZE; addr += PAGE_SIZE; } - arch_leave_lazy_mmu_mode(); } #define set_ptes set_ptes diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index d96a14ffceeb..fdc93dd12c3e 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -232,19 +232,7 @@ static void __init taint_real_pages(void) } } -static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) -{ - unsigned long tmp; - -#ifdef CONFIG_DEBUG_HIGHMEM - printk("mapping high region %08lx - %08lx\n", start_pfn, 
end_pfn); -#endif - - for (tmp = start_pfn; tmp < end_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); -} - -void __init mem_init(void) +void __init arch_mm_preinit(void) { int i; @@ -274,23 +262,6 @@ void __init mem_init(void) memset(sparc_valid_addr_bitmap, 0, i << 2); taint_real_pages(); - - max_mapnr = last_valid_pfn - pfn_base; - high_memory = __va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); - - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; - unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; - - if (end_pfn <= highstart_pfn) - continue; - - if (start_pfn < highstart_pfn) - start_pfn = highstart_pfn; - - map_high_region(start_pfn, end_pfn); - } } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 05882bca5b73..760818950464 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2505,10 +2505,6 @@ static void __init register_page_bootmem_info(void) } void __init mem_init(void) { - high_memory = __va(last_valid_pfn << PAGE_SHIFT); - - memblock_free_all(); - /* * Must be done after boot memory is put on freelist, because here we * might set fields in deferred struct pages that have not yet been diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 8648a50afe88..a35ddcca5e76 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -52,8 +52,10 @@ out: void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + struct tlb_batch *tb; + preempt_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + preempt_enable(); } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, diff --git a/arch/um/include/shared/mem_user.h b/arch/um/include/shared/mem_user.h index adfa08062f88..d4727efcf23d 100644 --- a/arch/um/include/shared/mem_user.h +++ b/arch/um/include/shared/mem_user.h @@ -47,7 +47,6 @@ extern int iomem_size; #define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1)) extern unsigned long find_iomem(char *driver, unsigned long *len_out); -extern void mem_total_pages(unsigned long physmem, unsigned long iomem); extern void setup_physmem(unsigned long start, unsigned long usable, unsigned long len); extern void map_memory(unsigned long virt, unsigned long phys, diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index befed230aac2..379f33a1babf 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,7 @@ int kmalloc_ok = 0; /* Used during early boot */ static unsigned long brk_end; -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); @@ -66,10 +66,11 @@ void __init mem_init(void) map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); memblock_free((void *)brk_end, uml_reserved - brk_end); uml_reserved = brk_end; - - /* this will put all low memory onto the freelists */ - memblock_free_all(); max_pfn = max_low_pfn; +} + +void __init mem_init(void) +{ kmalloc_ok = 1; } diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index a74f17b033c4..af02b5f9911d 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -22,18 +22,6 @@ static int physmem_fd = -1; unsigned long high_physmem; EXPORT_SYMBOL(high_physmem); -void __init mem_total_pages(unsigned long physmem, unsigned long 
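The sparc tlb.c hunk pins the task to one CPU for the whole lazy-MMU section by pairing preempt_disable()/preempt_enable() with the per-CPU tlb_batch, and set_ptes() stops nesting enter/leave itself. A sketch of the resulting caller contract, assuming only the generic lazy-MMU API; the wrapper is hypothetical:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Illustrative sketch: batched PTE installation. On sparc the enter/leave
 * pair now also disables/enables preemption, so every update lands in the
 * same CPU's tlb_batch before it is flushed. */
static void example_install_ptes(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	set_ptes(mm, addr, ptep, pte, nr);
	arch_leave_lazy_mmu_mode();
}
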
iomem) -{ - unsigned long phys_pages, iomem_pages, total_pages; - - phys_pages = physmem >> PAGE_SHIFT; - iomem_pages = iomem >> PAGE_SHIFT; - - total_pages = phys_pages + iomem_pages; - - max_mapnr = total_pages; -} - void map_memory(unsigned long virt, unsigned long phys, unsigned long len, int r, int w, int x) { diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 8be91974e786..050279814eac 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -385,7 +385,6 @@ int __init linux_main(int argc, char **argv, char **envp) high_physmem = uml_physmem + physmem_size; end_iomem = high_physmem + iomem_size; - high_memory = (void *) end_iomem; start_vm = VMALLOC_START; @@ -419,7 +418,6 @@ void __init setup_arch(char **cmdline_p) stack_protections((unsigned long) init_task.stack); setup_physmem(uml_physmem, uml_reserved, physmem_size); - mem_total_pages(physmem_size, iomem_size); uml_dtb_init(); read_initrd(); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 15f346f02af0..9395ec37bb64 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -26,6 +26,7 @@ config X86_64 depends on 64BIT # Options that are inherently 64-bit kernel only: select ARCH_HAS_GIGANTIC_PAGE + select ARCH_HAS_PTDUMP select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_PER_VMA_LOCK select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE @@ -148,6 +149,7 @@ config X86 select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 + select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64 select ARCH_WANTS_THP_SWAP if X86_64 select ARCH_HAS_PARANOID_L1D_FLUSH select BUILDTIME_TABLE_SORT @@ -176,7 +178,6 @@ config X86 select GENERIC_IRQ_RESERVATION_MODE select GENERIC_IRQ_SHOW select GENERIC_PENDING_IRQ if SMP - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_GETTIMEOFDAY diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 1eb4d23cdaae..c95c3aaadf97 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -59,7 +59,7 @@ config EARLY_PRINTK_USB_XDBC config EFI_PGT_DUMP bool "Dump the EFI pagetable" depends on EFI - select PTDUMP_CORE + select PTDUMP help Enable this if you want to dump the EFI page table before enabling virtual mode. 
This can be used to debug miscellaneous diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 731ee7cc40a5..585bdadba47d 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -69,9 +69,6 @@ extern unsigned long highstart_pfn, highend_pfn; arch_flush_lazy_mmu_mode(); \ } while (0) -extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn); - #endif /* __KERNEL__ */ #endif /* _ASM_X86_HIGHMEM_H */ diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 1a0dc2b2bf5b..e889c3bab5a2 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -170,7 +170,7 @@ extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); #define ioremap_uc ioremap_uc extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); #define ioremap_cache ioremap_cache -extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot); #define ioremap_prot ioremap_prot extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size); #define ioremap_encrypted ioremap_encrypted diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 5469d7a7c40f..53ba39ce010c 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -41,10 +41,6 @@ static inline int numa_cpu_node(int cpu) } #endif /* CONFIG_NUMA */ -#ifdef CONFIG_X86_32 -# include <asm/numa_32.h> -#endif - #ifdef CONFIG_NUMA extern void numa_set_node(int cpu, int node); extern void numa_clear_node(int cpu); diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h deleted file mode 100644 index 9c8e9e85be77..000000000000 --- a/arch/x86/include/asm/numa_32.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_NUMA_32_H -#define _ASM_X86_NUMA_32_H - -#ifdef CONFIG_HIGHMEM -extern void set_highmem_pages_init(void); -#else -static inline void set_highmem_pages_init(void) -{ -} -#endif - -#endif /* _ASM_X86_NUMA_32_H */ diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 105db2d33c7b..5fe314a2e73e 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -63,10 +63,14 @@ unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \ \ tcp_ptr__ += (__force unsigned long)(_ptr); \ - (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__; \ }) #else -#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; }) +#define arch_raw_cpu_ptr(_ptr) \ +({ \ + BUILD_BUG(); \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0; \ +}) #endif #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel @@ -81,9 +85,18 @@ #endif /* CONFIG_SMP */ -#define __my_cpu_type(var) typeof(var) __percpu_seg_override -#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) -#define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL) +# define __my_cpu_type(var) typeof(var) +# define __my_cpu_ptr(ptr) (ptr) +# define __my_cpu_var(var) (var) + +# define __percpu_qual __percpu_seg_override +#else +# define __my_cpu_type(var) typeof(var) __percpu_seg_override +# define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) +# define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +#endif + #define __percpu_arg(x) 
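The typeof() to TYPEOF_UNQUAL() switches in the per-CPU macros above matter once per-CPU variables carry a named address space (the CONFIG_USE_X86_SEG_SUPPORT __seg_gs case): plain typeof() would drag the qualifier onto a local temporary. A sketch of the idiom, assuming only that TYPEOF_UNQUAL() yields the unqualified type as these hunks use it; the macro name is made up:

/* Illustrative sketch mirroring the compile-time type check used above:
 * the scratch variable must be an ordinary local, so its type is taken
 * with TYPEOF_UNQUAL() rather than typeof(). */
#define example_check_assignable(_var, _val)		\
do {							\
	if (0) {					\
		TYPEOF_UNQUAL(_var) tmp__;		\
		tmp__ = (_val);				\
		(void)tmp__;				\
	}						\
} while (0)
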
__percpu_prefix "%" #x #define __force_percpu_arg(x) __force_percpu_prefix "%" #x @@ -150,7 +163,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -191,7 +204,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -212,7 +225,7 @@ do { \ (val) == (typeof(val))-1)) ? (int)(val) : 0; \ \ if (0) { \ - typeof(var) pao_tmp__; \ + TYPEOF_UNQUAL(var) pao_tmp__; \ pao_tmp__ = (val); \ (void)pao_tmp__; \ } \ @@ -245,7 +258,7 @@ do { \ */ #define raw_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = raw_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var); \ \ raw_cpu_write(_var, _nval); \ \ @@ -259,7 +272,7 @@ do { \ */ #define this_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = this_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var); \ \ do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \ \ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index a9af8759de34..e9b81876ebe4 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -348,8 +348,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) } static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) + struct mm_struct *mm, unsigned long start, unsigned long end) { inc_mm_tlb_gen(mm); cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index a9088250770f..bd0fc69a10a7 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -72,18 +72,10 @@ enum xen_lazy_mode { }; DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode); -DECLARE_PER_CPU(unsigned int, xen_lazy_nesting); static inline void enter_lazy(enum xen_lazy_mode mode) { - enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode); - - if (mode == old_mode) { - this_cpu_inc(xen_lazy_nesting); - return; - } - - BUG_ON(old_mode != XEN_LAZY_NONE); + BUG_ON(this_cpu_read(xen_lazy_mode) != XEN_LAZY_NONE); this_cpu_write(xen_lazy_mode, mode); } @@ -92,10 +84,7 @@ static inline void leave_lazy(enum xen_lazy_mode mode) { BUG_ON(this_cpu_read(xen_lazy_mode) != mode); - if (this_cpu_read(xen_lazy_nesting) == 0) - this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE); - else - this_cpu_dec(xen_lazy_nesting); + this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE); } enum xen_lazy_mode xen_get_lazy_mode(void); diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index dac4d64dfb2a..2235a7477436 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs) copy_user = is_copy_from_user(regs); instrumentation_end(); - switch (fixup_type) { - case EX_TYPE_UACCESS: - if (!copy_user) - return IN_KERNEL; - m->kflags |= MCE_IN_KERNEL_COPYIN; - fallthrough; + if (copy_user) { + m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; + return IN_KERNEL_RECOV; + } + switch (fixup_type) { case EX_TYPE_FAULT_MCE_SAFE: case EX_TYPE_DEFAULT_MCE_SAFE: m->kflags |= MCE_IN_KERNEL_RECOV; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 
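Both the RISC-V and x86 hunks switch arch_tlbbatch_add_pending() from taking a single unmapped address to taking a [start, end) range. A caller-side sketch under that assumption; the wrapper and its name are hypothetical:

#include <asm/tlbflush.h>

/* Illustrative sketch: queue one deferred TLB flush covering nr contiguous
 * pages instead of calling once per page. */
static void example_defer_range_flush(struct arch_tlbflush_unmap_batch *batch,
				      struct mm_struct *mm,
				      unsigned long addr, unsigned int nr)
{
	arch_tlbbatch_add_pending(batch, mm, addr,
				  addr + (unsigned long)nr * PAGE_SIZE);
}
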
9c9faa1634fb..102641fd2172 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -655,7 +655,7 @@ void kgdb_arch_late(void) if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); - if (IS_ERR((void * __force)breakinfo[i].pev)) { + if (IS_ERR_PCPU(breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c7164a8de983..a4f818fbfb57 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = e820__end_of_low_ram_pfn(); else max_low_pfn = max_pfn; - - high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; #endif /* Find and reserve MPTABLE area */ @@ -1166,8 +1164,10 @@ void __init setup_arch(char **cmdline_p) initmem_init(); dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); - if (boot_cpu_has(X86_FEATURE_GBPAGES)) + if (boot_cpu_has(X86_FEATURE_GBPAGES)) { hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); + hugetlb_bootmem_alloc(); + } /* * Reserve memory for crash kernel after SRAT is parsed so that it diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 690fbf48e853..32035d5be5a0 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -39,11 +39,9 @@ CFLAGS_fault.o := -I $(src)/../include/asm/trace obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o +obj-$(CONFIG_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PTDUMP_DEBUGFS) += debug_pagetables.o -obj-$(CONFIG_HIGHMEM) += highmem_32.o - KASAN_SANITIZE_kasan_init_$(BITS).o := n obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c deleted file mode 100644 index d9efa35711ee..000000000000 --- a/arch/x86/mm/highmem_32.c +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include <linux/highmem.h> -#include <linux/export.h> -#include <linux/swap.h> /* for totalram_pages */ -#include <linux/memblock.h> -#include <asm/numa.h> - -void __init set_highmem_pages_init(void) -{ - struct zone *zone; - int nid; - - /* - * Explicitly reset zone->managed_pages because set_highmem_pages_init() - * is invoked before memblock_free_all() - */ - reset_all_zones_managed_pages(); - for_each_zone(zone) { - unsigned long zone_start_pfn, zone_end_pfn; - - if (!is_highmem(zone)) - continue; - - zone_start_pfn = zone->zone_start_pfn; - zone_end_pfn = zone_start_pfn + zone->spanned_pages; - - nid = zone_to_nid(zone); - printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", - zone->name, nid, zone_start_pfn, zone_end_pfn); - - add_highpages_with_active_regions(nid, zone_start_pfn, - zone_end_pfn); - } -} diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f288aad8dc74..ad662cc4605c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -394,23 +394,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base) pkmap_page_table = virt_to_kpte(vaddr); } - -void __init add_highpages_with_active_regions(int nid, - unsigned long start_pfn, unsigned long end_pfn) -{ - phys_addr_t start, end; - u64 i; - - for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { - unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), - start_pfn, end_pfn); - unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), - start_pfn, end_pfn); - for ( ; pfn < e_pfn; pfn++) - if 
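setup_arch() now calls hugetlb_bootmem_alloc() right after the CMA reservation so gigantic pages can be taken from memblock before the buddy allocator exists; powerpc opts in to the same early path through the arch_has_huge_bootmem_alloc() hook seen earlier in this series. A sketch of what such an override looks like; the enabling predicate is made up:

/* Illustrative sketch of the arch hook: report whether this platform wants
 * early (pre-buddy) gigantic page allocation from bootmem.
 * example_firmware_preallocates_hugepages() is a hypothetical predicate. */
#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc

static inline bool arch_has_huge_bootmem_alloc(void)
{
	return example_firmware_preallocates_hugepages();
}
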
(pfn_valid(pfn)) - free_highmem_page(pfn_to_page(pfn)); - } -} #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { @@ -645,9 +628,6 @@ void __init initmem_init(void) memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); -#ifdef CONFIG_FLATMEM - max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; -#endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", @@ -709,27 +689,17 @@ static void __init test_wp_bit(void) panic("Linux doesn't support CPUs with broken WP."); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif - /* - * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to - * be done before memblock_free_all(). Memblock use free low memory for - * temporary data (see find_range_array()) and for this purpose can use - * pages that was already passed to the buddy allocator, hence marked as - * not accessible in the page tables when compiled with - * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not - * important here. - */ - set_highmem_pages_init(); - - /* this will put all low memory onto the freelists */ - memblock_free_all(); +} +void __init mem_init(void) +{ after_bootmem = 1; x86_init.hyper.init_after_bootmem(); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 519aa53114fa..36a017b4a30d 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1340,14 +1340,15 @@ failed: panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { pci_iommu_alloc(); +} +void __init mem_init(void) +{ /* clear_bss() already clear the empty_zero_page */ - /* this will put all memory onto the freelists */ - memblock_free_all(); after_bootmem = 1; x86_init.hyper.init_after_bootmem(); @@ -1591,11 +1592,14 @@ void register_page_bootmem_memmap(unsigned long section_nr, } get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); - if (!boot_cpu_has(X86_FEATURE_PSE)) { + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + next = (addr + PAGE_SIZE) & PAGE_MASK; + continue; + } + + if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) { next = (addr + PAGE_SIZE) & PAGE_MASK; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - continue; get_page_bootmem(section_nr, pmd_page(*pmd), MIX_SECTION_INFO); @@ -1606,12 +1610,7 @@ void register_page_bootmem_memmap(unsigned long section_nr, SECTION_INFO); } else { next = pmd_addr_end(addr, end); - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - continue; - - nr_pmd_pages = 1 << get_order(PMD_SIZE); + nr_pmd_pages = (next - addr) >> PAGE_SHIFT; page = pmd_page(*pmd); while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 42c90b420773..331e101bf801 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -440,10 +440,10 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) EXPORT_SYMBOL(ioremap_cache); void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { return __ioremap_caller(phys_addr, size, - pgprot2cachemode(__pgprot(prot_val)), + pgprot2cachemode(prot), __builtin_return_address(0), false); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index dcc2041f8e61..846b5737d320 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -138,7 +138,6 @@ struct 
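The x86 hunks show the split applied throughout this series: work that must run before the page allocator is populated moves into the new arch_mm_preinit() hook, memblock_free_all() and the high_memory/max_mapnr setup are taken over by the core mm, and mem_init() keeps only late, arch-specific pieces. A sketch of the minimal shape an architecture is left with, assuming the core-mm side of the series provides those defaults:

#include <linux/init.h>

/* Illustrative sketch only: arch hooks after the split. */
void __init arch_mm_preinit(void)
{
	/* early fixups that must precede handing memory to the buddy
	 * allocator, e.g. IOMMU bootstrap or zero-page setup */
}

void __init mem_init(void)
{
	/* late arch work that wants the free lists already populated */
}
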
tls_descs { }; DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE; -DEFINE_PER_CPU(unsigned int, xen_lazy_nesting); enum xen_lazy_mode xen_get_lazy_mode(void) { diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 934e58399c8c..7cdcc2deab3e 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -29,7 +29,7 @@ * I/O memory mapping functions. */ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot); + pgprot_t prot); #define ioremap_prot ioremap_prot #define iounmap iounmap @@ -40,7 +40,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR); else return ioremap_prot(offset, size, - pgprot_val(pgprot_noncached(PAGE_KERNEL))); + pgprot_noncached(PAGE_KERNEL)); } #define ioremap ioremap @@ -51,7 +51,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset, && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE) return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR); else - return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL)); + return ioremap_prot(offset, size, PAGE_KERNEL); } #define ioremap_cache ioremap_cache diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index b2587a1a7c46..cc52733a0649 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -66,59 +66,8 @@ void __init bootmem_init(void) memblock_dump_all(); } - -void __init zones_init(void) +static void __init print_vm_layout(void) { - /* All pages are DMA-able, so we put them all in the DMA zone. */ - unsigned long max_zone_pfn[MAX_NR_ZONES] = { - [ZONE_NORMAL] = max_low_pfn, -#ifdef CONFIG_HIGHMEM - [ZONE_HIGHMEM] = max_pfn, -#endif - }; - free_area_init(max_zone_pfn); -} - -static void __init free_highpages(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn; - phys_addr_t range_start, range_end; - u64 i; - - /* set highmem page free */ - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, - &range_start, &range_end, NULL) { - unsigned long start = PFN_UP(range_start); - unsigned long end = PFN_DOWN(range_end); - - /* Ignore complete lowmem entries */ - if (end <= max_low) - continue; - - /* Truncate partial highmem entries */ - if (start < max_low) - start = max_low; - - for (; start < end; start++) - free_highmem_page(pfn_to_page(start)); - } -#endif -} - -/* - * Initialize memory pages. - */ - -void __init mem_init(void) -{ - free_highpages(); - - max_mapnr = max_pfn - ARCH_PFN_OFFSET; - high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); - - memblock_free_all(); - pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" @@ -167,6 +116,19 @@ void __init mem_init(void) (unsigned long)(__bss_stop - __bss_start) >> 10); } +void __init zones_init(void) +{ + /* All pages are DMA-able, so we put them all in the DMA zone. 
*/ + unsigned long max_zone_pfn[MAX_NR_ZONES] = { + [ZONE_NORMAL] = max_low_pfn, +#ifdef CONFIG_HIGHMEM + [ZONE_HIGHMEM] = max_pfn, +#endif + }; + free_area_init(max_zone_pfn); + print_vm_layout(); +} + static void __init parse_memmap_one(char *p) { char *oldp; diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c index 8ca660b7ab49..26f238fa9d0d 100644 --- a/arch/xtensa/mm/ioremap.c +++ b/arch/xtensa/mm/ioremap.c @@ -11,12 +11,12 @@ #include <asm/io.h> void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { unsigned long pfn = __phys_to_pfn((phys_addr)); WARN_ON(pfn_valid(pfn)); - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 348c5dbbfa68..8f3a41d9bfaa 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -455,7 +455,7 @@ static ssize_t valid_zones_show(struct device *dev, struct memory_group *group = mem->group; struct zone *default_zone; int nid = mem->nid; - int len = 0; + int len; /* * Check the existing zone. Make sure that we do that only on the @@ -466,22 +466,18 @@ static ssize_t valid_zones_show(struct device *dev, * If !mem->zone, the memory block spans multiple zones and * cannot get offlined. */ - default_zone = mem->zone; - if (!default_zone) - return sysfs_emit(buf, "%s\n", "none"); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); - goto out; + return sysfs_emit(buf, "%s\n", + mem->zone ? mem->zone->name : "none"); } default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group, start_pfn, nr_pages); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); + len = sysfs_emit(buf, "%s", default_zone->name); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, default_zone); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, default_zone); -out: len += sysfs_emit_at(buf, len, "\n"); return len; } @@ -822,18 +818,17 @@ static int add_memory_block(unsigned long block_id, unsigned long state, static int __init add_boot_memory_block(unsigned long base_section_nr) { - int section_count = 0; unsigned long nr; - for (nr = base_section_nr; nr < base_section_nr + sections_per_block; - nr++) - if (present_section_nr(nr)) - section_count++; + for_each_present_section_nr(base_section_nr, nr) { + if (nr >= (base_section_nr + sections_per_block)) + break; - if (section_count == 0) - return 0; - return add_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE, NULL, NULL); + return add_memory_block(memory_block_id(base_section_nr), + MEM_ONLINE, NULL, NULL); + } + + return 0; } static int add_hotplug_memory_block(unsigned long block_id, diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c index 1184c0036f44..22c8067536f3 100644 --- a/drivers/block/zram/backend_zstd.c +++ b/drivers/block/zram/backend_zstd.c @@ -24,19 +24,10 @@ struct zstd_params { /* * For C/D dictionaries we need to provide zstd with zstd_custom_mem, * which zstd uses internally to allocate/free memory when needed. - * - * This means that allocator.customAlloc() can be called from zcomp_compress() - * under local-lock (per-CPU compression stream), in which case we must use - * GFP_ATOMIC. - * - * Another complication here is that we can be configured as a swap device. 
*/ static void *zstd_custom_alloc(void *opaque, size_t size) { - if (!preemptible()) - return kvzalloc(size, GFP_ATOMIC); - - return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN); + return kvzalloc(size, GFP_NOIO | __GFP_NOWARN); } static void zstd_custom_free(void *opaque, void *address) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index bb514403e305..d26a58c67e95 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -6,8 +6,7 @@ #include <linux/slab.h> #include <linux/wait.h> #include <linux/sched.h> -#include <linux/cpu.h> -#include <linux/crypto.h> +#include <linux/cpuhotplug.h> #include <linux/vmalloc.h> #include "zcomp.h" @@ -46,6 +45,7 @@ static const struct zcomp_ops *backends[] = { static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) { comp->ops->destroy_ctx(&zstrm->ctx); + vfree(zstrm->local_copy); vfree(zstrm->buffer); zstrm->buffer = NULL; } @@ -58,12 +58,13 @@ static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm) if (ret) return ret; + zstrm->local_copy = vzalloc(PAGE_SIZE); /* * allocate 2 pages. 1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ zstrm->buffer = vzalloc(2 * PAGE_SIZE); - if (!zstrm->buffer) { + if (!zstrm->buffer || !zstrm->local_copy) { zcomp_strm_free(comp, zstrm); return -ENOMEM; } @@ -109,13 +110,29 @@ ssize_t zcomp_available_show(const char *comp, char *buf) struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { - local_lock(&comp->stream->lock); - return this_cpu_ptr(comp->stream); + for (;;) { + struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream); + + /* + * Inspired by zswap + * + * stream is returned with ->mutex locked which prevents + * cpu_dead() from releasing this stream under us, however + * there is still a race window between raw_cpu_ptr() and + * mutex_lock(), during which we could have been migrated + * from a CPU that has already destroyed its stream. If + * so then unlock and re-try on the current CPU. 
+ */ + mutex_lock(&zstrm->lock); + if (likely(zstrm->buffer)) + return zstrm; + mutex_unlock(&zstrm->lock); + } } -void zcomp_stream_put(struct zcomp *comp) +void zcomp_stream_put(struct zcomp_strm *zstrm) { - local_unlock(&comp->stream->lock); + mutex_unlock(&zstrm->lock); } int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, @@ -129,6 +146,7 @@ int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, }; int ret; + might_sleep(); ret = comp->ops->compress(comp->params, &zstrm->ctx, &req); if (!ret) *dst_len = req.dst_len; @@ -145,18 +163,16 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm, .dst_len = PAGE_SIZE, }; + might_sleep(); return comp->ops->decompress(comp->params, &zstrm->ctx, &req); } int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) { struct zcomp *comp = hlist_entry(node, struct zcomp, node); - struct zcomp_strm *zstrm; + struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu); int ret; - zstrm = per_cpu_ptr(comp->stream, cpu); - local_lock_init(&zstrm->lock); - ret = zcomp_strm_init(comp, zstrm); if (ret) pr_err("Can't allocate a compression stream\n"); @@ -166,16 +182,17 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node) { struct zcomp *comp = hlist_entry(node, struct zcomp, node); - struct zcomp_strm *zstrm; + struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu); - zstrm = per_cpu_ptr(comp->stream, cpu); + mutex_lock(&zstrm->lock); zcomp_strm_free(comp, zstrm); + mutex_unlock(&zstrm->lock); return 0; } static int zcomp_init(struct zcomp *comp, struct zcomp_params *params) { - int ret; + int ret, cpu; comp->stream = alloc_percpu(struct zcomp_strm); if (!comp->stream) @@ -186,6 +203,9 @@ static int zcomp_init(struct zcomp *comp, struct zcomp_params *params) if (ret) goto cleanup; + for_each_possible_cpu(cpu) + mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock); + ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node); if (ret < 0) goto cleanup; diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index ad5762813842..25339ed1e07e 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -3,7 +3,7 @@ #ifndef _ZCOMP_H_ #define _ZCOMP_H_ -#include <linux/local_lock.h> +#include <linux/mutex.h> #define ZCOMP_PARAM_NO_LEVEL INT_MIN @@ -31,9 +31,11 @@ struct zcomp_ctx { }; struct zcomp_strm { - local_lock_t lock; + struct mutex lock; /* compression buffer */ void *buffer; + /* local copy of handle memory */ + void *local_copy; struct zcomp_ctx ctx; }; @@ -77,7 +79,7 @@ struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params); void zcomp_destroy(struct zcomp *comp); struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); -void zcomp_stream_put(struct zcomp *comp); +void zcomp_stream_put(struct zcomp_strm *zstrm); int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, const void *src, unsigned int *dst_len); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 9f5020b077c5..fda7d8624889 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex); static int zram_major; static const char *default_compressor = CONFIG_ZRAM_DEF_COMP; +#define ZRAM_MAX_ALGO_NAME_SZ 128 + /* Module params (documentation at end) */ static unsigned int num_devices = 1; /* @@ -58,19 +60,56 @@ static void zram_free_page(struct zram *zram, size_t index); static int zram_read_from_zspool(struct 
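With the local locks replaced by a per-CPU mutex, zcomp_stream_get() may sleep, returns the stream it locked (retrying if the CPU it sampled has already torn its stream down), and zcomp_stream_put() now takes that stream rather than the zcomp. A caller-side sketch of the new pairing; the surrounding function is hypothetical:

#include "zcomp.h"

/* Illustrative sketch: compress one buffer with the new get/put contract.
 * The returned stream is mutex-locked and must be handed back to put(). */
static int example_compress(struct zcomp *comp, const void *src,
			    unsigned int *dst_len)
{
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = zcomp_stream_get(comp);		/* may sleep */
	ret = zcomp_compress(comp, zstrm, src, dst_len);
	zcomp_stream_put(zstrm);

	return ret;
}
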
zram *zram, struct page *page, u32 index); -static int zram_slot_trylock(struct zram *zram, u32 index) +#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map) + +static void zram_slot_lock_init(struct zram *zram, u32 index) +{ + static struct lock_class_key __key; + + lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock", + &__key, 0); +} + +/* + * entry locking rules: + * + * 1) Lock is exclusive + * + * 2) lock() function can sleep waiting for the lock + * + * 3) Lock owner can sleep + * + * 4) Use TRY lock variant when in atomic context + * - must check return value and handle locking failures + */ +static __must_check bool zram_slot_trylock(struct zram *zram, u32 index) { - return spin_trylock(&zram->table[index].lock); + unsigned long *lock = &zram->table[index].flags; + + if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) { + mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_); + lock_acquired(slot_dep_map(zram, index), _RET_IP_); + return true; + } + + return false; } static void zram_slot_lock(struct zram *zram, u32 index) { - spin_lock(&zram->table[index].lock); + unsigned long *lock = &zram->table[index].flags; + + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_); + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE); + lock_acquired(slot_dep_map(zram, index), _RET_IP_); } static void zram_slot_unlock(struct zram *zram, u32 index) { - spin_unlock(&zram->table[index].lock); + unsigned long *lock = &zram->table[index].flags; + + mutex_release(slot_dep_map(zram, index), _RET_IP_); + clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock); } static inline bool init_done(struct zram *zram) @@ -93,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) zram->table[index].handle = handle; } -/* flag operations require table entry bit_spin_lock() being held */ static bool zram_test_flag(struct zram *zram, u32 index, enum zram_pageflags flag) { @@ -257,15 +295,24 @@ static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl) kfree(ctl); } -static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, - struct zram_pp_slot *pps) +static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, + u32 index) { - u32 idx; + struct zram_pp_slot *pps; + u32 bid; + + pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN); + if (!pps) + return false; + + INIT_LIST_HEAD(&pps->entry); + pps->index = index; - idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; - list_add(&pps->entry, &ctl->pp_buckets[idx]); + bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; + list_add(&pps->entry, &ctl->pp_buckets[bid]); zram_set_flag(zram, pps->index, ZRAM_PP_SLOT); + return true; } static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl) @@ -699,15 +746,8 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode, unsigned long index, struct zram_pp_ctl *ctl) { - struct zram_pp_slot *pps = NULL; - for (; nr_pages != 0; index++, nr_pages--) { - if (!pps) - pps = kmalloc(sizeof(*pps), GFP_KERNEL); - if (!pps) - return -ENOMEM; - - INIT_LIST_HEAD(&pps->entry); + bool ok = true; zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -727,14 +767,13 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode, !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; - pps->index = index; - place_pp_slot(zram, ctl, pps); - pps = NULL; + ok = place_pp_slot(zram, ctl, index); next: zram_slot_unlock(zram, index); + if (!ok) + break; } - kfree(pps); return 
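A caller-side sketch of the locking rules documented above: process context may sleep on the slot lock, atomic context must use the trylock variant and cope with failure. The helper is hypothetical:

/* Illustrative sketch of the slot-lock contract. */
static int example_touch_slot(struct zram *zram, u32 index, bool atomic_ctx)
{
	if (atomic_ctx) {
		if (!zram_slot_trylock(zram, index))
			return -EAGAIN;		/* caller retries later */
	} else {
		zram_slot_lock(zram, index);	/* may sleep */
	}

	/* ... operate on zram->table[index] ... */

	zram_slot_unlock(zram, index);
	return 0;
}
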
0; } @@ -748,7 +787,7 @@ static ssize_t writeback_store(struct device *dev, unsigned long index = 0; struct bio bio; struct bio_vec bio_vec; - struct page *page; + struct page *page = NULL; ssize_t ret = len; int mode, err; unsigned long blk_idx = 0; @@ -890,8 +929,10 @@ next: if (blk_idx) free_block_bdev(zram, blk_idx); - __free_page(page); + release_init_lock: + if (page) + __free_page(page); release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); @@ -1065,27 +1106,6 @@ static void zram_debugfs_register(struct zram *zram) {}; static void zram_debugfs_unregister(struct zram *zram) {}; #endif -/* - * We switched to per-cpu streams and this attr is not needed anymore. - * However, we will keep it around for some time, because: - * a) we may revert per-cpu streams in the future - * b) it's visible to user space and we need to follow our 2 years - * retirement rule; but we already have a number of 'soon to be - * altered' attrs, so max_comp_streams need to wait for the next - * layoff cycle. - */ -static ssize_t max_comp_streams_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus()); -} - -static ssize_t max_comp_streams_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - return len; -} - static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) { /* Do not free statically defined compression algorithms */ @@ -1112,7 +1132,7 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) size_t sz; sz = strlen(buf); - if (sz >= CRYPTO_MAX_ALG_NAME) + if (sz >= ZRAM_MAX_ALGO_NAME_SZ) return -E2BIG; compressor = kstrdup(buf, GFP_KERNEL); @@ -1420,9 +1440,8 @@ static ssize_t debug_stat_show(struct device *dev, down_read(&zram->init_lock); ret = scnprintf(buf, PAGE_SIZE, - "version: %d\n%8llu %8llu\n", + "version: %d\n0 %8llu\n", version, - (u64)atomic64_read(&zram->stats.writestall), (u64)atomic64_read(&zram->stats.miss_free)); up_read(&zram->init_lock); @@ -1473,15 +1492,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) huge_class_size = zs_huge_class_size(zram->mem_pool); for (index = 0; index < num_pages; index++) - spin_lock_init(&zram->table[index].lock); + zram_slot_lock_init(zram, index); + return true; } -/* - * To protect concurrent access to the same index entry, - * caller should hold this table index entry's bit_spinlock to - * indicate this index entry is accessing. 
- */ static void zram_free_page(struct zram *zram, size_t index) { unsigned long handle; @@ -1548,11 +1563,11 @@ static int read_incompressible_page(struct zram *zram, struct page *page, void *src, *dst; handle = zram_get_handle(zram, index); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + src = zs_obj_read_begin(zram->mem_pool, handle, NULL); dst = kmap_local_page(page); copy_page(dst, src); kunmap_local(dst); - zs_unmap_object(zram->mem_pool, handle); + zs_obj_read_end(zram->mem_pool, handle, src); return 0; } @@ -1570,12 +1585,12 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index) prio = zram_get_priority(zram, index); zstrm = zcomp_stream_get(zram->comps[prio]); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy); dst = kmap_local_page(page); ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); kunmap_local(dst); - zs_unmap_object(zram->mem_pool, handle); - zcomp_stream_put(zram->comps[prio]); + zs_obj_read_end(zram->mem_pool, handle, src); + zcomp_stream_put(zstrm); return ret; } @@ -1670,7 +1685,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page, u32 index) { unsigned long handle; - void *src, *dst; + void *src; /* * This function is called from preemptible context so we don't need @@ -1678,7 +1693,8 @@ static int write_incompressible_page(struct zram *zram, struct page *page, * like we do for compressible pages. */ handle = zs_malloc(zram->mem_pool, PAGE_SIZE, - GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE); + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); @@ -1687,11 +1703,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page, return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); src = kmap_local_page(page); - memcpy(dst, src, PAGE_SIZE); + zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE); kunmap_local(src); - zs_unmap_object(zram->mem_pool, handle); zram_slot_lock(zram, index); zram_set_flag(zram, index, ZRAM_HUGE); @@ -1710,11 +1724,11 @@ static int write_incompressible_page(struct zram *zram, struct page *page, static int zram_write_page(struct zram *zram, struct page *page, u32 index) { int ret = 0; - unsigned long handle = -ENOMEM; - unsigned int comp_len = 0; - void *dst, *mem; + unsigned long handle; + unsigned int comp_len; + void *mem; struct zcomp_strm *zstrm; - unsigned long element = 0; + unsigned long element; bool same_filled; /* First, free memory allocated to this slot (if any) */ @@ -1728,7 +1742,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) if (same_filled) return write_same_filled_page(zram, element, index); -compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); mem = kmap_local_page(page); ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, @@ -1736,59 +1749,32 @@ compress_again: kunmap_local(mem); if (unlikely(ret)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); pr_err("Compression failed! 
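These zram read paths (and the write paths around them) replace the zs_map_object()/zs_unmap_object() pairs with the new zsmalloc object API: zs_obj_read_begin() returns either a direct pointer into the pool or the caller-supplied bounce buffer, zs_obj_read_end() releases it, and zs_obj_write() copies data in with a single call. A condensed sketch of the read pattern, assuming only the semantics visible in these hunks; the helper is hypothetical:

#include <linux/string.h>
#include <linux/zsmalloc.h>

/* Illustrative sketch: copy an object out of zsmalloc with the new API.
 * local_copy is a caller-owned, page-sized scratch buffer. */
static void example_read_obj(struct zs_pool *pool, unsigned long handle,
			     void *local_copy, void *dst, size_t len)
{
	void *src;

	src = zs_obj_read_begin(pool, handle, local_copy);
	memcpy(dst, src, len);
	zs_obj_read_end(pool, handle, src);
}
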
err=%d\n", ret); - zs_free(zram->mem_pool, handle); return ret; } if (comp_len >= huge_class_size) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); return write_incompressible_page(zram, page, index); } - /* - * handle allocation has 2 paths: - * a) fast path is executed with preemption disabled (for - * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, - * since we can't sleep; - * b) slow path enables preemption and attempts to allocate - * the page with __GFP_DIRECT_RECLAIM bit set. we have to - * put per-cpu compression stream and, thus, to re-do - * the compression once handle is allocated. - * - * if we have a 'non-null' handle here then we are coming - * from the slow path and handle has already been allocated. - */ - if (IS_ERR_VALUE(handle)) - handle = zs_malloc(zram->mem_pool, comp_len, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + handle = zs_malloc(zram->mem_pool, comp_len, + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); - atomic64_inc(&zram->stats.writestall); - handle = zs_malloc(zram->mem_pool, comp_len, - GFP_NOIO | __GFP_HIGHMEM | - __GFP_MOVABLE); - if (IS_ERR_VALUE(handle)) - return PTR_ERR((void *)handle); - - goto compress_again; + zcomp_stream_put(zstrm); + return PTR_ERR((void *)handle); } if (!zram_can_store_page(zram)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); zs_free(zram->mem_pool, handle); return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); - - memcpy(dst, zstrm->buffer, comp_len); - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); - zs_unmap_object(zram->mem_pool, handle); + zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len); + zcomp_stream_put(zstrm); zram_slot_lock(zram, index); zram_set_handle(zram, index, handle); @@ -1835,20 +1821,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, #define RECOMPRESS_IDLE (1 << 0) #define RECOMPRESS_HUGE (1 << 1) -static int scan_slots_for_recompress(struct zram *zram, u32 mode, +static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max, struct zram_pp_ctl *ctl) { unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct zram_pp_slot *pps = NULL; unsigned long index; for (index = 0; index < nr_pages; index++) { - if (!pps) - pps = kmalloc(sizeof(*pps), GFP_KERNEL); - if (!pps) - return -ENOMEM; - - INIT_LIST_HEAD(&pps->entry); + bool ok = true; zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -1867,14 +1847,17 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; - pps->index = index; - place_pp_slot(zram, ctl, pps); - pps = NULL; + /* Already compressed with same of higher priority */ + if (zram_get_priority(zram, index) + 1 >= prio_max) + goto next; + + ok = place_pp_slot(zram, ctl, index); next: zram_slot_unlock(zram, index); + if (!ok) + break; } - kfree(pps); return 0; } @@ -1896,9 +1879,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, unsigned int comp_len_new; unsigned int class_index_old; unsigned int class_index_new; - u32 num_recomps = 0; - void *src, *dst; - int ret; + void *src; + int ret = 0; handle_old = zram_get_handle(zram, index); if (!handle_old) @@ -1923,6 +1905,16 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, zram_clear_flag(zram, index, ZRAM_IDLE); 
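The zram hunks in this patch replace the zs_map_object()/zs_unmap_object() pairs with the newer zsmalloc object accessors (zs_obj_read_begin()/zs_obj_read_end() and zs_obj_write()). A minimal sketch of that access pattern, assuming only the signatures visible in these hunks; the read_object()/write_object() wrappers are illustrative and not part of the series:

    #include <linux/string.h>
    #include <linux/zsmalloc.h>

    /*
     * Sketch of the zsmalloc access pattern zram is converted to in this
     * patch. zram passes a per-stream local_copy buffer that the read
     * helper can stage data into when needed (read_incompressible_page()
     * passes NULL for whole-page objects); zs_obj_read_end() releases
     * whatever zs_obj_read_begin() handed back.
     */
    static void read_object(struct zs_pool *pool, unsigned long handle,
                            void *dst, size_t size, void *local_copy)
    {
            void *src;

            src = zs_obj_read_begin(pool, handle, local_copy);
            memcpy(dst, src, size);
            zs_obj_read_end(pool, handle, src);
    }

    static void write_object(struct zs_pool *pool, unsigned long handle,
                             void *buf, size_t size)
    {
            /* One call replaces zs_map_object(WO) + memcpy() + zs_unmap_object(). */
            zs_obj_write(pool, handle, buf, size);
    }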
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); + + prio = max(prio, zram_get_priority(zram, index) + 1); + /* + * Recompression slots scan should not select slots that are + * already compressed with a higher priority algorithm, but + * just in case + */ + if (prio >= prio_max) + return 0; + /* * Iterate the secondary comp algorithms list (in order of priority) * and try to recompress the page. @@ -1931,14 +1923,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (!zram->comps[prio]) continue; - /* - * Skip if the object is already re-compressed with a higher - * priority algorithm (or same algorithm). - */ - if (prio <= zram_get_priority(zram, index)) - continue; - - num_recomps++; zstrm = zcomp_stream_get(zram->comps[prio]); src = kmap_local_page(page); ret = zcomp_compress(zram->comps[prio], zstrm, @@ -1946,8 +1930,9 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, kunmap_local(src); if (ret) { - zcomp_stream_put(zram->comps[prio]); - return ret; + zcomp_stream_put(zstrm); + zstrm = NULL; + break; } class_index_new = zs_lookup_class_index(zram->mem_pool, @@ -1956,7 +1941,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, /* Continue until we make progress */ if (class_index_new >= class_index_old || (threshold && comp_len_new >= threshold)) { - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); + zstrm = NULL; continue; } @@ -1965,14 +1951,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, } /* - * We did not try to recompress, e.g. when we have only one - * secondary algorithm and the page is already recompressed - * using that algorithm - */ - if (!zstrm) - return 0; - - /* * Decrement the limit (if set) on pages we can recompress, even * when current recompression was unsuccessful or did not compress * the page below the threshold, because we still spent resources @@ -1981,48 +1959,39 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (*num_recomp_pages) *num_recomp_pages -= 1; - if (class_index_new >= class_index_old) { + /* Compression error */ + if (ret) + return ret; + + if (!zstrm) { /* * Secondary algorithms failed to re-compress the page - * in a way that would save memory, mark the object as - * incompressible so that we will not try to compress - * it again. + * in a way that would save memory. * - * We need to make sure that all secondary algorithms have - * failed, so we test if the number of recompressions matches - * the number of active secondary algorithms. + * Mark the object incompressible if the max-priority + * algorithm couldn't re-compress it. */ - if (num_recomps == zram->num_active_comps - 1) - zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); + if (prio < zram->num_active_comps) + return 0; + zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); return 0; } - /* Successful recompression but above threshold */ - if (threshold && comp_len_new >= threshold) - return 0; - /* - * No direct reclaim (slow path) for handle allocation and no - * re-compression attempt (unlike in zram_write_bvec()) since - * we already have stored that object in zsmalloc. If we cannot - * alloc memory for recompressed object then we bail out and - * simply keep the old (existing) object in zsmalloc. + * We are holding per-CPU stream mutex and entry lock so better + * avoid direct reclaim. Allocation error is not fatal since + * we still have the old object in the mem_pool. 
*/ handle_new = zs_malloc(zram->mem_pool, comp_len_new, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle_new)) { - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); return PTR_ERR((void *)handle_new); } - dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); - memcpy(dst, zstrm->buffer, comp_len_new); - zcomp_stream_put(zram->comps[prio]); - - zs_unmap_object(zram->mem_pool, handle_new); + zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new); + zcomp_stream_put(zstrm); zram_free_page(zram, index); zram_set_handle(zram, index, handle_new); @@ -2039,16 +2008,19 @@ static ssize_t recompress_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; struct zram *zram = dev_to_zram(dev); char *args, *param, *val, *algo = NULL; u64 num_recomp_pages = ULLONG_MAX; struct zram_pp_ctl *ctl = NULL; struct zram_pp_slot *pps; u32 mode = 0, threshold = 0; - struct page *page; + u32 prio, prio_max; + struct page *page = NULL; ssize_t ret; + prio = ZRAM_SECONDARY_COMP; + prio_max = zram->num_active_comps; + args = skip_spaces(buf); while (*args) { args = next_arg(args, ¶m, &val); @@ -2101,7 +2073,7 @@ static ssize_t recompress_store(struct device *dev, if (prio == ZRAM_PRIMARY_COMP) prio = ZRAM_SECONDARY_COMP; - prio_max = min(prio + 1, ZRAM_MAX_COMPS); + prio_max = prio + 1; continue; } } @@ -2129,7 +2101,7 @@ static ssize_t recompress_store(struct device *dev, continue; if (!strcmp(zram->comp_algs[prio], algo)) { - prio_max = min(prio + 1, ZRAM_MAX_COMPS); + prio_max = prio + 1; found = true; break; } @@ -2141,6 +2113,12 @@ static ssize_t recompress_store(struct device *dev, } } + prio_max = min(prio_max, (u32)zram->num_active_comps); + if (prio >= prio_max) { + ret = -EINVAL; + goto release_init_lock; + } + page = alloc_page(GFP_KERNEL); if (!page) { ret = -ENOMEM; @@ -2153,7 +2131,7 @@ static ssize_t recompress_store(struct device *dev, goto release_init_lock; } - scan_slots_for_recompress(zram, mode, ctl); + scan_slots_for_recompress(zram, mode, prio_max, ctl); ret = len; while ((pps = select_pp_slot(ctl))) { @@ -2181,9 +2159,9 @@ next: cond_resched(); } - __free_page(page); - release_init_lock: + if (page) + __free_page(page); release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); @@ -2506,7 +2484,6 @@ static DEVICE_ATTR_WO(reset); static DEVICE_ATTR_WO(mem_limit); static DEVICE_ATTR_WO(mem_used_max); static DEVICE_ATTR_WO(idle); -static DEVICE_ATTR_RW(max_comp_streams); static DEVICE_ATTR_RW(comp_algorithm); #ifdef CONFIG_ZRAM_WRITEBACK static DEVICE_ATTR_RW(backing_dev); @@ -2528,7 +2505,6 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_mem_limit.attr, &dev_attr_mem_used_max.attr, &dev_attr_idle.attr, - &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, #ifdef CONFIG_ZRAM_WRITEBACK &dev_attr_backing_dev.attr, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index db78d7c01b9a..6cee93f9c0d0 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -17,7 +17,6 @@ #include <linux/rwsem.h> #include <linux/zsmalloc.h> -#include <linux/crypto.h> #include "zcomp.h" @@ -28,7 +27,6 @@ #define ZRAM_SECTOR_PER_LOGICAL_BLOCK \ (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT)) - /* * ZRAM is mainly used for memory efficiency so we want to keep memory * footprint 
small and thus squeeze size and zram pageflags into a flags @@ -46,6 +44,7 @@ /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */ + ZRAM_ENTRY_LOCK, /* entry access lock bit */ ZRAM_WB, /* page is stored on backing_device */ ZRAM_PP_SLOT, /* Selected for post-processing */ ZRAM_HUGE, /* Incompressible page */ @@ -58,16 +57,19 @@ enum zram_pageflags { __NR_ZRAM_PAGEFLAGS, }; -/*-- Data structures */ - -/* Allocated for each disk page */ +/* + * Allocated for each disk page. We use bit-lock (ZRAM_ENTRY_LOCK bit + * of flags) to save memory. There can be plenty of entries and standard + * locking primitives (e.g. mutex) will significantly increase sizeof() + * of each entry and hence of the meta table. + */ struct zram_table_entry { unsigned long handle; - unsigned int flags; - spinlock_t lock; + unsigned long flags; #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME ktime_t ac_time; #endif + struct lockdep_map dep_map; }; struct zram_stats { @@ -80,7 +82,6 @@ struct zram_stats { atomic64_t huge_pages_since; /* no. of huge pages since zram set up */ atomic64_t pages_stored; /* no. of pages currently stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */ - atomic64_t writestall; /* no. of write slow paths */ atomic64_t miss_free; /* no. of missed free */ #ifdef CONFIG_ZRAM_WRITEBACK atomic64_t bd_count; /* no. of pages in backing device */ diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6d74e62bbee0..328231cfb028 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -89,14 +89,13 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn, ALIGN_DOWN(vmf->address, fault_size)); for (i = 0; i < nr_pages; i++) { - struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); + struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i); - page = compound_head(page); - if (page->mapping) + if (folio->mapping) continue; - page->mapping = filp->f_mapping; - page->index = pgoff + i; + folio->mapping = filp->f_mapping; + folio->index = pgoff + i; } } @@ -126,11 +125,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), + vmf->flags & FAULT_FLAG_WRITE); } static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, @@ -169,11 +169,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -214,11 +215,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 2451c816edd5..38431e8360e7 
100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1416,13 +1416,13 @@ map_pages: goto err_unmap; } zdd = page->zone_device_data; - if (pagemap != page->pgmap) { + if (pagemap != page_pgmap(page)) { if (i > 0) { err = -EOPNOTSUPP; goto err_unmap; } - pagemap = page->pgmap; + pagemap = page_pgmap(page); dpagemap = zdd->devmem_allocation->dpagemap; if (drm_WARN_ON(gpusvm->drm, !dpagemap)) { /* diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1a072568cef6..61d0f411ef84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -88,7 +88,8 @@ struct nouveau_dmem { static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page) { - return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap); + return container_of(page_pgmap(page), struct nouveau_dmem_chunk, + pagemap); } static struct nouveau_drm *page_to_drm(struct page *page) diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 825c867eba7c..e12e2596ed84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -610,10 +610,9 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, notifier_seq = mmu_interval_read_begin(¬ifier->notifier); mmap_read_lock(mm); - ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE, - &page, drm->dev); + page = make_device_exclusive(mm, start, drm->dev, &folio); mmap_read_unlock(mm); - if (ret <= 0 || !page) { + if (IS_ERR(page)) { ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 516898e99b26..3e829c87d7b4 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -341,7 +341,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w) static struct xe_vram_region *page_to_vr(struct page *page) { - return container_of(page->pgmap, struct xe_vram_region, pagemap); + return container_of(page_pgmap(page), struct xe_vram_region, pagemap); } static struct xe_tile *vr_to_tile(struct xe_vram_region *vr) diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index fec2f18679e3..2b4080e51f97 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1192,6 +1192,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm, __ClearPageOffline(pg); __free_page(pg); dm->num_pages_ballooned--; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, -1); adjust_managed_page_count(pg, 1); } } @@ -1221,6 +1222,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, return i * alloc_unit; dm->num_pages_ballooned += alloc_unit; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, alloc_unit); /* * If we allocatted 2M pages; split them so we diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 43156e1576c9..aa50006b7616 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -513,7 +513,7 @@ static int pmem_attach_disk(struct device *dev, pmem->disk = disk; pmem->pgmap.owner = pmem; - pmem->pfn_flags = PFN_DEV; + pmem->pfn_flags = 0; if (is_nd_pfn(dev)) { pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; @@ -522,7 +522,6 @@ static int pmem_attach_disk(struct device *dev, pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); pmem->pfn_pad = resource_size(res) - range_len(&pmem->pgmap.range); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; bb_range.start += pmem->data_offset; } else if 
(pmem_should_map_pages(dev)) { @@ -532,7 +531,6 @@ static int pmem_attach_disk(struct device *dev, pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; } else { addr = devm_memremap(dev, pmem->phys_addr, diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 0cb7e0aaba0e..19214ec81fbb 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj, rcu_read_unlock(); for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { - ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr)); + struct page *page = virt_to_page(kaddr); + + /* + * Initialise the refcount for the freshly allocated page. As + * we have just allocated the page no one else should be + * using it. + */ + VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page); + set_page_count(page, 1); + ret = vm_insert_page(vma, vaddr, page); if (ret) { gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len); return ret; } percpu_ref_get(ref); - put_page(virt_to_page(kaddr)); + put_page(page); kaddr += PAGE_SIZE; len -= PAGE_SIZE; } @@ -193,7 +202,7 @@ static const struct attribute_group p2pmem_group = { static void p2pdma_page_free(struct page *page) { - struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap); + struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page)); /* safe to dereference while a reference is held to the percpu ref */ struct pci_p2pdma *p2pdma = rcu_dereference_protected(pgmap->provider->p2pdma, 1); @@ -1016,8 +1025,8 @@ enum pci_p2pdma_map_type pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, struct scatterlist *sg) { - if (state->pgmap != sg_page(sg)->pgmap) { - state->pgmap = sg_page(sg)->pgmap; + if (state->pgmap != page_pgmap(sg_page(sg))) { + state->pgmap = page_pgmap(sg_page(sg)); state->map = pci_p2pdma_map_type(state->pgmap, dev); state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset; } diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index e3710a762aba..4bfe469c04aa 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -4,13 +4,21 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m - select FS_DAX_LIMITED - select DAX prompt "DCSSBLK support" depends on S390 && BLOCK help Support for dcss block device +config DCSSBLK_DAX + def_bool y + depends on DCSSBLK + # requires S390 ZONE_DEVICE support + depends on BROKEN + select DAX + prompt "DCSSBLK DAX support" + help + Enable DAX operation for the dcss block device + config DASD def_tristate y prompt "Support for DASD devices" diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 0f14d279d30b..7248e547fefb 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -534,6 +534,21 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = { NULL, }; +static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info) +{ + struct dax_device *dax_dev; + + if (!IS_ENABLED(CONFIG_DCSSBLK_DAX)) + return 0; + + dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); + if (IS_ERR(dax_dev)) + return PTR_ERR(dax_dev); + set_dax_synchronous(dax_dev); + dev_info->dax_dev = dax_dev; + return dax_add_host(dev_info->dax_dev, dev_info->gd); +} + /* * device attribute for adding devices */ @@ -547,7 +562,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char int rc, i, j, 
num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; - struct dax_device *dax_dev; char *local_buf; unsigned long seg_byte_size; @@ -674,14 +688,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto put_dev; - dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); - if (IS_ERR(dax_dev)) { - rc = PTR_ERR(dax_dev); - goto put_dev; - } - set_dax_synchronous(dax_dev); - dev_info->dax_dev = dax_dev; - rc = dax_add_host(dev_info->dax_dev, dev_info->gd); + rc = dcssblk_setup_dax(dev_info); if (rc) goto out_dax; @@ -917,7 +924,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, *kaddr = __va(dev_info->start + offset); if (pfn) *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), - PFN_DEV|PFN_SPECIAL); + PFN_DEV); return (dev_sz - offset) / PAGE_SIZE; } diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 65363df8e81b..4fc93f253e06 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -69,14 +69,6 @@ out: return pageref; } -static void fb_deferred_io_pageref_clear(struct fb_deferred_io_pageref *pageref) -{ - struct page *page = pageref->page; - - if (page) - page->mapping = NULL; -} - static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info, unsigned long offset, struct page *page) @@ -140,13 +132,10 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) if (!page) return VM_FAULT_SIGBUS; - if (vmf->vma->vm_file) - page->mapping = vmf->vma->vm_file->f_mapping; - else - printk(KERN_ERR "no mapping available\n"); + if (!vmf->vma->vm_file) + fb_err(info, "no mapping available\n"); - BUG_ON(!page->mapping); - page->index = vmf->pgoff; /* for folio_mkclean() */ + BUG_ON(!info->fbdefio->mapping); vmf->page = page; return 0; @@ -194,9 +183,9 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long /* * We want the page to remain locked from ->page_mkwrite until - * the PTE is marked dirty to avoid folio_mkclean() being called - * before the PTE is updated, which would leave the page ignored - * by defio. + * the PTE is marked dirty to avoid mapping_wrprotect_range() + * being called before the PTE is updated, which would leave + * the page ignored by defio. * Do this by locking the page here and informing the caller * about it with VM_FAULT_LOCKED. */ @@ -274,15 +263,17 @@ static void fb_deferred_io_work(struct work_struct *work) struct fb_deferred_io_pageref *pageref, *next; struct fb_deferred_io *fbdefio = info->fbdefio; - /* here we mkclean the pages, then do all deferred IO */ + /* here we wrprotect the page's mappings, then do all deferred IO. 
*/ mutex_lock(&fbdefio->lock); +#ifdef CONFIG_MMU list_for_each_entry(pageref, &fbdefio->pagereflist, list) { - struct folio *folio = page_folio(pageref->page); + struct page *page = pageref->page; + pgoff_t pgoff = pageref->offset >> PAGE_SHIFT; - folio_lock(folio); - folio_mkclean(folio); - folio_unlock(folio); + mapping_wrprotect_range(fbdefio->mapping, pgoff, + page_to_pfn(page), 1); } +#endif /* driver's callback with pagereflist */ fbdefio->deferred_io(info, &fbdefio->pagereflist); @@ -337,6 +328,7 @@ void fb_deferred_io_open(struct fb_info *info, { struct fb_deferred_io *fbdefio = info->fbdefio; + fbdefio->mapping = file->f_mapping; file->f_mapping->a_ops = &fb_deferred_io_aops; fbdefio->open_count++; } @@ -344,13 +336,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open); static void fb_deferred_io_lastclose(struct fb_info *info) { - unsigned long i; - flush_delayed_work(&info->deferred_work); - - /* clear out the mapping that we setup */ - for (i = 0; i < info->npagerefs; ++i) - fb_deferred_io_pageref_clear(&info->pagerefs[i]); } void fb_deferred_io_release(struct fb_info *info) @@ -370,5 +356,6 @@ void fb_deferred_io_cleanup(struct fb_info *info) kvfree(info->pagerefs); mutex_destroy(&fbdefio->lock); + fbdefio->mapping = NULL; } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 163f7f1d70f1..65d4e7fa1eb8 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -157,6 +157,8 @@ static void balloon_append(struct page *page) list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } + inc_node_page_state(page, NR_BALLOON_PAGES); + wake_up(&balloon_wq); } @@ -179,6 +181,8 @@ static struct page *balloon_retrieve(bool require_lowmem) balloon_stats.balloon_low--; __ClearPageOffline(page); + dec_node_page_state(page, NR_BALLOON_PAGES); + return page; } diff --git a/fs/Kconfig b/fs/Kconfig index afe21866d6b4..c718b2e2de0e 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -286,6 +286,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP def_bool HUGETLB_PAGE depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP depends on SPARSEMEM_VMEMMAP + select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT config HUGETLB_PMD_PAGE_TABLE_SHARING def_bool HUGETLB_PAGE diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index 1e94f89aabed..6ba5071ab6dd 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -622,7 +622,7 @@ do { \ #define per_cpu_sum(_p) \ ({ \ - typeof(*_p) _ret = 0; \ + TYPEOF_UNQUAL(*_p) _ret = 0; \ \ int cpu; \ for_each_possible_cpu(cpu) \ diff --git a/fs/buffer.c b/fs/buffer.c index 194eacbefc95..c7abb4a029dc 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2166,7 +2166,7 @@ int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, } EXPORT_SYMBOL(__block_write_begin); -static void __block_commit_write(struct folio *folio, size_t from, size_t to) +void block_commit_write(struct folio *folio, size_t from, size_t to) { size_t block_start, block_end; bool partial = false; @@ -2204,6 +2204,7 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to) if (!partial) folio_mark_uptodate(folio); } +EXPORT_SYMBOL(block_commit_write); /* * block_write_begin takes care of the basic task of block allocation and @@ -2262,7 +2263,7 @@ int block_write_end(struct file *file, struct address_space *mapping, flush_dcache_folio(folio); /* This could be a short (even 0-length) commit */ - __block_commit_write(folio, start, start + copied); + block_commit_write(folio, start, start + copied); return copied; } @@ 
-2566,13 +2567,6 @@ int cont_write_begin(struct file *file, struct address_space *mapping, } EXPORT_SYMBOL(cont_write_begin); -void block_commit_write(struct page *page, unsigned from, unsigned to) -{ - struct folio *folio = page_folio(page); - __block_commit_write(folio, from, to); -} -EXPORT_SYMBOL(block_commit_write); - /* * block_page_mkwrite() is not allowed to change the file size as it gets * called from a page fault handler when a page is first dirtied. Hence we must @@ -2618,7 +2612,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, if (unlikely(ret)) goto out_unlock; - __block_commit_write(folio, 0, end); + block_commit_write(folio, 0, end); folio_mark_dirty(folio); folio_wait_stable(folio); @@ -71,6 +71,11 @@ static unsigned long dax_to_pfn(void *entry) return xa_to_value(entry) >> DAX_SHIFT; } +static struct folio *dax_to_folio(void *entry) +{ + return page_folio(pfn_to_page(dax_to_pfn(entry))); +} + static void *dax_make_entry(pfn_t pfn, unsigned long flags) { return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); @@ -206,7 +211,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, * * Must be called with the i_pages lock held. */ -static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) +static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order) { void *entry; struct wait_exceptional_entry_queue ewait; @@ -236,6 +241,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) } /* + * Wait for the given entry to become unlocked. Caller must hold the i_pages + * lock and call either put_unlocked_entry() if it did not lock the entry or + * dax_unlock_entry() if it did. Returns an unlocked entry if still present. + */ +static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry) +{ + struct wait_exceptional_entry_queue ewait; + wait_queue_head_t *wq; + + init_wait(&ewait.wait); + ewait.wait.func = wake_exceptional_entry_func; + + while (unlikely(dax_is_locked(entry))) { + wq = dax_entry_waitqueue(xas, entry, &ewait.key); + prepare_to_wait_exclusive(wq, &ewait.wait, + TASK_UNINTERRUPTIBLE); + xas_pause(xas); + xas_unlock_irq(xas); + schedule(); + finish_wait(wq, &ewait.wait); + xas_lock_irq(xas); + entry = xas_load(xas); + } + + if (xa_is_internal(entry)) + return NULL; + + return entry; +} + +/* * The only thing keeping the address space around is the i_pages lock * (it's cycled in clear_inode() after removing the entries from i_pages) * After we call xas_unlock_irq(), we cannot touch xas->xa. @@ -250,7 +286,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) wq = dax_entry_waitqueue(xas, entry, &ewait.key); /* - * Unlike get_unlocked_entry() there is no guarantee that this + * Unlike get_next_unlocked_entry() there is no guarantee that this * path ever successfully retrieves an unlocked entry before an * inode dies. Perform a non-exclusive wait in case this path * never successfully performs its own wake up. @@ -307,109 +343,156 @@ static unsigned long dax_entry_size(void *entry) return PAGE_SIZE; } -static unsigned long dax_end_pfn(void *entry) +/* + * A DAX folio is considered shared if it has no mapping set and ->share (which + * shares the ->index field) is non-zero. Note this may return false even if the + * page is shared between multiple files but has not yet actually been mapped + * into multiple address spaces. 
+ */ +static inline bool dax_folio_is_shared(struct folio *folio) { - return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; + return !folio->mapping && folio->share; } /* - * Iterate through all mapped pfns represented by an entry, i.e. skip - * 'empty' and 'zero' entries. + * When it is called by dax_insert_entry(), the shared flag will indicate + * whether this entry is shared by multiple files. If the page has not + * previously been associated with any mappings the ->mapping and ->index + * fields will be set. If it has already been associated with a mapping + * the mapping will be cleared and the share count set. It's then up to + * reverse map users like memory_failure() to call back into the filesystem to + * recover ->mapping and ->index information. For example by implementing + * dax_holder_operations. */ -#define for_each_mapped_pfn(entry, pfn) \ - for (pfn = dax_to_pfn(entry); \ - pfn < dax_end_pfn(entry); pfn++) - -static inline bool dax_page_is_shared(struct page *page) +static void dax_folio_make_shared(struct folio *folio) { - return page->mapping == PAGE_MAPPING_DAX_SHARED; + /* + * folio is not currently shared so mark it as shared by clearing + * folio->mapping. + */ + folio->mapping = NULL; + + /* + * folio has previously been mapped into one address space so set the + * share count. + */ + folio->share = 1; } -/* - * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the - * refcount. - */ -static inline void dax_page_share_get(struct page *page) +static inline unsigned long dax_folio_put(struct folio *folio) { - if (page->mapping != PAGE_MAPPING_DAX_SHARED) { + unsigned long ref; + int order, i; + + if (!dax_folio_is_shared(folio)) + ref = 0; + else + ref = --folio->share; + + if (ref) + return ref; + + folio->mapping = NULL; + order = folio_order(folio); + if (!order) + return 0; + + for (i = 0; i < (1UL << order); i++) { + struct dev_pagemap *pgmap = page_pgmap(&folio->page); + struct page *page = folio_page(folio, i); + struct folio *new_folio = (struct folio *)page; + + ClearPageHead(page); + clear_compound_head(page); + + new_folio->mapping = NULL; /* - * Reset the index if the page was already mapped - * regularly before. + * Reset pgmap which was over-written by + * prep_compound_page(). */ - if (page->mapping) - page->share = 1; - page->mapping = PAGE_MAPPING_DAX_SHARED; + new_folio->pgmap = pgmap; + new_folio->share = 0; + WARN_ON_ONCE(folio_ref_count(new_folio)); } - page->share++; + + return ref; } -static inline unsigned long dax_page_share_put(struct page *page) +static void dax_folio_init(void *entry) { - return --page->share; + struct folio *folio = dax_to_folio(entry); + int order = dax_entry_order(entry); + + /* + * Folio should have been split back to order-0 pages in + * dax_folio_put() when they were removed from their + * final mapping. + */ + WARN_ON_ONCE(folio_order(folio)); + + if (order > 0) { + prep_compound_page(&folio->page, order); + if (order > 1) + INIT_LIST_HEAD(&folio->_deferred_list); + WARN_ON_ONCE(folio_ref_count(folio)); + } } -/* - * When it is called in dax_insert_entry(), the shared flag will indicate that - * whether this entry is shared by multiple files. If so, set the page->mapping - * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount. 
- */ static void dax_associate_entry(void *entry, struct address_space *mapping, - struct vm_area_struct *vma, unsigned long address, bool shared) + struct vm_area_struct *vma, + unsigned long address, bool shared) { - unsigned long size = dax_entry_size(entry), pfn, index; - int i = 0; + unsigned long size = dax_entry_size(entry), index; + struct folio *folio = dax_to_folio(entry); + + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return; if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; index = linear_page_index(vma, address & ~(size - 1)); - for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + if (shared && (folio->mapping || dax_folio_is_shared(folio))) { + if (folio->mapping) + dax_folio_make_shared(folio); - if (shared) { - dax_page_share_get(page); - } else { - WARN_ON_ONCE(page->mapping); - page->mapping = mapping; - page->index = index + i++; - } + WARN_ON_ONCE(!folio->share); + WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); + folio->share++; + } else { + WARN_ON_ONCE(folio->mapping); + dax_folio_init(entry); + folio = dax_to_folio(entry); + folio->mapping = mapping; + folio->index = index; } } static void dax_disassociate_entry(void *entry, struct address_space *mapping, - bool trunc) + bool trunc) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; - for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); - - WARN_ON_ONCE(trunc && page_ref_count(page) > 1); - if (dax_page_is_shared(page)) { - /* keep the shared flag if this page is still shared */ - if (dax_page_share_put(page) > 0) - continue; - } else - WARN_ON_ONCE(page->mapping && page->mapping != mapping); - page->mapping = NULL; - page->index = 0; - } + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return; + + dax_folio_put(folio); } static struct page *dax_busy_page(void *entry) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); - for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return NULL; - if (page_ref_count(page) > 1) - return page; - } - return NULL; + if (folio_ref_count(folio) - folio_mapcount(folio)) + return &folio->page; + else + return NULL; } /** @@ -580,7 +663,7 @@ static void *grab_mapping_entry(struct xa_state *xas, retry: pmd_downgrade = false; xas_lock_irq(xas); - entry = get_unlocked_entry(xas, order); + entry = get_next_unlocked_entry(xas, order); if (entry) { if (dax_is_conflict(entry)) @@ -690,7 +773,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return NULL; - if (!dax_mapping(mapping) || !mapping_mapped(mapping)) + if (!dax_mapping(mapping)) return NULL; /* If end == LLONG_MAX, all pages from start to till end of file */ @@ -716,8 +799,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, xas_for_each(&xas, entry, end_idx) { if (WARN_ON_ONCE(!xa_is_value(entry))) continue; - if (unlikely(dax_is_locked(entry))) - entry = get_unlocked_entry(&xas, 0); + entry = wait_entry_unlocked_exclusive(&xas, entry); if (entry) page = dax_busy_page(entry); put_unlocked_entry(&xas, entry, WAKE_NEXT); @@ -743,14 +825,14 @@ struct page *dax_layout_busy_page(struct address_space *mapping) EXPORT_SYMBOL_GPL(dax_layout_busy_page); static int __dax_invalidate_entry(struct address_space *mapping, - pgoff_t index, bool trunc) + pgoff_t index, bool trunc) { XA_STATE(xas, &mapping->i_pages, 
index); int ret = 0; void *entry; xas_lock_irq(&xas); - entry = get_unlocked_entry(&xas, 0); + entry = get_next_unlocked_entry(&xas, 0); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto out; if (!trunc && @@ -776,7 +858,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping, xas_lock_irq(&xas); xas_for_each(&xas, entry, end) { - entry = get_unlocked_entry(&xas, 0); + entry = wait_entry_unlocked_exclusive(&xas, entry); + if (!entry) + continue; xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); put_unlocked_entry(&xas, entry, WAKE_NEXT); @@ -813,6 +897,107 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) return ret; } +void dax_delete_mapping_range(struct address_space *mapping, + loff_t start, loff_t end) +{ + void *entry; + pgoff_t start_idx = start >> PAGE_SHIFT; + pgoff_t end_idx; + XA_STATE(xas, &mapping->i_pages, start_idx); + + /* If end == LLONG_MAX, all pages from start to till end of file */ + if (end == LLONG_MAX) + end_idx = ULONG_MAX; + else + end_idx = end >> PAGE_SHIFT; + + xas_lock_irq(&xas); + xas_for_each(&xas, entry, end_idx) { + if (!xa_is_value(entry)) + continue; + entry = wait_entry_unlocked_exclusive(&xas, entry); + if (!entry) + continue; + dax_disassociate_entry(entry, mapping, true); + xas_store(&xas, NULL); + mapping->nrpages -= 1UL << dax_entry_order(entry); + put_unlocked_entry(&xas, entry, WAKE_ALL); + } + xas_unlock_irq(&xas); +} +EXPORT_SYMBOL_GPL(dax_delete_mapping_range); + +static int wait_page_idle(struct page *page, + void (cb)(struct inode *), + struct inode *inode) +{ + return ___wait_var_event(page, dax_page_is_idle(page), + TASK_INTERRUPTIBLE, 0, 0, cb(inode)); +} + +static void wait_page_idle_uninterruptible(struct page *page, + struct inode *inode) +{ + ___wait_var_event(page, dax_page_is_idle(page), + TASK_UNINTERRUPTIBLE, 0, 0, schedule()); +} + +/* + * Unmaps the inode and waits for any DMA to complete prior to deleting the + * DAX mapping entries for the range. + * + * For NOWAIT behavior, pass @cb as NULL to early-exit on first found + * busy page + */ +int dax_break_layout(struct inode *inode, loff_t start, loff_t end, + void (cb)(struct inode *)) +{ + struct page *page; + int error = 0; + + if (!dax_mapping(inode->i_mapping)) + return 0; + + do { + page = dax_layout_busy_page_range(inode->i_mapping, start, end); + if (!page) + break; + if (!cb) { + error = -ERESTARTSYS; + break; + } + + error = wait_page_idle(page, cb, inode); + } while (error == 0); + + if (!page) + dax_delete_mapping_range(inode->i_mapping, start, end); + + return error; +} +EXPORT_SYMBOL_GPL(dax_break_layout); + +void dax_break_layout_final(struct inode *inode) +{ + struct page *page; + + if (!dax_mapping(inode->i_mapping)) + return; + + do { + page = dax_layout_busy_page_range(inode->i_mapping, 0, + LLONG_MAX); + if (!page) + break; + + wait_page_idle_uninterruptible(page, inode); + } while (true); + + if (!page) + dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); +} +EXPORT_SYMBOL_GPL(dax_break_layout_final); + /* * Invalidate DAX entry if it is clean. */ @@ -895,8 +1080,9 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, void *old; dax_disassociate_entry(entry, mapping, false); - dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, - shared); + dax_associate_entry(new_entry, mapping, vmf->vma, + vmf->address, shared); + /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. 
If a normal PTE or @@ -940,7 +1126,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, if (unlikely(dax_is_locked(entry))) { void *old_entry = entry; - entry = get_unlocked_entry(xas, 0); + entry = get_next_unlocked_entry(xas, 0); /* Entry got punched out / reallocated? */ if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) @@ -1084,9 +1270,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, goto out; if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) goto out; - /* For larger pages we need devmap */ - if (length > 1 && !pfn_t_devmap(*pfnp)) - goto out; + rc = 0; out_check_addr: @@ -1193,7 +1377,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); - ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false); trace_dax_load_hole(inode, vmf, ret); return ret; } @@ -1664,7 +1848,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? DAX_PMD : 0; - int err = 0; + struct folio *folio; + int ret, err = 0; pfn_t pfn; void *kaddr; @@ -1696,17 +1881,19 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return dax_fault_return(err); } + folio = dax_to_folio(*entry); if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); - /* insert PMD pfn */ + folio_ref_inc(folio); if (pmd) - return vmf_insert_pfn_pmd(vmf, pfn, write); + ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)), + write); + else + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write); + folio_put(folio); - /* insert PTE pfn */ - if (write) - return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return ret; } static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, @@ -1949,11 +2136,12 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); + struct folio *folio; void *entry; vm_fault_t ret; xas_lock_irq(&xas); - entry = get_unlocked_entry(&xas, order); + entry = get_next_unlocked_entry(&xas, order); /* Did we race with someone splitting entry or so? 
*/ if (!entry || dax_is_conflict(entry) || (order == 0 && !dax_is_pte_entry(entry))) { @@ -1966,14 +2154,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); + folio = pfn_folio(pfn_t_to_pfn(pfn)); + folio_ref_inc(folio); if (order == 0) - ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); + ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); #ifdef CONFIG_FS_DAX_PMD else if (order == PMD_ORDER) - ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); + ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); #endif else ret = VM_FAULT_FALLBACK; + folio_put(folio); dax_unlock_entry(&xas, entry); trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); return ret; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index f608f6554b95..2c9b762925c7 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -642,7 +642,7 @@ retry: goto retry; if (folio) - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); out: if (folio) { folio_unlock(folio); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index bcb96caf77c0..1dc09ed5d403 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -182,6 +182,8 @@ void ext4_evict_inode(struct inode *inode) trace_ext4_evict_inode(inode); + dax_break_layout_final(inode); + if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL) ext4_evict_ea_inode(inode); if (inode->i_nlink) { @@ -3981,24 +3983,10 @@ static void ext4_wait_dax_page(struct inode *inode) int ext4_break_layouts(struct inode *inode) { - struct page *page; - int error; - if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock))) return -EINVAL; - do { - page = dax_layout_busy_page(inode->i_mapping); - if (!page) - return 0; - - error = ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, - TASK_INTERRUPTIBLE, 0, 0, - ext4_wait_dax_page(inode)); - } while (error == 0); - - return error; + return dax_break_layout_inode(inode, ext4_wait_dax_page); } /* diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 898443e98efc..48649be64d6a 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -399,7 +399,7 @@ data_copy: bh = bh->b_this_page; } - block_commit_write(&folio[0]->page, from, from + replaced_size); + block_commit_write(folio[0], from, from + replaced_size); /* Even in case of data=writeback it is reasonable to pin * inode to transaction, to prevent unexpected data loss */ diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 0b6ee6dd1fd6..0502bf3cdf6a 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -666,36 +666,12 @@ static void fuse_wait_dax_page(struct inode *inode) filemap_invalidate_lock(inode->i_mapping); } -/* Should be called with mapping->invalidate_lock held exclusively */ -static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, - loff_t start, loff_t end) -{ - struct page *page; - - page = dax_layout_busy_page_range(inode->i_mapping, start, end); - if (!page) - return 0; - - *retry = true; - return ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, - 0, 0, fuse_wait_dax_page(inode)); -} - -/* dmap_end == 0 leads to unmapping of whole file */ +/* Should be called with mapping->invalidate_lock held exclusively. 
*/ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { - bool retry; - int ret; - - do { - retry = false; - ret = __fuse_dax_break_layouts(inode, &retry, dmap_start, - dmap_end); - } while (ret == 0 && retry); - - return ret; + return dax_break_layout(inode, dmap_start, dmap_end, + fuse_wait_dax_page); } ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index fa8f1141ea74..85e4f894a59f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1954,7 +1954,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d63e56fd3dd2..754378dd9f71 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file) if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3205,7 +3205,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 82afe78ec542..2c7b24cb67ad 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1017,8 +1017,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (kaddr) *kaddr = fs->window_kaddr + offset; if (pfn) - *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, - PFN_DEV | PFN_MAP); + *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0); return nr_pages > max_nr_pages ? max_nr_pages : nr_pages; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index d98caedbb723..e4de5425838d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -193,19 +193,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } /* - * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset. + * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset. * Returns the maximum number of bytes one can read without touching the 1st raw - * HWPOISON subpage. + * HWPOISON page. * * The implementation borrows the iteration logic from copy_page_to_iter*. */ -static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes) +static size_t adjust_range_hwpoison(struct folio *folio, size_t offset, + size_t bytes) { + struct page *page; size_t n = 0; size_t res = 0; - /* First subpage to start the loop. */ - page = nth_page(page, offset / PAGE_SIZE); + /* First page to start the loop. */ + page = folio_page(folio, offset / PAGE_SIZE); offset %= PAGE_SIZE; while (1) { if (is_raw_hwpoison_page_in_hugepage(page)) @@ -278,10 +280,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) else { /* * Adjust how many bytes safe to read without - * touching the 1st raw HWPOISON subpage after + * touching the 1st raw HWPOISON page after * offset. 
*/ - want = adjust_range_hwpoison(&folio->page, offset, nr); + want = adjust_range_hwpoison(folio, offset, nr); if (want == 0) { folio_put(folio); retval = -EIO; @@ -338,8 +340,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio) * mutex for the page in the mapping. So, we can not race with page being * faulted into the vma. */ -static bool hugetlb_vma_maps_page(struct vm_area_struct *vma, - unsigned long addr, struct page *page) +static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) { pte_t *ptep, pte; @@ -351,7 +353,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma, if (huge_pte_none(pte) || !pte_present(pte)) return false; - if (pte_page(pte) == page) + if (pte_pfn(pte) == pfn) return true; return false; @@ -396,7 +398,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h, { struct rb_root_cached *root = &mapping->i_mmap; struct hugetlb_vma_lock *vma_lock; - struct page *page = &folio->page; + unsigned long pfn = folio_pfn(folio); struct vm_area_struct *vma; unsigned long v_start; unsigned long v_end; @@ -412,7 +414,7 @@ retry: v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); - if (!hugetlb_vma_maps_page(vma, v_start, page)) + if (!hugetlb_vma_maps_pfn(vma, v_start, pfn)) continue; if (!hugetlb_vma_trylock_write(vma)) { @@ -462,7 +464,7 @@ retry: */ v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); - if (hugetlb_vma_maps_page(vma, v_start, page)) + if (hugetlb_vma_maps_pfn(vma, v_start, pfn)) unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 814b7f679486..31553372b33a 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1480,7 +1480,7 @@ static int iomap_folio_mkwrite_iter(struct iomap_iter *iter, &iter->iomap); if (ret) return ret; - block_commit_write(&folio->page, 0, length); + block_commit_write(folio, 0, length); } else { WARN_ON_ONCE(!folio_test_uptodate(folio)); folio_mark_dirty(folio); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 5bbeb6fbb1ac..ee1d92ed950f 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode, ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); } } } @@ -2012,7 +2012,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); } } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e54f2c4b5a90..2056cf08ac1e 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -813,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, /* must not update i_size! */ - block_commit_write(&folio->page, block_start + 1, block_start + 1); + block_commit_write(folio, block_start + 1, block_start + 1); } /* diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 77a517f91821..96122e91c645 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -157,6 +157,7 @@ unsigned name_to_int(const struct qstr *qstr); /* Worst case buffer size needed for holding an integer. */ #define PROC_NUMBUF 13 +#ifdef CONFIG_PAGE_MAPCOUNT /** * folio_precise_page_mapcount() - Number of mappings of this folio page. * @folio: The folio. 
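The next hunk turns folio_precise_page_mapcount() into a BUILD_BUG() stub when CONFIG_PAGE_MAPCOUNT is disabled and adds a folio-average fallback, so the fs/proc callers converted later in this patch pick a helper at compile time. A short sketch of that caller pattern; snapshot_mapcount() is an illustrative name, not part of the patch:

    /* Compile-time selection mirroring the fs/proc/page.c and task_mmu.c hunks below. */
    static inline int snapshot_mapcount(struct folio *folio, struct page *page)
    {
            if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
                    /* Exact number of mappings of this specific page. */
                    return folio_precise_page_mapcount(folio, page);

            /* Rounded per-page average over the folio; at least 1 if mapped. */
            return folio_average_page_mapcount(folio);
    }

The BUILD_BUG() stub relies on the disabled branch being constant-folded away, which is exactly what the IS_ENABLED() check provides.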
@@ -187,7 +188,49 @@ static inline int folio_precise_page_mapcount(struct folio *folio, return mapcount; } +#else /* !CONFIG_PAGE_MAPCOUNT */ +static inline int folio_precise_page_mapcount(struct folio *folio, + struct page *page) +{ + BUILD_BUG(); +} +#endif /* CONFIG_PAGE_MAPCOUNT */ +/** + * folio_average_page_mapcount() - Average number of mappings per page in this + * folio + * @folio: The folio. + * + * The average number of user page table entries that reference each page in + * this folio as tracked via the RMAP: either referenced directly (PTE) or + * as part of a larger area that covers this page (e.g., PMD). + * + * The average is calculated by rounding to the nearest integer; however, + * to avoid duplicated code in current callers, the average is at least + * 1 if any page of the folio is mapped. + * + * Returns: The average number of mappings per page in this folio. + */ +static inline int folio_average_page_mapcount(struct folio *folio) +{ + int mapcount, entire_mapcount, avg; + + if (!folio_test_large(folio)) + return atomic_read(&folio->_mapcount) + 1; + + mapcount = folio_large_mapcount(folio); + if (unlikely(mapcount <= 0)) + return 0; + entire_mapcount = folio_entire_mapcount(folio); + if (mapcount <= entire_mapcount) + return entire_mapcount; + mapcount -= entire_mapcount; + + /* Round to closest integer ... */ + avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio); + /* ... but return at least 1. */ + return max_t(int, avg + entire_mapcount, 1); +} /* * array.c */ diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 8ba9b1472390..83be312159c9 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -162,6 +162,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "Unaccepted: ", global_zone_page_state(NR_UNACCEPTED)); #endif + show_val_kb(m, "Balloon: ", + global_node_page_state(NR_BALLOON_PAGES)); hugetlb_report_meminfo(m); diff --git a/fs/proc/page.c b/fs/proc/page.c index a55f5acefa97..23fc771100ae 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -67,9 +67,14 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, * memmaps that were actually initialized. */ page = pfn_to_online_page(pfn); - if (page) - mapcount = folio_precise_page_mapcount(page_folio(page), - page); + if (page) { + struct folio *folio = page_folio(page); + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + mapcount = folio_precise_page_mapcount(folio, page); + else + mapcount = folio_average_page_mapcount(folio); + } if (put_user(mapcount, out)) { ret = -EFAULT; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f02cd362309a..994cde10e3f4 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -707,6 +707,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, struct folio *folio = page_folio(page); int i, nr = compound ? compound_nr(page) : 1; unsigned long size = nr * PAGE_SIZE; + bool exclusive; + int mapcount; /* * First accumulate quantities that depend only on |size| and the type @@ -747,18 +749,29 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, dirty, locked, present); return; } + + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + mapcount = folio_average_page_mapcount(folio); + exclusive = !folio_maybe_mapped_shared(folio); + } + /* * We obtain a snapshot of the mapcount. Without holding the folio lock * this snapshot can be slightly wrong as we cannot always read the * mapcount atomically. 
*/ for (i = 0; i < nr; i++, page++) { - int mapcount = folio_precise_page_mapcount(folio, page); unsigned long pss = PAGE_SIZE << PSS_SHIFT; + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) { + mapcount = folio_precise_page_mapcount(folio, page); + exclusive = mapcount < 2; + } + if (mapcount >= 2) pss /= mapcount; smaps_page_accumulate(mss, folio, PAGE_SIZE, pss, - dirty, locked, mapcount < 2); + dirty, locked, exclusive); } } @@ -1023,7 +1036,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, if (folio) { /* We treat non-present entries as "maybe shared". */ - if (!present || folio_likely_mapped_shared(folio) || + if (!present || folio_maybe_mapped_shared(folio) || hugetlb_pmd_shared(pte)) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else @@ -1632,6 +1645,7 @@ struct pagemapread { #define PM_SOFT_DIRTY BIT_ULL(55) #define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_UFFD_WP BIT_ULL(57) +#define PM_GUARD_REGION BIT_ULL(58) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) @@ -1651,6 +1665,13 @@ static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm) return 0; } +static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page) +{ + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + return folio_precise_page_mapcount(folio, page) == 1; + return !folio_maybe_mapped_shared(folio); +} + static int pagemap_pte_hole(unsigned long start, unsigned long end, __always_unused int depth, struct mm_walk *walk) { @@ -1732,6 +1753,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, page = pfn_swap_entry_to_page(entry); if (pte_marker_entry_uffd_wp(entry)) flags |= PM_UFFD_WP; + if (is_guard_swp_entry(entry)) + flags |= PM_GUARD_REGION; } if (page) { @@ -1739,7 +1762,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (!folio_test_anon(folio)) flags |= PM_FILE; if ((flags & PM_PRESENT) && - folio_precise_page_mapcount(folio, page) == 1) + __folio_page_mapped_exclusively(folio, page)) flags |= PM_MMAP_EXCLUSIVE; } if (vma->vm_flags & VM_SOFTDIRTY) @@ -1814,7 +1837,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, pagemap_entry_t pme; if (folio && (flags & PM_PRESENT) && - folio_precise_page_mapcount(folio, page + idx) == 1) + __folio_page_mapped_exclusively(folio, page)) cur_flags |= PM_MMAP_EXCLUSIVE; pme = make_pme(frame, cur_flags); @@ -1879,7 +1902,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, if (!folio_test_anon(folio)) flags |= PM_FILE; - if (!folio_likely_mapped_shared(folio) && + if (!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(ptep)) flags |= PM_MMAP_EXCLUSIVE; @@ -1931,7 +1954,8 @@ static const struct mm_walk_ops pagemap_ops = { * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) * Bit 56 page exclusively mapped * Bit 57 pte is uffd-wp write-protected - * Bits 58-60 zero + * Bit 58 pte is a guard region + * Bits 59-60 zero * Bit 61 page is file-page or shared-anon * Bit 62 page swapped * Bit 63 page present @@ -2455,22 +2479,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, spinlock_t *ptl; int ret; - arch_enter_lazy_mmu_mode(); - ret = pagemap_scan_thp_entry(pmd, start, end, walk); - if (ret != -ENOENT) { - arch_leave_lazy_mmu_mode(); + if (ret != -ENOENT) return ret; - } ret = 0; start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); if (!pte) { - arch_leave_lazy_mmu_mode(); walk->action = ACTION_AGAIN; return 0; } + 
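
[Editor's aside, not part of the patch] The new PM_GUARD_REGION bit lands in the documented /proc/<pid>/pagemap layout at bit 58. The userspace demo below is a minimal, runnable reader that tests a few of the documented flag bits for one page; the bit positions come from the table in this hunk, the guard bit is only reported by kernels carrying this change, and some flag bits may be masked for unprivileged readers.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

#define PM_MMAP_EXCLUSIVE	(1ULL << 56)
#define PM_GUARD_REGION		(1ULL << 58)	/* new in this patch */
#define PM_SWAP			(1ULL << 62)
#define PM_PRESENT		(1ULL << 63)

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *buf = malloc(psize);
	uint64_t ent;
	int fd;

	buf[0] = 1;	/* fault the page in so "present" can be set */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;
	/* one 64-bit entry per page, indexed by virtual page number */
	if (pread(fd, &ent, sizeof(ent),
		  ((uintptr_t)buf / psize) * sizeof(ent)) != sizeof(ent))
		return 1;

	printf("present=%d exclusive=%d guard=%d swapped=%d\n",
	       !!(ent & PM_PRESENT), !!(ent & PM_MMAP_EXCLUSIVE),
	       !!(ent & PM_GUARD_REGION), !!(ent & PM_SWAP));
	close(fd);
	return 0;
}
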
arch_enter_lazy_mmu_mode(); + if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { /* Fast path for performing exclusive WP */ for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { @@ -2539,8 +2560,8 @@ flush_and_return: if (flush_end) flush_tlb_range(vma, start, addr); - pte_unmap_unlock(start_pte, ptl); arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); cond_resched(); return ret; @@ -2855,7 +2876,12 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) { struct folio *folio = page_folio(page); - int count = folio_precise_page_mapcount(folio, page); + int count; + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + count = folio_precise_page_mapcount(folio, page); + else + count = folio_average_page_mapcount(folio); md->pages += nr_pages; if (pte_dirty || folio_test_dirty(folio)) diff --git a/fs/udf/file.c b/fs/udf/file.c index 412fe7c4d348..0d76c4f37b3e 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -69,7 +69,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf) goto out_unlock; } - block_commit_write(&folio->page, 0, end); + block_commit_write(folio, 0, end); out_dirty: folio_mark_dirty(folio); folio_wait_stable(folio); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index ce6b8ffbaa2c..ee3e0f284287 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2734,21 +2734,16 @@ xfs_mmaplock_two_inodes_and_break_dax_layout( struct xfs_inode *ip2) { int error; - bool retry; - struct page *page; if (ip1->i_ino > ip2->i_ino) swap(ip1, ip2); again: - retry = false; /* Lock the first inode */ xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); - error = xfs_break_dax_layouts(VFS_I(ip1), &retry); - if (error || retry) { + error = xfs_break_dax_layouts(VFS_I(ip1)); + if (error) { xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); - if (error == 0 && retry) - goto again; return error; } @@ -2762,8 +2757,8 @@ again: * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable * for this nested lock case. 
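
[Editor's aside, not part of the patch] The pagemap_scan_pmd_entry() hunk above is an ordering fix: lazy MMU mode is now entered only once the PTE table is mapped and its lock is held, and left again before the lock is dropped. A condensed, kernel-context sketch of the resulting pattern (error paths and the actual PTE work elided):

	start_pte = pte = pte_offset_map_lock(mm, pmd, start, &ptl);
	if (!pte)
		return 0;			/* retry via ACTION_AGAIN */

	arch_enter_lazy_mmu_mode();		/* only while the PTL is held */

	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... inspect or update *pte ... */
	}

	arch_leave_lazy_mmu_mode();		/* leave before unlocking */
	pte_unmap_unlock(start_pte, ptl);
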
*/ - page = dax_layout_busy_page(VFS_I(ip2)->i_mapping); - if (page && page_ref_count(page) != 1) { + error = dax_break_layout(VFS_I(ip2), 0, -1, NULL); + if (error) { xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); goto again; @@ -3007,21 +3002,11 @@ xfs_wait_dax_page( int xfs_break_dax_layouts( - struct inode *inode, - bool *retry) + struct inode *inode) { - struct page *page; - xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL); - page = dax_layout_busy_page(inode->i_mapping); - if (!page) - return 0; - - *retry = true; - return ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, - 0, 0, xfs_wait_dax_page(inode)); + return dax_break_layout_inode(inode, xfs_wait_dax_page); } int @@ -3039,8 +3024,8 @@ xfs_break_layouts( retry = false; switch (reason) { case BREAK_UNMAP: - error = xfs_break_dax_layouts(inode, &retry); - if (error || retry) + error = xfs_break_dax_layouts(inode); + if (error) break; fallthrough; case BREAK_WRITE: diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 4bb7a99e0dc4..eae0159983ca 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -603,7 +603,7 @@ xfs_itruncate_extents( return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0); } -int xfs_break_dax_layouts(struct inode *inode, bool *retry); +int xfs_break_dax_layouts(struct inode *inode); int xfs_break_layouts(struct inode *inode, uint *iolock, enum layout_break_reason reason); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 53944cc7af24..b2dd0c0bf509 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -768,6 +768,17 @@ xfs_fs_drop_inode( return generic_drop_inode(inode); } +STATIC void +xfs_fs_evict_inode( + struct inode *inode) +{ + if (IS_DAX(inode)) + dax_break_layout_final(inode); + + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); +} + static void xfs_mount_free( struct xfs_mount *mp) @@ -1275,6 +1286,7 @@ static const struct super_operations xfs_super_operations = { .destroy_inode = xfs_fs_destroy_inode, .dirty_inode = xfs_fs_dirty_inode, .drop_inode = xfs_fs_drop_inode, + .evict_inode = xfs_fs_evict_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 3c61c29ff6ab..11abad6c87e1 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1111,7 +1111,7 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, pgprot_t prot); void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot); + pgprot_t prot); void iounmap(volatile void __iomem *addr); void generic_iounmap(volatile void __iomem *addr); @@ -1120,7 +1120,7 @@ void generic_iounmap(volatile void __iomem *addr); static inline void __iomem *ioremap(phys_addr_t addr, size_t size) { /* _PAGE_IOREMAP needs to be supplied by the architecture */ - return ioremap_prot(addr, size, _PAGE_IOREMAP); + return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP)); } #endif #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 6d1fb6162ac1..a3b5029aebbd 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -19,11 +19,12 @@ #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) +/* avoid <linux/mm.h> include hell */ +extern unsigned long max_mapnr; + #ifndef pfn_valid static inline int pfn_valid(unsigned long pfn) { - /* avoid 
<linux/mm.h> include hell */ - extern unsigned long max_mapnr; unsigned long pfn_offset = ARCH_PFN_OFFSET; return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr; diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 94cbd50cc870..02aeca21479a 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -6,6 +6,19 @@ #include <linux/threads.h> #include <linux/percpu-defs.h> +/* + * __percpu_qual is the qualifier for the percpu named address space. + * + * Most arches use generic named address space for percpu variables but + * some arches define percpu variables in different named address space + * (on the x86 arch, percpu variable may be declared as being relative + * to the %fs or %gs segments using __seg_fs or __seg_gs named address + * space qualifier). + */ +#ifndef __percpu_qual +# define __percpu_qual +#endif + #ifdef CONFIG_SMP /* @@ -74,7 +87,7 @@ do { \ #define raw_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ \ *__p += val; \ *__p; \ @@ -82,8 +95,8 @@ do { \ #define raw_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __ret; \ __ret = *__p; \ *__p = nval; \ __ret; \ @@ -91,7 +104,7 @@ do { \ #define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \ ({ \ - typeof(pcp) __val, __old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \ __val = _cmpxchg(pcp, __old, nval); \ if (__val != __old) \ *(ovalp) = __val; \ @@ -100,8 +113,8 @@ do { \ #define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __val = *__p, ___old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \ bool __ret; \ if (__val == ___old) { \ *__p = nval; \ @@ -115,14 +128,14 @@ do { \ #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __old = (oval); \ + TYPEOF_UNQUAL(pcp) __old = (oval); \ raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \ __old; \ }) #define __this_cpu_generic_read_nopreempt(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ preempt_disable_notrace(); \ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ preempt_enable_notrace(); \ @@ -131,7 +144,7 @@ do { \ #define __this_cpu_generic_read_noirq(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ unsigned long ___flags; \ raw_local_irq_save(___flags); \ ___ret = raw_cpu_generic_read(pcp); \ @@ -141,7 +154,7 @@ do { \ #define this_cpu_generic_read(pcp) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ if (__native_word(pcp)) \ __ret = __this_cpu_generic_read_nopreempt(pcp); \ else \ @@ -160,7 +173,7 @@ do { \ #define this_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_add_return(pcp, val); \ @@ -170,7 +183,7 @@ do { \ #define this_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_xchg(pcp, nval); \ @@ -190,7 +203,7 @@ do { \ #define this_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ diff --git 
a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index e402aef79c93..d1adfba8387e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -67,22 +67,21 @@ * * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE. * - * - tlb_remove_page() / __tlb_remove_page() - * - tlb_remove_page_size() / __tlb_remove_page_size() - * - __tlb_remove_folio_pages() + * - tlb_remove_page() / tlb_remove_page_size() + * - __tlb_remove_folio_pages() / __tlb_remove_page_size() + * - __tlb_remove_folio_pages_size() * - * __tlb_remove_page_size() is the basic primitive that queues a page for - * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a - * boolean indicating if the queue is (now) full and a call to - * tlb_flush_mmu() is required. + * __tlb_remove_folio_pages_size() is the basic primitive that queues pages + * for freeing. It will return a boolean indicating if the queue is (now) + * full and a call to tlb_flush_mmu() is required. * * tlb_remove_page() and tlb_remove_page_size() imply the call to * tlb_flush_mmu() when required and has no return value. * - * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however, - * instead of removing a single page, remove the given number of consecutive - * pages that are all part of the same (large) folio: just like calling - * __tlb_remove_page() on each page individually. + * __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(), + * however, instead of removing a single page, assume PAGE_SIZE and remove + * the given number of consecutive pages that are all part of the + * same (large) folio. * * - tlb_change_page_size() * @@ -489,16 +488,6 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, tlb_flush_mmu(tlb); } -static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, - struct page *page, bool delay_rmap) -{ - return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE); -} - -/* tlb_remove_page - * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when - * required. - */ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { return tlb_remove_page_size(tlb, page, PAGE_SIZE); diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h index bbc4730a6505..c0989b5b0407 100644 --- a/include/linux/bit_spinlock.h +++ b/include/linux/bit_spinlock.h @@ -13,7 +13,7 @@ * Don't use this unless you really need to: spin_lock() and spin_unlock() * are significantly faster. */ -static inline void bit_spin_lock(int bitnum, unsigned long *addr) +static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr) { /* * Assuming the lock is uncontended, this never enters @@ -38,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr) /* * Return true if it was acquired */ -static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr) { preempt_disable(); #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -54,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr) /* * bit-based spin_unlock() */ -static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); @@ -71,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr) * non-atomic version, which can be used eg. 
if the bit lock itself is * protecting the rest of the flags in the word. */ -static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index d8a8d245824a..4c506e76a808 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -18,6 +18,8 @@ enum bootmem_type { #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE void __init register_page_bootmem_info_node(struct pglist_data *pgdat); +void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, + unsigned long nr_pages); void get_page_bootmem(unsigned long info, struct page *page, enum bootmem_type type); @@ -58,6 +60,11 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { } +static inline void register_page_bootmem_memmap(unsigned long section_nr, + struct page *map, unsigned long nr_pages) +{ +} + static inline void put_page_bootmem(struct page *page) { } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index fab70b26e131..f0a4ad7839b6 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -270,7 +270,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, struct folio **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); -void block_commit_write(struct page *page, unsigned int from, unsigned int to); +void block_commit_write(struct folio *folio, size_t from, size_t to); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); diff --git a/include/linux/cma.h b/include/linux/cma.h index d15b64f51336..62d9c1cf6326 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -40,6 +40,9 @@ static inline int __init cma_declare_contiguous(phys_addr_t base, return cma_declare_contiguous_nid(base, size, limit, alignment, order_per_bit, fixed, name, res_cma, NUMA_NO_NODE); } +extern int __init cma_declare_contiguous_multi(phys_addr_t size, + phys_addr_t align, unsigned int order_per_bit, + const char *name, struct cma **res_cma, int nid); extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, @@ -50,12 +53,14 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); +extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end); extern void cma_reserve_pages_on_error(struct cma *cma); #ifdef CONFIG_CMA struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); bool cma_free_folio(struct cma *cma, const struct folio *folio); +bool cma_validate_zones(struct cma *cma); #else static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) { @@ -66,6 +71,10 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio) { return false; } +static inline bool cma_validate_zones(struct cma *cma) +{ + return false; +} #endif #endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 7bf0c521db63..173d9c07a895 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -95,7 +95,7 @@ 
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern bool compaction_suitable(struct zone *zone, int order, - int highest_zoneidx); + unsigned long watermark, int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); @@ -113,7 +113,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline bool compaction_suitable(struct zone *zone, int order, - int highest_zoneidx) + unsigned long watermark, + int highest_zoneidx) { return false; } diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 2e7c2c282f3a..4fc8e26914ad 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -128,3 +128,11 @@ */ #define ASM_INPUT_G "ir" #define ASM_INPUT_RM "r" + +/* + * Declare compiler support for __typeof_unqual__() operator. + * + * Bindgen uses LLVM even if our C compiler is GCC, so we cannot + * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL. + */ +#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index c9b58188ec61..32048052c64a 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -137,3 +137,11 @@ #if GCC_VERSION < 90100 #undef __alloc_size__ #endif + +/* + * Declare compiler support for __typeof_unqual__() operator. + * + * Bindgen uses LLVM even if our C compiler is GCC, so we cannot + * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL. + */ +#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 9fc30b6b80c9..27725f1ab5ab 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -226,6 +226,26 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \ "must be non-C-string (not NUL-terminated)") +/* + * Use __typeof_unqual__() when available. + * + * XXX: Remove test for __CHECKER__ once + * sparse learns about __typeof_unqual__(). + */ +#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__) +# define USE_TYPEOF_UNQUAL 1 +#endif + +/* + * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof + * operator when available, to return an unqualified type of the exp. 
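
[Editor's aside, not part of the patch] The TYPEOF_UNQUAL() helper defined just below, backed by the CC_HAS_TYPEOF_UNQUAL checks added to the clang and gcc headers above, exists so the generic percpu macros can declare local temporaries that drop qualifiers such as the x86 __seg_gs/__seg_fs named address space. The operator itself can be tried in plain userspace C with gcc >= 14 or clang >= 19 (per the version checks in this patch):

/* build with: cc -std=c2x demo.c */
#include <stdio.h>

int main(void)
{
	const volatile int x = 42;

	__typeof__(x) a = x;		/* still const volatile int */
	__typeof_unqual__(x) b = x;	/* plain int: qualifiers stripped */

	b = 7;				/* fine */
	/* a = 7;  would not compile: 'a' keeps the const qualifier */

	printf("%d %d\n", (int)a, b);
	return 0;
}
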
+ */ +#if defined(USE_TYPEOF_UNQUAL) +# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp) +#else +# define TYPEOF_UNQUAL(exp) __typeof__(exp) +#endif + #endif /* __KERNEL__ */ #if defined(CONFIG_CFI_CLANG) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e09d323be845..501cffddc2f4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -57,7 +57,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __user BTF_TYPE_TAG(user) # endif # define __iomem -# define __percpu BTF_TYPE_TAG(percpu) +# define __percpu __percpu_qual BTF_TYPE_TAG(percpu) # define __rcu BTF_TYPE_TAG(rcu) # define __chk_user_ptr(x) (void)0 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6cc5e484547c..1987400000b4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -116,7 +116,6 @@ enum cpuhp_state { CPUHP_NET_IUCV_PREPARE, CPUHP_ARM_BL_PREPARE, CPUHP_TRACE_RB_PREPARE, - CPUHP_MM_ZS_PREPARE, CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, diff --git a/include/linux/damon.h b/include/linux/damon.h index c9074d569596..47e36e6ea203 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -36,6 +36,16 @@ struct damon_addr_range { }; /** + * struct damon_size_range - Represents size for filter to operate on [@min, @max]. + * @min: Min size (inclusive). + * @max: Max size (inclusive). + */ +struct damon_size_range { + unsigned long min; + unsigned long max; +}; + +/** * struct damon_region - Represents a monitoring target region. * @ar: The address range of the region. * @sampling_addr: Address of the sample for the next access check. @@ -324,8 +334,11 @@ struct damos_stat { /** * enum damos_filter_type - Type of memory for &struct damos_filter * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. + * @DAMOS_FILTER_TYPE_ACTIVE: Active pages. * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages. + * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage. + * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages. * @DAMOS_FILTER_TYPE_ADDR: Address range. * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. * @NR_DAMOS_FILTER_TYPES: Number of filter types. @@ -343,8 +356,11 @@ struct damos_stat { */ enum damos_filter_type { DAMOS_FILTER_TYPE_ANON, + DAMOS_FILTER_TYPE_ACTIVE, DAMOS_FILTER_TYPE_MEMCG, DAMOS_FILTER_TYPE_YOUNG, + DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, + DAMOS_FILTER_TYPE_UNMAPPED, DAMOS_FILTER_TYPE_ADDR, DAMOS_FILTER_TYPE_TARGET, NR_DAMOS_FILTER_TYPES, @@ -360,6 +376,7 @@ enum damos_filter_type { * @target_idx: Index of the &struct damon_target of * &damon_ctx->adaptive_targets if @type is * DAMOS_FILTER_TYPE_TARGET. + * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE. * @list: List head for siblings. * * Before applying the &damos->action to a memory region, DAMOS checks if each @@ -376,6 +393,7 @@ struct damos_filter { unsigned short memcg_id; struct damon_addr_range addr_range; int target_idx; + struct damon_size_range sz_range; }; struct list_head list; }; @@ -432,6 +450,8 @@ struct damos_access_pattern { * @wmarks: Watermarks for automated (in)activation of this scheme. * @target_nid: Destination node if @action is "migrate_{hot,cold}". * @filters: Additional set of &struct damos_filter for &action. + * @ops_filters: ops layer handling &struct damos_filter objects list. 
+ * @last_applied: Last @action applied ops-managing entity. * @stat: Statistics of this scheme. * @list: List head for siblings. * @@ -454,6 +474,15 @@ struct damos_access_pattern { * implementation could check pages of the region and skip &action to respect * &filters * + * The minimum entity that @action can be applied depends on the underlying + * &struct damon_operations. Since it may not be aligned with the core layer + * abstract, namely &struct damon_region, &struct damon_operations could apply + * @action to same entity multiple times. Large folios that underlying on + * multiple &struct damon region objects could be such examples. The &struct + * damon_operations can use @last_applied to avoid that. DAMOS core logic + * unsets @last_applied when each regions walking for applying the scheme is + * finished. + * * After applying the &action to each region, &stat_count and &stat_sz is * updated to reflect the number of regions and total size of regions that the * &action is applied. @@ -475,6 +504,9 @@ struct damos { * layer-handled filters. If true, operations layer allows it, too. */ bool core_filters_allowed; + /* whether to reject core/ops filters umatched regions */ + bool core_filters_default_reject; + bool ops_filters_default_reject; /* public: */ struct damos_quota quota; struct damos_watermarks wmarks; @@ -482,6 +514,8 @@ struct damos { int target_nid; }; struct list_head filters; + struct list_head ops_filters; + void *last_applied; struct damos_stat stat; struct list_head list; }; @@ -510,7 +544,6 @@ enum damon_ops_id { * @update: Update operations-related data structures. * @prepare_access_checks: Prepare next access check of target regions. * @check_accesses: Check the accesses to target regions. - * @reset_aggregated: Reset aggregated accesses monitoring results. * @get_scheme_score: Get the score of a region for a scheme. * @apply_scheme: Apply a DAMON-based operation scheme. * @target_valid: Determine if the target is valid. @@ -522,8 +555,7 @@ enum damon_ops_id { * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting * the monitoring, @update after each &damon_attrs.ops_update_interval, and * @check_accesses, @target_valid and @prepare_access_checks after each - * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after - * each &damon_attrs.aggr_interval. + * &damon_attrs.sample_interval. * * Each &struct damon_operations instance having valid @id can be registered * via damon_register_ops() and selected by damon_select_ops() later. @@ -538,8 +570,6 @@ enum damon_ops_id { * last preparation and update the number of observed accesses of each region. * It should also return max number of observed accesses that made as a result * of its update. The value will be used for regions adjustment threshold. - * @reset_aggregated should reset the access monitoring results that aggregated - * by @check_accesses. * @get_scheme_score should return the priority score of a region for a scheme * as an integer in [0, &DAMOS_MAX_SCORE]. 
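
[Editor's aside, not part of the patch] To make the new DAMOS_FILTER_TYPE_HUGEPAGE_SIZE type concrete, here is a kernel-context sketch (illustrative only, not compilable standalone) of attaching a size-range filter to a scheme with the damos_new_filter()/damos_add_filter() helpers declared elsewhere in this patch. The 2 MiB..1 GiB bounds are arbitrary example values, and the matching/allow semantics are assumed to follow the &struct damos_filter documentation.

#include <linux/damon.h>

/* Reject scheme application to hugepages whose size is in [2 MiB, 1 GiB]. */
static int example_add_hugepage_size_filter(struct damos *scheme)
{
	struct damos_filter *filter;

	/* matching=true, allow=false: filter out hugepages in the range */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, true, false);
	if (!filter)
		return -ENOMEM;

	filter->sz_range.min = 2UL << 20;	/* 2 MiB, inclusive */
	filter->sz_range.max = 1UL << 30;	/* 1 GiB, inclusive */

	damos_add_filter(scheme, filter);
	return 0;
}
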
* @apply_scheme is called from @kdamond when a region for user provided @@ -557,7 +587,6 @@ struct damon_operations { void (*update)(struct damon_ctx *context); void (*prepare_access_checks)(struct damon_ctx *context); unsigned int (*check_accesses)(struct damon_ctx *context); - void (*reset_aggregated)(struct damon_ctx *context); int (*get_scheme_score)(struct damon_ctx *context, struct damon_target *t, struct damon_region *r, struct damos *scheme); @@ -571,43 +600,28 @@ struct damon_operations { /** * struct damon_callback - Monitoring events notification callbacks. * - * @before_start: Called before starting the monitoring. * @after_wmarks_check: Called after each schemes' watermarks check. - * @after_sampling: Called after each sampling. * @after_aggregation: Called after each aggregation. - * @before_damos_apply: Called before applying DAMOS action. * @before_terminate: Called before terminating the monitoring. - * @private: User private data. * - * The monitoring thread (&damon_ctx.kdamond) calls @before_start and - * @before_terminate just before starting and finishing the monitoring, - * respectively. Therefore, those are good places for installing and cleaning - * @private. + * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just + * before finishing the monitoring. * * The monitoring thread calls @after_wmarks_check after each DAMON-based * operation schemes' watermarks check. If users need to make changes to the * attributes of the monitoring context while it's deactivated due to the * watermarks, this is the good place to do. * - * The monitoring thread calls @after_sampling and @after_aggregation for each - * of the sampling intervals and aggregation intervals, respectively. - * Therefore, users can safely access the monitoring results without additional - * protection. For the reason, users are recommended to use these callback for - * the accesses to the results. + * The monitoring thread calls @after_aggregation for each of the aggregation + * intervals. Therefore, users can safely access the monitoring results + * without additional protection. For the reason, users are recommended to use + * these callback for the accesses to the results. * * If any callback returns non-zero, monitoring stops. */ struct damon_callback { - void *private; - - int (*before_start)(struct damon_ctx *context); int (*after_wmarks_check)(struct damon_ctx *context); - int (*after_sampling)(struct damon_ctx *context); int (*after_aggregation)(struct damon_ctx *context); - int (*before_damos_apply)(struct damon_ctx *context, - struct damon_target *target, - struct damon_region *region, - struct damos *scheme); void (*before_terminate)(struct damon_ctx *context); }; @@ -633,11 +647,37 @@ struct damon_call_control { }; /** + * struct damon_intervals_goal - Monitoring intervals auto-tuning goal. + * + * @access_bp: Access events observation ratio to achieve in bp. + * @aggrs: Number of aggregations to acheive @access_bp within. + * @min_sample_us: Minimum resulting sampling interval in microseconds. + * @max_sample_us: Maximum resulting sampling interval in microseconds. + * + * DAMON automatically tunes &damon_attrs->sample_interval and + * &damon_attrs->aggr_interval aiming the ratio in bp (1/10,000) of + * DAMON-observed access events to theoretical maximum amount within @aggrs + * aggregations be same to @access_bp. 
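
[Editor's aside, not part of the patch] "bp" in the intervals-goal fields is basis points, i.e. 1/10,000. A trivial standalone calculation of the ratio the tuning goal is compared against (the event counts are invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long observed = 150;	  /* access events DAMON observed */
	unsigned long max_events = 10000; /* theoretical maximum in the window */
	unsigned long access_bp = observed * 10000 / max_events;

	/* 150 of 10000 possible events -> 150 bp == 1.5% */
	printf("%lu bp\n", access_bp);
	return 0;
}
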
The logic increases + * &damon_attrs->aggr_interval and &damon_attrs->sampling_interval in same + * ratio if the current access events observation ratio is lower than the + * target for each @aggrs aggregations, and vice versa. + * + * If @aggrs is zero, the tuning is disabled and hence this struct is ignored. + */ +struct damon_intervals_goal { + unsigned long access_bp; + unsigned long aggrs; + unsigned long min_sample_us; + unsigned long max_sample_us; +}; + +/** * struct damon_attrs - Monitoring attributes for accuracy/overhead control. * * @sample_interval: The time between access samplings. * @aggr_interval: The time between monitor results aggregations. * @ops_update_interval: The time between monitoring operations updates. + * @intervals_goal: Intervals auto-tuning goal. * @min_nr_regions: The minimum number of adaptive monitoring * regions. * @max_nr_regions: The maximum number of adaptive monitoring @@ -657,8 +697,20 @@ struct damon_attrs { unsigned long sample_interval; unsigned long aggr_interval; unsigned long ops_update_interval; + struct damon_intervals_goal intervals_goal; unsigned long min_nr_regions; unsigned long max_nr_regions; +/* private: internal use only */ + /* + * @aggr_interval to @sample_interval ratio. + * Core-external components call damon_set_attrs() with &damon_attrs + * that this field is unset. In the case, damon_set_attrs() sets this + * field of resulting &damon_attrs. Core-internal components such as + * kdamond_tune_intervals() calls damon_set_attrs() with &damon_attrs + * that this field is set. In the case, damon_set_attrs() just keep + * it. + */ + unsigned long aggr_samples; }; /** @@ -707,6 +759,11 @@ struct damon_ctx { * update */ unsigned long next_ops_update_sis; + /* + * number of sample intervals that should be passed before next + * intervals tuning + */ + unsigned long next_intervals_tune_sis; /* for waiting until the execution of the kdamond_fn is started */ struct completion kdamond_started; /* for scheme quotas prioritization */ @@ -788,6 +845,12 @@ static inline unsigned long damon_sz_region(struct damon_region *r) #define damos_for_each_filter_safe(f, next, scheme) \ list_for_each_entry_safe(f, next, &(scheme)->filters, list) +#define damos_for_each_ops_filter(f, scheme) \ + list_for_each_entry(f, &(scheme)->ops_filters, list) + +#define damos_for_each_ops_filter_safe(f, next, scheme) \ + list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list) + #ifdef CONFIG_DAMON struct damon_region *damon_new_region(unsigned long start, unsigned long end); @@ -813,6 +876,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed, struct damos_filter *damos_new_filter(enum damos_filter_type type, bool matching, bool allow); void damos_add_filter(struct damos *s, struct damos_filter *f); +bool damos_filter_for_ops(enum damos_filter_type type); void damos_destroy_filter(struct damos_filter *f); struct damos_quota_goal *damos_new_quota_goal( diff --git a/include/linux/dax.h b/include/linux/dax.h index df41a0017b31..dcc9fcdf14e4 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -207,6 +207,11 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); +static inline bool dax_page_is_idle(struct page *page) +{ + return page && page_ref_count(page) == 0; +} + #if IS_ENABLED(CONFIG_DAX) int dax_read_lock(void); void dax_read_unlock(int id); @@ -220,6 +225,19 @@ static inline void 
dax_read_unlock(int id) { } #endif /* CONFIG_DAX */ + +#if !IS_ENABLED(CONFIG_FS_DAX) +static inline int __must_check dax_break_layout(struct inode *inode, + loff_t start, loff_t end, void (cb)(struct inode *)) +{ + return 0; +} + +static inline void dax_break_layout_final(struct inode *inode) +{ +} +#endif + bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, @@ -241,8 +259,18 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, pfn_t pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +void dax_delete_mapping_range(struct address_space *mapping, + loff_t start, loff_t end); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int __must_check dax_break_layout(struct inode *inode, loff_t start, + loff_t end, void (cb)(struct inode *)); +static inline int __must_check dax_break_layout_inode(struct inode *inode, + void (cb)(struct inode *)) +{ + return dax_break_layout(inode, 0, LLONG_MAX, cb); +} +void dax_break_layout_final(struct inode *inode); int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t destoff, loff_t len, bool *is_same, diff --git a/include/linux/fb.h b/include/linux/fb.h index 5ba187e08cf7..cd653862ab99 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -225,6 +225,7 @@ struct fb_deferred_io { int open_count; /* number of opened files; protected by fb_info lock */ struct mutex lock; /* mutex that protects the pageref list */ struct list_head pagereflist; /* list of pagerefs for touched pages */ + struct address_space *mapping; /* page cache object for fb device */ /* callback */ struct page *(*get_page)(struct fb_info *info, unsigned long offset); void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h index 4d3f8074c137..45ad2408a80c 100644 --- a/include/linux/folio_queue.h +++ b/include/linux/folio_queue.h @@ -15,6 +15,7 @@ #define _LINUX_FOLIO_QUEUE_H #include <linux/pagevec.h> +#include <linux/mm.h> /* * Segment in a queue of running buffers. 
Each segment can hold a number of @@ -216,13 +217,6 @@ static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot) clear_bit(slot, &folioq->marks3); } -static inline unsigned int __folio_order(struct folio *folio) -{ - if (!folio_test_large(folio)) - return 0; - return folio->_flags_1 & 0xff; -} - /** * folioq_append: Add a folio to a folio queue segment * @folioq: The segment to add to @@ -241,7 +235,7 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli unsigned int slot = folioq->vec.nr++; folioq->vec.folios[slot] = folio; - folioq->orders[slot] = __folio_order(folio); + folioq->orders[slot] = folio_order(folio); return slot; } @@ -263,7 +257,7 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct unsigned int slot = folioq->vec.nr++; folioq->vec.folios[slot] = folio; - folioq->orders[slot] = __folio_order(folio); + folioq->orders[slot] = folio_order(folio); folioq_mark(folioq, slot); return slot; } diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 93e509b6c00e..e893d546a49f 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -39,6 +39,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write); +vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, + bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_UNSUPPORTED, @@ -341,6 +345,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); int min_order_for_split(struct folio *folio); int split_folio_to_list(struct folio *folio, struct list_head *list); +bool uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns); +bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns); +int folio_split(struct folio *folio, unsigned int new_order, struct page *page, + struct list_head *list); +/* + * try_folio_split - try to split a @folio at @page using non uniform split. + * @folio: folio to be split + * @page: split to order-0 at the given page + * @list: store the after-split folios + * + * Try to split a @folio at @page using non uniform split to order-0, if + * non uniform split is not supported, fall back to uniform split. + * + * Return: 0: split is successful, otherwise split failed. 
+ */ +static inline int try_folio_split(struct folio *folio, struct page *page, + struct list_head *list) +{ + int ret = min_order_for_split(folio); + + if (ret < 0) + return ret; + + if (!non_uniform_split_supported(folio, 0, false)) + return split_huge_page_to_list_to_order(&folio->page, list, + ret); + return folio_split(folio, ret, page, list); +} static inline int split_huge_page(struct page *page) { struct folio *folio = page_folio(page); @@ -404,7 +438,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end); void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, - unsigned long end, long adjust_next); + unsigned long end, struct vm_area_struct *next); spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); @@ -533,6 +567,12 @@ static inline int split_folio_to_list(struct folio *folio, struct list_head *lis return 0; } +static inline int try_folio_split(struct folio *folio, struct page *page, + struct list_head *list) +{ + return 0; +} + static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) @@ -571,7 +611,7 @@ static inline int madvise_collapse(struct vm_area_struct *vma, static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { } static inline int is_swap_pmd(pmd_t pmd) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 76a75ec03dd6..8f3ac832ee7f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -174,6 +174,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio); extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages[MAX_NUMNODES]; +void hugetlb_bootmem_alloc(void); +bool hugetlb_bootmem_allocated(void); + /* arch callbacks */ #ifndef CONFIG_HIGHPTE @@ -588,6 +591,7 @@ enum hugetlb_page_flags { HPG_freed, HPG_vmemmap_optimized, HPG_raw_hwp_unreliable, + HPG_cma, __NR_HPAGEFLAGS, }; @@ -647,6 +651,7 @@ HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) +HPAGEFLAG(Cma, cma) #ifdef CONFIG_HUGETLB_PAGE @@ -675,11 +680,21 @@ struct hstate { char name[HSTATE_NAME_LEN]; }; +struct cma; + struct huge_bootmem_page { struct list_head list; struct hstate *hstate; + unsigned long flags; + struct cma *cma; }; +#define HUGE_BOOTMEM_HVO 0x0001 +#define HUGE_BOOTMEM_ZONES_VALID 0x0002 +#define HUGE_BOOTMEM_CMA 0x0004 + +bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m); + int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); void wait_for_freed_hugetlb_folios(void); @@ -815,6 +830,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, } #endif +#ifndef arch_has_huge_bootmem_alloc +/* + * Some architectures do their own bootmem allocation, so they can't use + * early CMA allocation. 
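
[Editor's aside, not part of the patch] A kernel-context sketch (illustrative only, not compilable standalone) of how the try_folio_split() helper added above is meant to be called: split a large folio down to order-0 around one page of interest, letting the helper use the non-uniform split when supported and fall back to a uniform split otherwise. Locking and reference requirements are assumed to match the existing folio split interfaces (folio locked and referenced by the caller); check the split_huge_page_to_list_to_order() rules before relying on this.

#include <linux/huge_mm.h>

static int example_isolate_page(struct folio *folio, struct page *page)
{
	int ret;

	/* NULL list: leave the after-split folios where the core code puts
	 * them, as split_huge_page() does. */
	ret = try_folio_split(folio, page, NULL);
	if (ret)
		return ret;	/* split failed; folio left intact */

	/* ... work on the now order-0 folio containing @page ... */
	return 0;
}
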
+ */ +static inline bool arch_has_huge_bootmem_alloc(void) +{ + return false; +} +#endif + static inline struct hstate *folio_hstate(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); @@ -1257,6 +1283,15 @@ static inline bool hugetlbfs_pagecache_present( { return false; } + +static inline void hugetlb_bootmem_alloc(void) +{ +} + +static inline bool hugetlb_bootmem_allocated(void) +{ + return false; +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e79eb6ac516f..ef5a1ecc6e59 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -133,7 +133,6 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size); -void memblock_free_all(void); void memblock_free(void *ptr, size_t size); void reset_all_zones_managed_pages(void); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6e74b8254d9b..53364526d877 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -438,9 +438,7 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) */ static inline bool folio_memcg_charged(struct folio *folio) { - if (folio_memcg_kmem(folio)) - return __folio_objcg(folio) != NULL; - return __folio_memcg(folio) != NULL; + return folio->memcg_data != 0; } /* @@ -649,8 +647,6 @@ int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); - void __mem_cgroup_uncharge(struct folio *folio); /** @@ -1040,7 +1036,9 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, rcu_read_unlock(); } -void split_page_memcg(struct page *head, int old_order, int new_order); +void split_page_memcg(struct page *first, unsigned order); +void folio_split_memcg_refs(struct folio *folio, unsigned old_order, + unsigned new_order); static inline u64 cgroup_id_from_mm(struct mm_struct *mm) { @@ -1165,10 +1163,6 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr) -{ -} - static inline void mem_cgroup_uncharge(struct folio *folio) { } @@ -1465,7 +1459,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } -static inline void split_page_memcg(struct page *head, int old_order, int new_order) +static inline void split_page_memcg(struct page *first, unsigned order) +{ +} + +static inline void folio_split_memcg_refs(struct folio *folio, + unsigned old_order, unsigned new_order) { } @@ -1848,6 +1847,9 @@ static inline void mem_cgroup_exit_user_fault(void) current->in_user_fault = 0; } +void memcg1_swapout(struct folio *folio, swp_entry_t entry); +void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages); + #else /* CONFIG_MEMCG_V1 */ static inline unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, @@ -1875,6 +1877,14 @@ static inline void mem_cgroup_exit_user_fault(void) { } +static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry) +{ +} + +static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages) +{ +} + #endif /* CONFIG_MEMCG_V1 */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h 
b/include/linux/memory.h index c0afee5d126e..12daa6ec7d09 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -25,7 +25,7 @@ /** * struct memory_group - a logical group of memory blocks * @nid: The node id for all memory blocks inside the memory group. - * @blocks: List of all memory blocks belonging to this memory group. + * @memory_blocks: List of all memory blocks belonging to this memory group. * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this * memory group. * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 3f7143ade32c..4aa151914eab 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -161,7 +161,7 @@ static inline bool is_device_private_page(const struct page *page) { return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; + page_pgmap(page)->type == MEMORY_DEVICE_PRIVATE; } static inline bool folio_is_device_private(const struct folio *folio) @@ -173,13 +173,13 @@ static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; + page_pgmap(page)->type == MEMORY_DEVICE_PCI_P2PDMA; } static inline bool is_device_coherent_page(const struct page *page) { return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_COHERENT; + page_pgmap(page)->type == MEMORY_DEVICE_COHERENT; } static inline bool folio_is_device_coherent(const struct folio *folio) @@ -187,6 +187,17 @@ static inline bool folio_is_device_coherent(const struct folio *folio) return is_device_coherent_page(&folio->page); } +static inline bool is_fsdax_page(const struct page *page) +{ + return is_zone_device_page(page) && + page_pgmap(page)->type == MEMORY_DEVICE_FS_DAX; +} + +static inline bool folio_is_fsdax(const struct folio *folio) +{ + return is_fsdax_page(&folio->page); +} + #ifdef CONFIG_ZONE_DEVICE void zone_device_page_init(struct page *page); void *memremap_pages(struct dev_pagemap *pgmap, int nid); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 80891120cca9..aaa2114498d6 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -205,8 +205,8 @@ struct migrate_vma { unsigned long end; /* - * Set to the owner value also stored in page->pgmap->owner for - * migrating out of device private memory. The flags also need to + * Set to the owner value also stored in page_pgmap(page)->owner + * for migrating out of device private memory. The flags also need to * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. 
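
[Editor's aside, not part of the patch] The migrate_vma comment above now refers to page_pgmap(page)->owner. A hypothetical driver-side sketch (kernel context, illustrative only; the src/dst arrays and the migrate_vma_setup()/finalize calls are omitted, and field names beyond those quoted in the comment follow the current struct migrate_vma definition):

#include <linux/migrate.h>
#include <linux/memremap.h>

static void example_setup_migrate(struct migrate_vma *args,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct dev_pagemap *pgmap)
{
	args->vma = vma;
	args->start = start;
	args->end = end;
	/* must match page_pgmap(page)->owner of the device-private pages */
	args->pgmap_owner = pgmap->owner;
	args->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
}
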
* The caller should always set this field when using mmu notifier * callbacks to avoid device MMU invalidations for device private diff --git a/include/linux/mm.h b/include/linux/mm.h index beba5ba0fd97..32ba0e33422b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -32,6 +32,7 @@ #include <linux/memremap.h> #include <linux/slab.h> #include <linux/cacheinfo.h> +#include <linux/rcuwait.h> struct mempolicy; struct anon_vma; @@ -40,20 +41,10 @@ struct user_struct; struct pt_regs; struct folio_batch; +void arch_mm_preinit(void); void mm_core_init(void); void init_mm_internals(void); -#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ -extern unsigned long max_mapnr; - -static inline void set_max_mapnr(unsigned long limit) -{ - max_mapnr = limit; -} -#else -static inline void set_max_mapnr(unsigned long limit) { } -#endif - extern atomic_long_t _totalram_pages; static inline unsigned long totalram_pages(void) { @@ -242,8 +233,6 @@ void setup_initial_init_mm(void *start_code, void *end_code, struct vm_area_struct *vm_area_alloc(struct mm_struct *); struct vm_area_struct *vm_area_dup(struct vm_area_struct *); void vm_area_free(struct vm_area_struct *); -/* Use only if VMA has no other users */ -void __vm_area_free(struct vm_area_struct *vma); #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; @@ -682,13 +671,57 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_PER_VMA_LOCK +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + static struct lock_class_key lockdep_key; + + lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0); +#endif + if (reset_refcnt) + refcount_set(&vma->vm_refcnt, 0); + vma->vm_lock_seq = UINT_MAX; +} + +static inline bool is_vma_writer_only(int refcnt) +{ + /* + * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma + * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on + * a detached vma happens only in vma_mark_detached() and is a rare + * case, therefore most of the time there will be no unnecessary wakeup. + */ + return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1; +} + +static inline void vma_refcount_put(struct vm_area_struct *vma) +{ + /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ + struct mm_struct *mm = vma->vm_mm; + int oldcnt; + + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); + if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) { + + if (is_vma_writer_only(oldcnt - 1)) + rcuwait_wake_up(&mm->vma_writer_wait); + } +} + /* * Try to read-lock a vma. The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to * using mmap_lock. The function should never yield false unlocked result. + * False locked result is possible if mm_lock_seq overflows or if vma gets + * reused and attached to a different mm before we lock it. + * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got + * detached. */ -static inline bool vma_start_read(struct vm_area_struct *vma) +static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, + struct vm_area_struct *vma) { + int oldcnt; + /* * Check before locking. A race might cause false locked result. 
* We can use READ_ONCE() for the mm_lock_seq here, and don't need @@ -696,16 +729,26 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below. */ - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) - return false; + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) + return NULL; - if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) - return false; + /* + * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire() + * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET. + * Acquire fence is required here to avoid reordering against later + * vm_lock_seq check and checks inside lock_vma_under_rcu(). + */ + if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) { + /* return EAGAIN if vma got detached from under us */ + return oldcnt ? NULL : ERR_PTR(-EAGAIN); + } + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); /* - * Overflow might produce false locked result. + * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result. * False unlocked result is impossible because we modify and check - * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq + * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq * modification invalidates all existing locks. * * We must use ACQUIRE semantics for the mm_lock_seq so that if we are @@ -713,18 +756,47 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all(). */ - if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { - up_read(&vma->vm_lock->lock); - return false; + if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) { + vma_refcount_put(vma); + return NULL; } + + return vma; +} + +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) +{ + int oldcnt; + + mmap_assert_locked(vma->vm_mm); + if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) + return false; + + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); return true; } +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline bool vma_start_read_locked(struct vm_area_struct *vma) +{ + return vma_start_read_locked_nested(vma, 0); +} + static inline void vma_end_read(struct vm_area_struct *vma) { - rcu_read_lock(); /* keeps vma alive till the end of up_read */ - up_read(&vma->vm_lock->lock); - rcu_read_unlock(); + vma_refcount_put(vma); } /* WARNING! 
Can only be used if mmap_lock is expected to be write-locked */ @@ -740,6 +812,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l return (vma->vm_lock_seq == *mm_lock_seq); } +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq); + /* * Begin writing to a VMA. * Exclude concurrent readers under the per-VMA lock until the currently @@ -752,15 +826,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - down_write(&vma->vm_lock->lock); - /* - * We should use WRITE_ONCE() here because we can have concurrent reads - * from the early lockless pessimistic check in vma_start_read(). - * We don't really care about the correctness of that early check, but - * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. - */ - WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock->lock); + __vma_start_write(vma, mm_lock_seq); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) @@ -772,18 +838,36 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_assert_locked(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_lock->lock)) - vma_assert_write_locked(vma); + unsigned int mm_lock_seq; + + VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 && + !__is_vma_write_locked(vma, &mm_lock_seq), vma); +} + +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. + */ +static inline void vma_assert_attached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); } -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_assert_detached(struct vm_area_struct *vma) { - /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); } +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_detached(vma); + refcount_set_release(&vma->vm_refcnt, 1); +} + +void vma_mark_detached(struct vm_area_struct *vma); + static inline void release_fault_lock(struct vm_fault *vmf) { if (vmf->flags & FAULT_FLAG_VMA_LOCK) @@ -805,14 +889,18 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ -static inline bool vma_start_read(struct vm_area_struct *vma) - { return false; } +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} +static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, + struct vm_area_struct *vma) + { return NULL; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } -static inline void vma_mark_detached(struct vm_area_struct *vma, - bool detached) {} +static inline void vma_assert_attached(struct vm_area_struct *vma) {} +static inline void vma_assert_detached(struct vm_area_struct *vma) {} +static inline void vma_mark_attached(struct vm_area_struct *vma) {} +static inline void vma_mark_detached(struct vm_area_struct *vma) {} static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, 
unsigned long address) @@ -839,18 +927,13 @@ static inline void assert_fault_locked(struct vm_fault *vmf) extern const struct vm_operations_struct vma_dummy_vm_ops; -/* - * WARNING: vma_init does not initialize vma->vm_lock. - * Use vm_area_alloc()/vm_area_free() if vma needs locking. - */ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); - vma_numab_state_init(vma); + vma_lock_init(vma, false); } /* Use when VMA is not part of the VMA tree and needs no locking */ @@ -1043,6 +1126,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } @@ -1083,6 +1167,25 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; +extern void prep_compound_page(struct page *page, unsigned int order); + +static inline unsigned int folio_large_order(const struct folio *folio) +{ + return folio->_flags_1 & 0xff; +} + +#ifdef NR_PAGES_IN_LARGE_FOLIO +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return folio->_nr_pages; +} +#else +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return 1L << folio_large_order(folio); +} +#endif + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -1096,7 +1199,7 @@ static inline unsigned int compound_order(struct page *page) if (!test_bit(PG_head, &folio->flags)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } /** @@ -1112,7 +1215,7 @@ static inline unsigned int folio_order(const struct folio *folio) { if (!folio_test_large(folio)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } #include <linux/huge_mm.h> @@ -1205,6 +1308,8 @@ static inline int is_vmalloc_or_module_addr(const void *x) static inline int folio_entire_mapcount(const struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1)) + return 0; return atomic_read(&folio->_entire_mapcount) + 1; } @@ -1404,25 +1509,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf); * back into memory. 
*/ -#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) -DECLARE_STATIC_KEY_FALSE(devmap_managed_key); - -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs); -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (!static_branch_unlikely(&devmap_managed_key)) - return false; - if (!folio_is_zone_device(folio)) - return false; - return __put_devmap_managed_folio_refs(folio, refs); -} -#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - return false; -} -#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ - /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) @@ -1543,12 +1629,6 @@ static inline void put_page(struct page *page) if (folio_test_slab(folio)) return; - /* - * For some devmap managed pages we need to catch refcount transition - * from 2 to 1: - */ - if (put_devmap_managed_folio_refs(folio, 1)) - return; folio_put(folio); } @@ -1907,6 +1987,13 @@ static inline struct folio *pfn_folio(unsigned long pfn) return page_folio(pfn_to_page(pfn)); } +static inline bool folio_has_pincount(const struct folio *folio) +{ + if (IS_ENABLED(CONFIG_64BIT)) + return folio_test_large(folio); + return folio_order(folio) > 1; +} + /** * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. * @folio: The folio. @@ -1923,7 +2010,7 @@ static inline struct folio *pfn_folio(unsigned long pfn) * get that many refcounts, and b) all the callers of this routine are * expected to be able to deal gracefully with a false positive. * - * For large folios, the result will be exactly correct. That's because + * For most large folios, the result will be exactly correct. That's because * we have more tracking data available: the _pincount field is used * instead of the GUP_PIN_COUNTING_BIAS scheme. * @@ -1934,7 +2021,7 @@ static inline struct folio *pfn_folio(unsigned long pfn) */ static inline bool folio_maybe_dma_pinned(struct folio *folio) { - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) return atomic_read(&folio->_pincount) > 0; /* @@ -2006,6 +2093,13 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio) if (folio_is_device_coherent(folio)) return false; + /* + * Filesystems can only tolerate transient delays to truncate and + * hole-punch operations + */ + if (folio_is_fsdax(folio)) + return false; + /* Otherwise, non-movable zone folios can be pinned. */ return !folio_is_zone_movable(folio); @@ -2049,11 +2143,7 @@ static inline long folio_nr_pages(const struct folio *folio) { if (!folio_test_large(folio)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << (folio->_flags_1 & 0xff); -#endif + return folio_large_nr_pages(folio); } /* Only hugetlbfs can allocate folios larger than MAX_ORDER */ @@ -2068,24 +2158,20 @@ static inline long folio_nr_pages(const struct folio *folio) * page. compound_nr() can be called on a tail page, and is defined to * return 1 in that case. 
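
The folio_nr_pages()/compound_nr() hunks above change only how the page count is stored and returned (a long derived from folio_large_nr_pages() rather than a raw field read); the usual caller pattern is unchanged. A minimal sketch of such a caller, with an invented helper name, assuming nothing beyond the interfaces shown above:

	/* Illustrative only: visit every page of a (possibly large) folio. */
	static void zero_folio_pages(struct folio *folio)
	{
		long i, nr = folio_nr_pages(folio);	/* 1 for small folios */

		for (i = 0; i < nr; i++)
			clear_highpage(folio_page(folio, i));
	}
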
*/ -static inline unsigned long compound_nr(struct page *page) +static inline long compound_nr(struct page *page) { struct folio *folio = (struct folio *)page; if (!test_bit(PG_head, &folio->flags)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << (folio->_flags_1 & 0xff); -#endif + return folio_large_nr_pages(folio); } /** * thp_nr_pages - The number of regular pages in this huge page. * @page: The head page of a huge page. */ -static inline int thp_nr_pages(struct page *page) +static inline long thp_nr_pages(struct page *page) { return folio_nr_pages((struct folio *)page); } @@ -2140,23 +2226,18 @@ static inline size_t folio_size(const struct folio *folio) } /** - * folio_likely_mapped_shared - Estimate if the folio is mapped into the page - * tables of more than one MM + * folio_maybe_mapped_shared - Whether the folio is mapped into the page + * tables of more than one MM * @folio: The folio. * - * This function checks if the folio is currently mapped into more than one - * MM ("mapped shared"), or if the folio is only mapped into a single MM - * ("mapped exclusively"). + * This function checks if the folio maybe currently mapped into more than one + * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single + * MM ("mapped exclusively"). * * For KSM folios, this function also returns "mapped shared" when a folio is * mapped multiple times into the same MM, because the individual page mappings * are independent. * - * As precise information is not easily available for all folios, this function - * estimates the number of MMs ("sharers") that are currently mapping a folio - * using the number of times the first page of the folio is currently mapped - * into page tables. - * * For small anonymous folios and anonymous hugetlb folios, the return * value will be exactly correct: non-KSM folios can only be mapped at most once * into an MM, and they cannot be partially mapped. KSM folios are @@ -2164,8 +2245,8 @@ static inline size_t folio_size(const struct folio *folio) * * For other folios, the result can be fuzzy: * #. For partially-mappable large folios (THP), the return value can wrongly - * indicate "mapped exclusively" (false negative) when the folio is - * only partially mapped into at least one MM. + * indicate "mapped shared" (false positive) if a folio was mapped by + * more than two MMs at one point in time. * #. For pagecache folios (including hugetlb), the return value can wrongly * indicate "mapped shared" (false positive) when two VMAs in the same MM * cover the same file range. @@ -2182,7 +2263,7 @@ static inline size_t folio_size(const struct folio *folio) * * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline bool folio_likely_mapped_shared(struct folio *folio) +static inline bool folio_maybe_mapped_shared(struct folio *folio) { int mapcount = folio_mapcount(folio); @@ -2190,16 +2271,22 @@ static inline bool folio_likely_mapped_shared(struct folio *folio) if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio))) return mapcount > 1; - /* A single mapping implies "mapped exclusively". */ - if (mapcount <= 1) - return false; - - /* If any page is mapped more than once we treat it "mapped shared". */ - if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio)) + /* + * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ... + * simply assume "mapped shared", nobody should really care + * about this for arbitrary kernel allocations. 
+ */ + if (!IS_ENABLED(CONFIG_MM_ID)) return true; - /* Let's guess based on the first subpage. */ - return atomic_read(&folio->_mapcount) > 0; + /* + * A single mapping implies "mapped exclusively", even if the + * folio flag says something different: it's easier to handle this + * case here instead of on the RMAP hot path. + */ + if (mapcount <= 1) + return false; + return folio_test_large_maybe_mapped_shared(folio); } #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE @@ -3179,7 +3266,6 @@ extern void reserve_bootmem_region(phys_addr_t start, /* Free the reserved page into the buddy system, so it gets managed. */ void free_reserved_page(struct page *page); -#define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { @@ -3539,6 +3625,8 @@ int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num); +vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, + bool write); vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, @@ -3817,6 +3905,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) #endif void *sparse_buffer_alloc(unsigned long size); +unsigned long section_map_size(void); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); @@ -3825,7 +3914,8 @@ p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, - struct vmem_altmap *altmap, struct page *reuse); + struct vmem_altmap *altmap, unsigned long ptpfn, + unsigned long flags); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; void *vmemmap_alloc_block_buf(unsigned long size, int node, @@ -3841,6 +3931,12 @@ int vmemmap_populate_hugepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); +int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); +int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); +void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); void vmemmap_populate_print_last(void); #ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_free(unsigned long start, unsigned long end, @@ -3907,9 +4003,6 @@ static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap, } #endif -void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, - unsigned long nr_pages); - enum mf_flags { MF_COUNT_INCREASED = 1 << 0, MF_ACTION_REQUIRED = 1 << 1, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 75e8850cec3a..56d07edd01f9 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -19,6 +19,7 @@ #include <linux/workqueue.h> #include <linux/seqlock.h> #include <linux/percpu_counter.h> +#include <linux/types.h> #include <asm/mmu.h> @@ -133,8 +134,11 @@ struct page { unsigned long compound_head; /* Bit zero is set */ }; struct { /* ZONE_DEVICE pages */ - /** @pgmap: Points to 
the hosting device page map. */ - struct dev_pagemap *pgmap; + /* + * The first word is used for compound_head or folio + * pgmap + */ + void *_unused_pgmap_compound_head; void *zone_device_data; /* * ZONE_DEVICE private pages are counted as being @@ -287,6 +291,49 @@ typedef struct { unsigned long val; } swp_entry_t; +#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT) +/* We have some extra room after the refcount in tail pages. */ +#define NR_PAGES_IN_LARGE_FOLIO +#endif + +/* + * On 32bit, we can cut the required metadata in half, because: + * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have, + * so we can limit MM IDs to 15 bit (32767). + * (b) We don't expect folios where even a single complete PTE mapping by + * one MM would exceed 15 bits (order-15). + */ +#ifdef CONFIG_64BIT +typedef int mm_id_mapcount_t; +#define MM_ID_MAPCOUNT_MAX INT_MAX +typedef unsigned int mm_id_t; +#else /* !CONFIG_64BIT */ +typedef short mm_id_mapcount_t; +#define MM_ID_MAPCOUNT_MAX SHRT_MAX +typedef unsigned short mm_id_t; +#endif /* CONFIG_64BIT */ + +/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */ +#define MM_ID_DUMMY 0 +#define MM_ID_MIN (MM_ID_DUMMY + 1) + +/* + * We leave the highest bit of each MM id unused, so we can store a flag + * in the highest bit of each folio->_mm_id[]. + */ +#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1) +#define MM_ID_MASK ((1U << MM_ID_BITS) - 1) +#define MM_ID_MAX MM_ID_MASK + +/* + * In order to use bit_spin_lock(), which requires an unsigned long, we + * operate on folio->_mm_ids when working on flags. + */ +#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS +#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM) +#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1) +#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM) + /** * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. @@ -296,6 +343,8 @@ typedef struct { * anonymous memory. * @index: Offset within the file, in units of pages. For anonymous memory, * this is the index from the beginning of the mmap. + * @share: number of DAX mappings that reference this folio. See + * dax_associate_entry. * @private: Filesystem per-folio data (see folio_attach_private()). * @swap: Used for swp_entry_t if folio_test_swapcache(). * @_mapcount: Do not access this member directly. Use folio_mapcount() to @@ -303,13 +352,17 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @pgmap: Metadata for ZONE_DEVICE mappings * @virtual: Virtual address in the kernel direct map. * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_large_mapcount: Do not use directly, call folio_mapcount(). * @_nr_pages_mapped: Do not use outside of rmap and debug code. * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). - * @_folio_nr_pages: Do not use directly, call folio_nr_pages(). + * @_nr_pages: Do not use directly, call folio_nr_pages(). + * @_mm_id: Do not use outside of rmap code. + * @_mm_ids: Do not use outside of rmap code. + * @_mm_id_mapcount: Do not use outside of rmap code. * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h. * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h. 
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h. @@ -341,9 +394,13 @@ struct folio { /* private: */ }; /* public: */ + struct dev_pagemap *pgmap; }; struct address_space *mapping; - pgoff_t index; + union { + pgoff_t index; + unsigned long share; + }; union { void *private; swp_entry_t swap; @@ -369,14 +426,30 @@ struct folio { struct { unsigned long _flags_1; unsigned long _head_1; + union { + struct { /* public: */ - atomic_t _large_mapcount; - atomic_t _entire_mapcount; - atomic_t _nr_pages_mapped; - atomic_t _pincount; + atomic_t _large_mapcount; + atomic_t _nr_pages_mapped; #ifdef CONFIG_64BIT - unsigned int _folio_nr_pages; -#endif + atomic_t _entire_mapcount; + atomic_t _pincount; +#endif /* CONFIG_64BIT */ + mm_id_mapcount_t _mm_id_mapcount[2]; + union { + mm_id_t _mm_id[2]; + unsigned long _mm_ids; + }; + /* private: the union with struct page is transitional */ + }; + unsigned long _usable_1[4]; + }; + atomic_t _mapcount_1; + atomic_t _refcount_1; + /* public: */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + unsigned int _nr_pages; +#endif /* NR_PAGES_IN_LARGE_FOLIO */ /* private: the union with struct page is transitional */ }; struct page __page_1; @@ -386,20 +459,27 @@ struct folio { unsigned long _flags_2; unsigned long _head_2; /* public: */ - void *_hugetlb_subpool; - void *_hugetlb_cgroup; - void *_hugetlb_cgroup_rsvd; - void *_hugetlb_hwpoison; + struct list_head _deferred_list; +#ifndef CONFIG_64BIT + atomic_t _entire_mapcount; + atomic_t _pincount; +#endif /* !CONFIG_64BIT */ /* private: the union with struct page is transitional */ }; + struct page __page_2; + }; + union { struct { - unsigned long _flags_2a; - unsigned long _head_2a; + unsigned long _flags_3; + unsigned long _head_3; /* public: */ - struct list_head _deferred_list; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; /* private: the union with struct page is transitional */ }; - struct page __page_2; + struct page __page_3; }; }; @@ -428,14 +508,20 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid); offsetof(struct page, pg) + sizeof(struct page)) FOLIO_MATCH(flags, _flags_1); FOLIO_MATCH(compound_head, _head_1); +FOLIO_MATCH(_mapcount, _mapcount_1); +FOLIO_MATCH(_refcount, _refcount_1); #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ offsetof(struct page, pg) + 2 * sizeof(struct page)) FOLIO_MATCH(flags, _flags_2); FOLIO_MATCH(compound_head, _head_2); -FOLIO_MATCH(flags, _flags_2a); -FOLIO_MATCH(compound_head, _head_2a); +#undef FOLIO_MATCH +#define FOLIO_MATCH(pg, fl) \ + static_assert(offsetof(struct folio, fl) == \ + offsetof(struct page, pg) + 3 * sizeof(struct page)) +FOLIO_MATCH(flags, _flags_3); +FOLIO_MATCH(compound_head, _head_3); #undef FOLIO_MATCH /** @@ -578,6 +664,12 @@ static inline void *folio_get_private(struct folio *folio) typedef unsigned long vm_flags_t; /* + * freeptr_t represents a SLUB freelist pointer, which might be encoded + * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. + */ +typedef struct { unsigned long v; } freeptr_t; + +/* * A region containing a mapping of a non-memory backed file under NOMMU * conditions. These are held in a global tree and are pinned by the VMAs that * map parts of them. 
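
The per-VMA lock rework threading through the mm.h hunks above (vma_start_read() returning the VMA or an error, vma_end_read() dropping a reference instead of an rwsem) and through the vm_area_struct changes just below replaces the per-VMA rw_semaphore with the vm_refcnt reference count. A rough sketch of a lockless reader built on top of it; the function name is invented and error handling is reduced to a fallback return, so this only shows the lock/unlock pairing:

	/* Illustrative only: inspect a VMA without taking mmap_lock. */
	static bool addr_maps_anon_memory(struct mm_struct *mm, unsigned long address)
	{
		struct vm_area_struct *vma;
		bool ret;

		/* Looks up the VMA under RCU and read-locks it by taking a
		 * vm_refcnt reference (via vma_start_read() internally). */
		vma = lock_vma_under_rcu(mm, address);
		if (!vma)
			return false;	/* caller would fall back to mmap_lock */

		ret = vma_is_anonymous(vma);
		vma_end_read(vma);	/* drops the reference / read lock */
		return ret;
	}
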
@@ -633,9 +725,8 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) } #endif -struct vma_lock { - struct rw_semaphore lock; -}; +#define VMA_LOCK_OFFSET 0x40000000 +#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1) struct vma_numab_state { /* @@ -681,6 +772,9 @@ struct vma_numab_state { * * Only explicitly marked struct members may be accessed by RCU readers before * getting a stable reference. + * + * WARNING: when adding new members, please update vm_area_init_from() to copy + * them during vm_area_struct content duplication. */ struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -691,9 +785,7 @@ struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; }; -#ifdef CONFIG_PER_VMA_LOCK - struct rcu_head vm_rcu; /* Used for deferred freeing. */ -#endif + freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */ }; /* @@ -714,18 +806,12 @@ struct vm_area_struct { #ifdef CONFIG_PER_VMA_LOCK /* - * Flag to indicate areas detached from the mm->mm_mt tree. - * Unstable RCU readers are allowed to read this. - */ - bool detached; - - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -734,20 +820,7 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - /* Unstable RCU readers are allowed to read this. */ - struct vma_lock *vm_lock; #endif - - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. - * - */ - struct { - struct rb_node rb; - unsigned long rb_subtree_last; - } shared; - /* * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma * list, after a COW of one of the file pages. A MAP_SHARED vma @@ -767,14 +840,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_ANON_VMA_NAME - /* - * For private and shared anonymous mappings, a pointer to a null - * terminated string containing the name given to the vma, or NULL if - * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. - */ - struct anon_vma_name *anon_name; -#endif #ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; #endif @@ -787,6 +852,30 @@ struct vm_area_struct { #ifdef CONFIG_NUMA_BALANCING struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. */ + refcount_t vm_refcnt ____cacheline_aligned_in_smp; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map vmlock_dep_map; +#endif +#endif + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + * + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; +#ifdef CONFIG_ANON_VMA_NAME + /* + * For private and shared anonymous mappings, a pointer to a null + * terminated string containing the name given to the vma, or NULL if + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. 
+ */ + struct anon_vma_name *anon_name; +#endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; @@ -922,6 +1011,7 @@ struct mm_struct { * by mmlist_lock */ #ifdef CONFIG_PER_VMA_LOCK + struct rcuwait vma_writer_wait; /* * This field has lock-like semantics, meaning it is sometimes * accessed with ACQUIRE/RELEASE semantics. @@ -1074,6 +1164,9 @@ struct mm_struct { #endif } lru_gen; #endif /* CONFIG_LRU_GEN_WALKS_MMU */ +#ifdef CONFIG_MM_ID + mm_id_t mm_id; +#endif /* CONFIG_MM_ID */ } __randomize_layout; /* diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 45a21faa3ff6..4706c6769902 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -122,12 +122,6 @@ static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int #endif /* CONFIG_PER_VMA_LOCK */ -static inline void mmap_init_lock(struct mm_struct *mm) -{ - init_rwsem(&mm->mmap_lock); - mm_lock_seqcount_init(mm); -} - static inline void mmap_write_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, true); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index e2dd57ca368b..bc2402a45741 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -43,10 +43,10 @@ struct mmu_interval_notifier; * a device driver to possibly ignore the invalidation if the * owner field matches the driver's device private pgmap owner. * - * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no - * longer have exclusive access to the page. When sent during creation of an - * exclusive range the owner will be initialised to the value provided by the - * caller of make_device_exclusive_range(), otherwise the owner will be NULL. + * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive. + * The owner is initialized to the value provided by the caller of + * make_device_exclusive(), such that this caller can filter out these + * events. 
*/ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e16939553930..25e80b2ca7f4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -138,6 +138,7 @@ enum numa_stat_item { enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, + NR_FREE_PAGES_BLOCKS, NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, NR_ZONE_ACTIVE_ANON, @@ -220,9 +221,11 @@ enum node_stat_item { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, + PGDEMOTE_PROACTIVE, #ifdef CONFIG_HUGETLB_PAGE NR_HUGETLB, #endif + NR_BALLOON_PAGES, NR_VM_NODE_STAT_ITEMS }; @@ -1161,6 +1164,12 @@ static inline bool is_zone_device_page(const struct page *page) return page_zonenum(page) == ZONE_DEVICE; } +static inline struct dev_pagemap *page_pgmap(const struct page *page) +{ + VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page); + return page_folio(page)->pgmap; +} + /* * Consecutive zone device pages should not be merged into the same sgl * or bvec segment with other types of pages or if they belong to different @@ -1176,7 +1185,7 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a, return false; if (!is_zone_device_page(a)) return true; - return a->pgmap == b->pgmap; + return page_pgmap(a) == page_pgmap(b); } extern void memmap_init_zone_device(struct zone *, unsigned long, @@ -1191,6 +1200,10 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a, { return true; } +static inline struct dev_pagemap *page_pgmap(const struct page *page) +{ + return NULL; +} #endif static inline bool folio_is_zone_device(const struct folio *folio) @@ -1937,6 +1950,9 @@ enum { #ifdef CONFIG_ZONE_DEVICE SECTION_TAINT_ZONE_DEVICE_BIT, #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT + SECTION_IS_VMEMMAP_PREINIT_BIT, +#endif SECTION_MAP_LAST_BIT, }; @@ -1947,6 +1963,9 @@ enum { #ifdef CONFIG_ZONE_DEVICE #define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT) +#endif #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) #define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT @@ -2001,6 +2020,30 @@ static inline int online_device_section(struct mem_section *section) } #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +static inline int preinited_vmemmap_section(struct mem_section *section) +{ + return (section && + (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); +} + +void sparse_vmemmap_init_nid_early(int nid); +void sparse_vmemmap_init_nid_late(int nid); + +#else +static inline int preinited_vmemmap_section(struct mem_section *section) +{ + return 0; +} +static inline void sparse_vmemmap_init_nid_early(int nid) +{ +} + +static inline void sparse_vmemmap_init_nid_late(int nid) +{ +} +#endif + static inline int online_section_nr(unsigned long nr) { return online_section(__nr_to_section(nr)); @@ -2038,6 +2081,9 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) } #endif +void sparse_init_early_section(int nid, struct page *map, unsigned long pnum, + unsigned long flags); + #ifndef CONFIG_HAVE_ARCH_PFN_VALID /** * pfn_valid - check if there is a valid memory map entry for a PFN @@ -2100,6 +2146,11 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr) return -1; } +#define for_each_present_section_nr(start, section_nr) \ + for (section_nr = 
next_present_section_nr(start - 1); \ + section_nr != -1; \ + section_nr = next_present_section_nr(section_nr)) + /* * These are _only_ used during initialisation, therefore they * can use __initdata ... They could have names to indicate @@ -2119,6 +2170,8 @@ void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) +#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0) +#define sparse_vmemmap_init_nid_late(_nid) do {} while (0) #define pfn_in_present_section pfn_valid #define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index df9234e5f478..5bd9492a66ee 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -673,12 +673,6 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) -/* - * Different with flags above, this flag is used only for fsdax mode. It - * indicates that this page->mapping is now under reflink case. - */ -#define PAGE_MAPPING_DAX_SHARED ((void *)0x1) - static __always_inline bool folio_mapping_flags(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; @@ -1106,6 +1100,12 @@ static inline bool is_page_hwpoison(const struct page *page) return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } +static inline bool folio_contain_hwpoisoned_page(struct folio *folio) +{ + return folio_test_hwpoison(folio) || + (folio_test_large(folio) && folio_test_has_hwpoisoned(folio)); +} + bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); @@ -1193,6 +1193,10 @@ static inline int folio_has_private(const struct folio *folio) return !!(folio->flags & PAGE_FLAGS_PRIVATE); } +static inline bool folio_test_large_maybe_mapped_shared(const struct folio *folio) +{ + return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids); +} #undef PF_ANY #undef PF_HEAD #undef PF_NO_TAIL diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 46406f3fe34d..d649b6bbbc87 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -9,10 +9,12 @@ struct page_counter { /* - * Make sure 'usage' does not share cacheline with any other field. The - * memcg->memory.usage is a hot member of struct mem_cgroup. + * Make sure 'usage' does not share cacheline with any other field in + * v2. The memcg->memory.usage is a hot member of struct mem_cgroup. */ atomic_long_t usage; + unsigned long failcnt; /* v1-only field */ + CACHELINE_PADDING(_pad1_); /* effective memory.min and memory.min usage tracking */ @@ -28,12 +30,12 @@ struct page_counter { unsigned long watermark; /* Latest cg2 reset watermark */ unsigned long local_watermark; - unsigned long failcnt; /* Keep all the read most fields in a separete cacheline. 
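
Stepping back to the mmzone.h hunk above: the new for_each_present_section_nr() helper wraps the existing next_present_section_nr() walk. A minimal sketch of a caller, purely illustrative (the function below is not part of this patch):

	/* Count how many memory sections are currently present. */
	static unsigned long count_present_sections(void)
	{
		unsigned long section_nr, nr = 0;

		for_each_present_section_nr(0, section_nr)
			nr++;
		return nr;
	}
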
*/ CACHELINE_PADDING(_pad2_); bool protection_support; + bool track_failcnt; unsigned long min; unsigned long low; unsigned long high; @@ -58,6 +60,7 @@ static inline void page_counter_init(struct page_counter *counter, counter->max = PAGE_COUNTER_MAX; counter->parent = parent; counter->protection_support = protection_support; + counter->track_failcnt = false; } static inline unsigned long page_counter_read(struct page_counter *counter) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e4b48a0dda24..76c817162d2f 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #define __LINUX_PAGE_EXT_H #include <linux/types.h> +#include <linux/mmzone.h> #include <linux/stacktrace.h> struct pglist_data; @@ -69,16 +70,31 @@ extern void page_ext_init(void); static inline void page_ext_init_flatmem_late(void) { } + +static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn) +{ + /* + * page_ext is allocated per memory section. Once we cross a + * memory section, we have to fetch the new pointer. + */ + return next_pfn % PAGES_PER_SECTION; +} #else extern void page_ext_init_flatmem(void); extern void page_ext_init_flatmem_late(void); static inline void page_ext_init(void) { } + +static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn) +{ + return true; +} #endif extern struct page_ext *page_ext_get(const struct page *page); extern void page_ext_put(struct page_ext *page_ext); +extern struct page_ext *page_ext_lookup(unsigned long pfn); static inline void *page_ext_data(struct page_ext *page_ext, struct page_ext_operations *ops) @@ -93,6 +109,83 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr) return next; } +struct page_ext_iter { + unsigned long index; + unsigned long start_pfn; + struct page_ext *page_ext; +}; + +/** + * page_ext_iter_begin() - Prepare for iterating through page extensions. + * @iter: page extension iterator. + * @pfn: PFN of the page we're interested in. + * + * Must be called with RCU read lock taken. + * + * Return: NULL if no page_ext exists for this page. + */ +static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter, + unsigned long pfn) +{ + iter->index = 0; + iter->start_pfn = pfn; + iter->page_ext = page_ext_lookup(pfn); + + return iter->page_ext; +} + +/** + * page_ext_iter_next() - Get next page extension + * @iter: page extension iterator. + * + * Must be called with RCU read lock taken. + * + * Return: NULL if no next page_ext exists. + */ +static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter) +{ + unsigned long pfn; + + if (WARN_ON_ONCE(!iter->page_ext)) + return NULL; + + iter->index++; + pfn = iter->start_pfn + iter->index; + + if (page_ext_iter_next_fast_possible(pfn)) + iter->page_ext = page_ext_next(iter->page_ext); + else + iter->page_ext = page_ext_lookup(pfn); + + return iter->page_ext; +} + +/** + * page_ext_iter_get() - Get current page extension + * @iter: page extension iterator. + * + * Return: NULL if no page_ext exists for this iterator. + */ +static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter) +{ + return iter->page_ext; +} + +/** + * for_each_page_ext(): iterate through page_ext objects. 
+ * @__page: the page we're interested in + * @__pgcount: how many pages to iterate through + * @__page_ext: struct page_ext pointer where the current page_ext + * object is returned + * @__iter: struct page_ext_iter object (defined in the stack) + * + * IMPORTANT: must be called with RCU read lock taken. + */ +#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \ + for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\ + __page_ext && __iter.index < __pgcount; \ + __page_ext = page_ext_iter_next(&__iter)) + #else /* !CONFIG_PAGE_EXTENSION */ struct page_ext; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7661be85136c..26baa78f1ca7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -536,26 +536,6 @@ struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); /** - * folio_file_mapping - Find the mapping this folio belongs to. - * @folio: The folio. - * - * For folios which are in the page cache, return the mapping that this - * page belongs to. Folios in the swap cache return the mapping of the - * swap file or swap device where the data is stored. This is different - * from the mapping returned by folio_mapping(). The only reason to - * use it is if, like NFS, you return 0 from ->activate_swapfile. - * - * Do not call this for folios which aren't in the page cache or swap cache. - */ -static inline struct address_space *folio_file_mapping(struct folio *folio) -{ - if (unlikely(folio_test_swapcache(folio))) - return swapcache_mapping(folio); - - return folio->mapping; -} - -/** * folio_flush_mapping - Find the file mapping this folio belongs to. * @folio: The folio. * @@ -575,11 +555,6 @@ static inline struct address_space *folio_flush_mapping(struct folio *folio) return folio_mapping(folio); } -static inline struct address_space *page_file_mapping(struct page *page) -{ - return folio_file_mapping(page_folio(page)); -} - /** * folio_inode - Get the host inode for this folio. * @folio: The folio. diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index ac8c44dd8237..c5e9cac0575e 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -33,7 +33,7 @@ struct disk_stats { #define part_stat_read(part, field) \ ({ \ - typeof((part)->bd_stats->field) res = 0; \ + TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \ unsigned int _cpu; \ for_each_possible_cpu(_cpu) \ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 0fcacb909778..0aeb0e276a3e 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -222,7 +222,7 @@ do { \ } while (0) #define PERCPU_PTR(__p) \ - (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p)) + (TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p)) #ifdef CONFIG_SMP @@ -318,7 +318,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return(stem, variable) \ ({ \ - typeof(variable) pscr_ret__; \ + TYPEOF_UNQUAL(variable) pscr_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr_ret__ = stem##1(variable); break; \ @@ -333,7 +333,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return2(stem, variable, ...) 
\ ({ \ - typeof(variable) pscr2_ret__; \ + TYPEOF_UNQUAL(variable) pscr2_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 3469c4b20105..c74077977830 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -162,74 +162,32 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code } } -static inline void clear_page_tag_ref(struct page *page) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - set_codetag_empty(&ref); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} - -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} +/* Should be called only if mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page); -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +static inline void clear_page_tag_ref(struct page *page) { - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub(&ref, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } + if (mem_alloc_profiling_enabled()) + __clear_page_tag_ref(page); } -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) { struct alloc_tag *tag = NULL; - - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub_check(&ref); - if (ref.ct) - tag = ct_to_alloc_tag(ref.ct); - put_page_tag_ref(handle); - } + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub_check(&ref); + if (ref.ct) + tag = ct_to_alloc_tag(ref.ct); + put_page_tag_ref(handle); } return tag; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) -{ - if (mem_alloc_profiling_enabled() && tag) - this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); -} - void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); void pgalloc_tag_swap(struct folio *new, struct folio *old); @@ -238,11 +196,6 @@ void __init alloc_tag_sec_init(void); #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void clear_page_tag_ref(struct page *page) {} -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) {} -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} diff --git a/include/linux/pgtable.h 
b/include/linux/pgtable.h index 4c107e17c547..e2b705c14945 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -222,10 +222,14 @@ static inline int pmd_dirty(pmd_t pmd) * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be - * up to date. This mode can only be entered and left under the protection of - * the page table locks for all page tables which may be modified. In the UP - * case, this is required so that preemption is disabled, and in the SMP case, - * it must synchronize the delayed page table writes properly on other CPUs. + * up to date. + * + * In the general case, no lock is guaranteed to be held between entry and exit + * of the lazy mode. So the implementation must assume preemption may be enabled + * and cpu migration is possible; it must take steps to be robust against this. + * (In practice, for user PTE updates, the appropriate page table lock(s) are + * held, but for kernel PTE updates, no lock is held). Nesting is not permitted + * and the mode cannot be used in interrupt context. */ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) @@ -287,7 +291,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, { page_table_check_ptes_set(mm, ptep, pte, nr); - arch_enter_lazy_mmu_mode(); for (;;) { set_pte(ptep, pte); if (--nr == 0) @@ -295,7 +298,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, ptep++; pte = pte_next_pfn(pte); } - arch_leave_lazy_mmu_mode(); } #endif #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 27343424225c..9ad134a04b41 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -4,18 +4,7 @@ #include <linux/rcupdate.h> #include <linux/sched/signal.h> - -/* - * rcuwait provides a way of blocking and waking up a single - * task in an rcu-safe manner. - * - * The only time @task is non-nil is when a user is blocked (or - * checking if it needs to) on a condition, and reset as soon as we - * know that the condition has succeeded and are awoken. - */ -struct rcuwait { - struct task_struct __rcu *task; -}; +#include <linux/types.h> #define __RCUWAIT_INITIALIZER(name) \ { .task = NULL, } diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 35f039ecb272..80dc023ac2bf 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -87,6 +87,15 @@ * The decrements dec_and_test() and sub_and_test() also provide acquire * ordering on success. * + * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide + * acquire and release ordering for cases when the memory occupied by the + * object might be reused to store another object. This is important for the + * cases where secondary validation is required to detect such reuse, e.g. + * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after + * the refcount is taken, hence acquire order is necessary. Similarly, when the + * object is initialized, all stores to its attributes should be visible before + * the refcount is set, otherwise a stale attribute value might be used by + * another task which succeeds in taking a refcount to the new object. 
*/ #ifndef _LINUX_REFCOUNT_H @@ -126,6 +135,31 @@ static inline void refcount_set(refcount_t *r, int n) } /** + * refcount_set_release - set a refcount's value with release ordering + * @r: the refcount + * @n: value to which the refcount will be set + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides release memory ordering which will order previous memory operations + * against this store. This ensures all updates to this object are visible + * once the refcount is set and stale values from the object previously + * occupying this memory are overwritten with new ones. + * + * This function should be called only after new object is fully initialized. + * After this call the object should be considered visible to other tasks even + * if it was not yet added into an object collection normally used to discover + * it. This is because other tasks might have discovered the object previously + * occupying the same memory and after memory reuse they can succeed in taking + * refcount to the new object and start using it. + */ +static inline void refcount_set_release(refcount_t *r, int n) +{ + atomic_set_release(&r->refs, n); +} + +/** * refcount_read - get a refcount's value * @r: the refcount * @@ -178,6 +212,71 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) return __refcount_add_not_zero(i, r, NULL); } +static inline __must_check __signed_wrap +bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp, + int limit) +{ + int old = refcount_read(r); + + do { + if (!old) + break; + + if (i > limit - old) { + if (oldp) + *oldp = old; + return false; + } + } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i)); + + if (oldp) + *oldp = old; + + if (unlikely(old < 0 || old + i < 0)) + refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF); + + return old; +} + +static inline __must_check bool +__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit) +{ + return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit); +} + +static inline __must_check __signed_wrap +bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX); +} + +/** + * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0 + * + * @i: the value to add to the refcount + * @r: the refcount + * + * Will saturate at REFCOUNT_SATURATED and WARN. + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides acquire memory ordering on success, it is assumed the caller has + * guaranteed the object memory to be stable (RCU, etc.). It does provide a + * control dependency and thereby orders future stores. See the comment on top. + * + * Use of this function is not recommended for the normal reference counting + * use case in which references are taken and released one at a time. In these + * cases, refcount_inc_not_zero_acquire() should instead be used to increment a + * reference count. 
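
The acquire/release refcount helpers being added here target SLAB_TYPESAFE_BY_RCU-style lookups, where an object's memory may be recycled for a new object while an RCU reader still holds a pointer to it. A condensed sketch of the intended pairing, using a hypothetical object type, lookup helper and put helper (struct obj, obj_table_find(), obj_put()) that are not part of this patch:

	struct obj {
		refcount_t refs;
		u64 key;
	};

	/* Writer: fully initialize the object, then publish the refcount last. */
	static void obj_init(struct obj *obj, u64 key)
	{
		obj->key = key;
		refcount_set_release(&obj->refs, 1);	/* orders the stores above */
	}

	/* Reader: RCU-protected lookup, refcount with acquire, then revalidate. */
	static struct obj *obj_lookup(u64 key)
	{
		struct obj *obj;

		rcu_read_lock();
		obj = obj_table_find(key);			/* hypothetical lookup */
		if (obj && refcount_inc_not_zero_acquire(&obj->refs)) {
			/* Acquire pairs with refcount_set_release(): if the slab
			 * slot was reused, the new object's fields are visible
			 * now, so the identity check below is meaningful. */
			if (obj->key != key) {
				obj_put(obj);			/* hypothetical release */
				obj = NULL;
			}
		} else {
			obj = NULL;
		}
		rcu_read_unlock();
		return obj;
	}
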
+ * + * Return: false if the passed refcount is 0, true otherwise + */ +static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r) +{ + return __refcount_add_not_zero_acquire(i, r, NULL); +} + static inline __signed_wrap void __refcount_add(int i, refcount_t *r, int *oldp) { @@ -236,6 +335,32 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) return __refcount_inc_not_zero(r, NULL); } +static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero_acquire(1, r, oldp); +} + +/** + * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0 + * @r: the refcount to increment + * + * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on + * success. + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides acquire memory ordering on success, it is assumed the caller has + * guaranteed the object memory to be stable (RCU, etc.). It does provide a + * control dependency and thereby orders future stores. See the comment on top. + * + * Return: true if the increment was successful, false otherwise + */ +static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r) +{ + return __refcount_inc_not_zero_acquire(r, NULL); +} + static inline void __refcount_inc(refcount_t *r, int *oldp) { __refcount_add(1, r, oldp); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 683a04088f3f..6b82b618846e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -13,6 +13,7 @@ #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/memremap.h> +#include <linux/bit_spinlock.h> /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -173,6 +174,214 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, struct anon_vma *folio_get_anon_vma(const struct folio *folio); +#ifdef CONFIG_MM_ID +static __always_inline void folio_lock_large_mapcount(struct folio *folio) +{ + bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); +} + +static __always_inline void folio_unlock_large_mapcount(struct folio *folio) +{ + __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); +} + +static inline unsigned int folio_mm_id(const struct folio *folio, int idx) +{ + VM_WARN_ON_ONCE(idx != 0 && idx != 1); + return folio->_mm_id[idx] & MM_ID_MASK; +} + +static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id) +{ + VM_WARN_ON_ONCE(idx != 0 && idx != 1); + folio->_mm_id[idx] &= ~MM_ID_MASK; + folio->_mm_id[idx] |= id; +} + +static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio, + int diff, mm_id_t mm_id) +{ + VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio)); + VM_WARN_ON_ONCE(diff <= 0); + VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX); + + /* + * Make sure we can detect at least one complete PTE mapping of the + * folio in a single MM as "exclusively mapped". This is primarily + * a check on 32bit, where we currently reduce the size of the per-MM + * mapcount to a short. 
+ */ + VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio)); + VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX); + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY && + folio->_mm_id_mapcount[0] != -1); + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY && + folio->_mm_id_mapcount[0] < 0); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY && + folio->_mm_id_mapcount[1] != -1); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY && + folio->_mm_id_mapcount[1] < 0); + VM_WARN_ON_ONCE(!folio_mapped(folio) && + folio_test_large_maybe_mapped_shared(folio)); +} + +static __always_inline void folio_set_large_mapcount(struct folio *folio, + int mapcount, struct vm_area_struct *vma) +{ + __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id); + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY); + + /* Note: mapcounts start at -1. */ + atomic_set(&folio->_large_mapcount, mapcount - 1); + folio->_mm_id_mapcount[0] = mapcount - 1; + folio_set_mm_id(folio, 0, vma->vm_mm->mm_id); +} + +static __always_inline int folio_add_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + const mm_id_t mm_id = vma->vm_mm->mm_id; + int new_mapcount_val; + + folio_lock_large_mapcount(folio); + __folio_large_mapcount_sanity_checks(folio, diff, mm_id); + + new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff; + atomic_set(&folio->_large_mapcount, new_mapcount_val); + + /* + * If a folio is mapped more than once into an MM on 32bit, we + * can in theory overflow the per-MM mapcount (although only for + * fairly large folios), turning it negative. In that case, just + * free up the slot and mark the folio "mapped shared", otherwise + * we might be in trouble when unmapping pages later. + */ + if (folio_mm_id(folio, 0) == mm_id) { + folio->_mm_id_mapcount[0] += diff; + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) { + folio->_mm_id_mapcount[0] = -1; + folio_set_mm_id(folio, 0, MM_ID_DUMMY); + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + } else if (folio_mm_id(folio, 1) == mm_id) { + folio->_mm_id_mapcount[1] += diff; + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) { + folio->_mm_id_mapcount[1] = -1; + folio_set_mm_id(folio, 1, MM_ID_DUMMY); + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) { + folio_set_mm_id(folio, 0, mm_id); + folio->_mm_id_mapcount[0] = diff - 1; + /* We might have other mappings already. */ + if (new_mapcount_val != diff - 1) + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) { + folio_set_mm_id(folio, 1, mm_id); + folio->_mm_id_mapcount[1] = diff - 1; + /* Slot 0 certainly has mappings as well. 
*/ + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + folio_unlock_large_mapcount(folio); + return new_mapcount_val + 1; +} +#define folio_add_large_mapcount folio_add_return_large_mapcount + +static __always_inline int folio_sub_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + const mm_id_t mm_id = vma->vm_mm->mm_id; + int new_mapcount_val; + + folio_lock_large_mapcount(folio); + __folio_large_mapcount_sanity_checks(folio, diff, mm_id); + + new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff; + atomic_set(&folio->_large_mapcount, new_mapcount_val); + + /* + * There are valid corner cases where we might underflow a per-MM + * mapcount (some mappings added when no slot was free, some mappings + * added once a slot was free), so we always set it to -1 once we go + * negative. + */ + if (folio_mm_id(folio, 0) == mm_id) { + folio->_mm_id_mapcount[0] -= diff; + if (folio->_mm_id_mapcount[0] >= 0) + goto out; + folio->_mm_id_mapcount[0] = -1; + folio_set_mm_id(folio, 0, MM_ID_DUMMY); + } else if (folio_mm_id(folio, 1) == mm_id) { + folio->_mm_id_mapcount[1] -= diff; + if (folio->_mm_id_mapcount[1] >= 0) + goto out; + folio->_mm_id_mapcount[1] = -1; + folio_set_mm_id(folio, 1, MM_ID_DUMMY); + } + + /* + * If one MM slot owns all mappings, the folio is mapped exclusively. + * Note that if the folio is now unmapped (new_mapcount_val == -1), both + * slots must be free (mapcount == -1), and we'll also mark it as + * exclusive. + */ + if (folio->_mm_id_mapcount[0] == new_mapcount_val || + folio->_mm_id_mapcount[1] == new_mapcount_val) + folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT; +out: + folio_unlock_large_mapcount(folio); + return new_mapcount_val + 1; +} +#define folio_sub_large_mapcount folio_sub_return_large_mapcount +#else /* !CONFIG_MM_ID */ +/* + * See __folio_rmap_sanity_checks(), we might map large folios even without + * CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now. + */ +static inline void folio_set_large_mapcount(struct folio *folio, int mapcount, + struct vm_area_struct *vma) +{ + /* Note: mapcounts start at -1. */ + atomic_set(&folio->_large_mapcount, mapcount - 1); +} + +static inline void folio_add_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + atomic_add(diff, &folio->_large_mapcount); +} + +static inline int folio_add_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + BUILD_BUG(); +} + +static inline void folio_sub_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + atomic_sub(diff, &folio->_large_mapcount); +} + +static inline int folio_sub_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + BUILD_BUG(); +} +#endif /* CONFIG_MM_ID */ + +#define folio_inc_large_mapcount(folio, vma) \ + folio_add_large_mapcount(folio, 1, vma) +#define folio_inc_return_large_mapcount(folio, vma) \ + folio_add_return_large_mapcount(folio, 1, vma) +#define folio_dec_large_mapcount(folio, vma) \ + folio_sub_large_mapcount(folio, 1, vma) +#define folio_dec_return_large_mapcount(folio, vma) \ + folio_sub_return_large_mapcount(folio, 1, vma) + /* RMAP flags, currently only relevant for some anon rmap operations. 
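
To make the two-slot MM-ID tracking above more concrete, here is an illustrative sequence for a large folio that is fully PTE-mapped by two processes and then unmapped by one of them; the helpers are the ones defined above, while the wrapper function and the vma_a/vma_b parameters are invented for the example:

	/* Illustrative only: how the "maybe mapped shared" state evolves. */
	static void mm_id_walkthrough(struct folio *folio,
				      struct vm_area_struct *vma_a,	/* MM A */
				      struct vm_area_struct *vma_b)	/* MM B */
	{
		const int nr = folio_nr_pages(folio);

		/* First mapping: slot 0 records MM A, folio is "exclusive". */
		folio_set_large_mapcount(folio, nr, vma_a);

		/* MM B maps it as well: slot 1 records MM B and the shared
		 * bit is set, so folio_maybe_mapped_shared() returns true. */
		folio_add_large_mapcount(folio, nr, vma_b);

		/* MM A unmaps: slot 0 is released, and because slot 1 now
		 * owns all remaining mappings the shared bit is cleared;
		 * the folio is reported as exclusive to MM B again. */
		folio_sub_large_mapcount(folio, nr, vma_a);
	}
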
*/ typedef int __bitwise rmap_t; @@ -192,6 +401,7 @@ typedef int __bitwise rmap_t; enum rmap_level { RMAP_LEVEL_PTE = 0, RMAP_LEVEL_PMD, + RMAP_LEVEL_PUD, }; static inline void __folio_rmap_sanity_checks(const struct folio *folio, @@ -228,6 +438,14 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio, VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); break; + case RMAP_LEVEL_PUD: + /* + * Assume that we are creating a single "entire" mapping of the + * folio. + */ + VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio); + VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio); + break; default: VM_WARN_ON_ONCE(true); } @@ -251,12 +469,16 @@ void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, folio_add_file_rmap_ptes(folio, page, 1, vma) void folio_add_file_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); +void folio_add_file_rmap_pud(struct folio *, struct page *, + struct vm_area_struct *); void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_remove_rmap_pte(folio, page, vma) \ folio_remove_rmap_ptes(folio, page, 1, vma) void folio_remove_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); +void folio_remove_rmap_pud(struct folio *, struct page *, + struct vm_area_struct *); void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); @@ -322,7 +544,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio) } static __always_inline void __folio_dup_file_rmap(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + enum rmap_level level) { const int orig_nr_pages = nr_pages; @@ -335,14 +558,17 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, break; } - do { - atomic_inc(&page->_mapcount); - } while (page++, --nr_pages > 0); - atomic_add(orig_nr_pages, &folio->_large_mapcount); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) { + do { + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + } + folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: atomic_inc(&folio->_entire_mapcount); - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, dst_vma); break; } } @@ -352,45 +578,47 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, * @folio: The folio to duplicate the mappings of * @page: The first page to duplicate the mappings of * @nr_pages: The number of pages of which the mapping will be duplicated + * @dst_vma: The destination vm area * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. 
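 *
 * Illustrative sketch of a typical call site (the fork-style copy path,
 * the locals and the set_ptes() step are assumptions, not requirements of
 * this API):
 *
 *	folio_dup_file_rmap_ptes(folio, page, nr_pages, dst_vma);
 *	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr_pages);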
*/ static inline void folio_dup_file_rmap_ptes(struct folio *folio, - struct page *page, int nr_pages) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma) { - __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE); } static __always_inline void folio_dup_file_rmap_pte(struct folio *folio, - struct page *page) + struct page *page, struct vm_area_struct *dst_vma) { - __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE); } /** * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio * @folio: The folio to duplicate the mapping of * @page: The first page to duplicate the mapping of + * @dst_vma: The destination vm area * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. */ static inline void folio_dup_file_rmap_pmd(struct folio *folio, - struct page *page) + struct page *page, struct vm_area_struct *dst_vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE); #else WARN_ON_ONCE(true); #endif } static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, - struct page *page, int nr_pages, struct vm_area_struct *src_vma, - enum rmap_level level) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, enum rmap_level level) { const int orig_nr_pages = nr_pages; bool maybe_pinned; @@ -432,18 +660,20 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, do { if (PageAnonExclusive(page)) ClearPageAnonExclusive(page); - atomic_inc(&page->_mapcount); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); - atomic_add(orig_nr_pages, &folio->_large_mapcount); + folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: if (PageAnonExclusive(page)) { if (unlikely(maybe_pinned)) return -EBUSY; ClearPageAnonExclusive(page); } atomic_inc(&folio->_entire_mapcount); - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, dst_vma); break; } return 0; @@ -455,6 +685,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, * @folio: The folio to duplicate the mappings of * @page: The first page to duplicate the mappings of * @nr_pages: The number of pages of which the mapping will be duplicated + * @dst_vma: The destination vm area * @src_vma: The vm area from which the mappings are duplicated * * The page range of the folio is defined by [page, page + nr_pages) @@ -473,16 +704,18 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise. 
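 *
 * Illustrative calling pattern during fork() (sketch only; the name of the
 * copy fallback is hypothetical):
 *
 *	if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, nr_pages,
 *						   dst_vma, src_vma)))
 *		return copy_into_new_folios(...);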
*/ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, - struct page *page, int nr_pages, struct vm_area_struct *src_vma) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { - return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, - RMAP_LEVEL_PTE); + return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma, + src_vma, RMAP_LEVEL_PTE); } static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, - struct page *page, struct vm_area_struct *src_vma) + struct page *page, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { - return __folio_try_dup_anon_rmap(folio, page, 1, src_vma, + return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma, RMAP_LEVEL_PTE); } @@ -491,6 +724,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, * of a folio * @folio: The folio to duplicate the mapping of * @page: The first page to duplicate the mapping of + * @dst_vma: The destination vm area * @src_vma: The vm area from which the mapping is duplicated * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) @@ -509,11 +743,12 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. */ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, - struct page *page, struct vm_area_struct *src_vma) + struct page *page, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, - RMAP_LEVEL_PMD); + return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma, + src_vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); return -EBUSY; @@ -663,9 +898,8 @@ int folio_referenced(struct folio *, int is_locked, void try_to_migrate(struct folio *folio, enum ttu_flags flags); void try_to_unmap(struct folio *, enum ttu_flags flags); -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, - unsigned long end, struct page **pages, - void *arg); +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, + void *owner, struct folio **foliop); /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) @@ -739,6 +973,9 @@ unsigned long page_address_in_vma(const struct folio *folio, */ int folio_mkclean(struct folio *); +int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, + unsigned long pfn, unsigned long nr_pages); + int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma); diff --git a/include/linux/slab.h b/include/linux/slab.h index 98e07e9e9e58..d5a8ab98035c 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -137,6 +137,15 @@ enum _slab_flag_bits { * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. * + * Note that object identity check has to be done *after* acquiring a + * reference, therefore user has to ensure proper ordering for loads. + * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU, + * the newly allocated object has to be fully initialized *before* its + * refcount gets initialized and proper ordering for stores is required. 
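 * For illustration only (the object, field and helper names below are
 * hypothetical), a type-safe lookup then looks roughly like:
 *
 *	rcu_read_lock();
 *	obj = lookup_object(key);
 *	if (obj && !refcount_inc_not_zero_acquire(&obj->refcnt))
 *		obj = NULL;
 *	else if (obj && obj->key != key) {
 *		put_object(obj);
 *		obj = NULL;
 *	}
 *	rcu_read_unlock();
 *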
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are + * designed with the proper fences required for reference counting objects + * allocated with SLAB_TYPESAFE_BY_RCU. + * * Note that it is not possible to acquire a lock within a structure * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages @@ -236,12 +245,6 @@ enum _slab_flag_bits { #endif /* - * freeptr_t represents a SLUB freelist pointer, which might be encoded - * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. - */ -typedef struct { unsigned long v; } freeptr_t; - -/* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. diff --git a/include/linux/swap.h b/include/linux/swap.h index a98c757400fe..db46b25a65ae 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -24,7 +24,6 @@ struct pagevec; #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff -#define SWAP_FLAG_PRIO_SHIFT 0 #define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */ #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */ #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */ @@ -74,14 +73,13 @@ static inline int current_is_kswapd(void) * to a special SWP_DEVICE_{READ|WRITE} entry. * * When a page is mapped by the device for exclusive access we set the CPU page - * table entries to special SWP_DEVICE_EXCLUSIVE_* entries. + * table entries to a special SWP_DEVICE_EXCLUSIVE entry. */ #ifdef CONFIG_DEVICE_PRIVATE -#define SWP_DEVICE_NUM 4 +#define SWP_DEVICE_NUM 3 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) -#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) -#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3) +#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) #else #define SWP_DEVICE_NUM 0 #endif @@ -286,12 +284,10 @@ enum swap_cluster_flags { #endif /* - * We assign a cluster to each CPU, so each CPU can allocate swap entry from - * its own cluster and swapout sequentially. The purpose is to optimize swapout - * throughput. + * We keep using same cluster for rotational device so IO will be sequential. + * The purpose is to optimize SWAP throughput on these device. 
*/ -struct percpu_cluster { - local_lock_t lock; /* Protect the percpu_cluster above */ +struct swap_sequential_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; @@ -317,8 +313,7 @@ struct swap_info_struct { atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ - struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ - struct percpu_cluster *global_cluster; /* Use one global cluster for rotating device */ + struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ @@ -461,7 +456,6 @@ void free_pages_and_swap_cache(struct encoded_page **, int); extern atomic_long_t nr_swap_pages; extern long total_swap_pages; extern atomic_t nr_rotate_swap; -extern bool has_usable_swap(void); /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) @@ -475,24 +469,22 @@ static inline long get_nr_swap_pages(void) } extern void si_swapinfo(struct sysinfo *); -swp_entry_t folio_alloc_swap(struct folio *folio); +int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); -extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); -extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry); +extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); struct swap_info_struct *swp_swap_info(swp_entry_t entry); struct backing_dev_info; @@ -575,9 +567,9 @@ static inline int __swap_count(swp_entry_t entry) return 0; } -static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { - return 0; + return false; } static inline int swp_swapcount(swp_entry_t entry) @@ -585,11 +577,9 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -static inline swp_entry_t folio_alloc_swap(struct folio *folio) +static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask) { - swp_entry_t entry; - entry.val = 0; - return entry; + return -EINVAL; } static inline bool folio_free_swap(struct folio *folio) @@ -650,7 +640,6 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) #endif #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry); int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry); static inline int 
mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) @@ -671,10 +660,6 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct folio *folio); #else -static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) -{ -} - static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h deleted file mode 100644 index 840aec3523b2..000000000000 --- a/include/linux/swap_slots.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SWAP_SLOTS_H -#define _LINUX_SWAP_SLOTS_H - -#include <linux/swap.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> - -#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH -#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE) -#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE) - -struct swap_slots_cache { - bool lock_initialized; - struct mutex alloc_lock; /* protects slots, nr, cur */ - swp_entry_t *slots; - int nr; - int cur; - int n_ret; -}; - -void disable_swap_slots_cache_lock(void); -void reenable_swap_slots_cache_unlock(void); -void enable_swap_slots_cache(void); - -extern bool swap_slot_cache_enabled; - -#endif /* _LINUX_SWAP_SLOTS_H */ diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 96f26e29fefe..64ea151a7ae3 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -186,26 +186,16 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry) return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) +static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { - return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset); + return swp_entry(SWP_DEVICE_EXCLUSIVE, offset); } static inline bool is_device_exclusive_entry(swp_entry_t entry) { - return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ || - swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE; + return swp_type(entry) == SWP_DEVICE_EXCLUSIVE; } -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE); -} #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { @@ -227,12 +217,7 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry) return false; } -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) +static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { return swp_entry(0, 0); } @@ -242,10 +227,6 @@ static inline bool is_device_exclusive_entry(swp_entry_t entry) return false; } -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return false; -} #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION diff --git a/include/linux/types.h b/include/linux/types.h index 1c509ce8f7f6..a3d2182c2686 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -248,5 +248,17 @@ typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, 
const void *priv); typedef int (*cmp_func_t)(const void *a, const void *b); +/* + * rcuwait provides a way of blocking and waking up a single + * task in an rcu-safe manner. + * + * The only time @task is non-nil is when a user is blocked (or + * checking if it needs to) on a condition, and reset as soon as we + * know that the condition has succeeded and are awoken. + */ +struct rcuwait { + struct task_struct __rcu *task; +}; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 5a37cb2b6f93..9e15a088ba38 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -41,9 +41,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED, + PGSTEAL_PROACTIVE, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED, + PGSCAN_PROACTIVE, PGSCAN_DIRECT_THROTTLE, PGSCAN_ANON, PGSCAN_FILE, diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 4751e3ecc467..b2ccb6845595 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -504,7 +504,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index caf4f0b12235..eda4b62511f7 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -313,6 +313,30 @@ static inline void cgroup_writeback_umount(struct super_block *sb) /* * mm/page-writeback.c */ +/* consolidated parameters for balance_dirty_pages() and its subroutines */ +struct dirty_throttle_control { +#ifdef CONFIG_CGROUP_WRITEBACK + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ +#endif + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + + unsigned long avail; /* dirtyable */ + unsigned long dirty; /* file_dirty + write + nfs */ + unsigned long thresh; /* dirty threshold */ + unsigned long bg_thresh; /* dirty background threshold */ + unsigned long limit; /* hard dirty limit */ + + unsigned long wb_dirty; /* per-wb counterparts */ + unsigned long wb_thresh; + unsigned long wb_bg_thresh; + + unsigned long pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); void laptop_mode_timer_fn(struct timer_list *t); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 0b618ec04115..78eede109b1a 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1555,6 +1555,8 @@ int xa_get_order(struct xarray *, unsigned long index); int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); +void xas_try_split(struct xa_state *xas, void *entry, unsigned int order); +unsigned int xas_try_split_min_order(unsigned int order); #else static inline int xa_get_order(struct xarray *xa, unsigned long index) { @@ -1576,6 +1578,17 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, gfp_t gfp) { } + +static inline void xas_try_split(struct xa_state *xas, void *entry, + unsigned int order) +{ +} + +static inline unsigned int 
xas_try_split_min_order(unsigned int order) +{ + return 0; +} + #endif /** diff --git a/include/linux/zpool.h b/include/linux/zpool.h index a67d62b79698..52f30e526607 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -4,9 +4,8 @@ * * Copyright (C) 2014 Dan Streetman * - * This is a common frontend for the zbud and zsmalloc memory - * storage pool implementations. Typically, this is used to - * store compressed memory. + * This is a common frontend for the zswap compressed memory storage + * implementations. */ #ifndef _ZPOOL_H_ @@ -14,25 +13,6 @@ struct zpool; -/* - * Control how a handle is mapped. It will be ignored if the - * implementation does not support it. Its use is optional. - * Note that this does not refer to memory protection, it - * refers to how the memory will be copied in/out if copying - * is necessary during mapping; read-write is the safest as - * it copies the existing memory in on map, and copies the - * changed memory back out on unmap. Write-only does not copy - * in the memory and should only be used for initialization. - * If in doubt, use ZPOOL_MM_DEFAULT which is read-write. - */ -enum zpool_mapmode { - ZPOOL_MM_RW, /* normal read-write mapping */ - ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */ - ZPOOL_MM_WO, /* write-only (no copy-in at map time) */ - - ZPOOL_MM_DEFAULT = ZPOOL_MM_RW -}; - bool zpool_has_pool(char *type); struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp); @@ -41,17 +21,19 @@ const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); -bool zpool_malloc_support_movable(struct zpool *pool); - int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, unsigned long *handle); void zpool_free(struct zpool *pool, unsigned long handle); -void *zpool_map_handle(struct zpool *pool, unsigned long handle, - enum zpool_mapmode mm); +void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle, + void *local_copy); + +void zpool_obj_read_end(struct zpool *zpool, unsigned long handle, + void *handle_mem); -void zpool_unmap_handle(struct zpool *pool, unsigned long handle); +void zpool_obj_write(struct zpool *zpool, unsigned long handle, + void *handle_mem, size_t mem_len); u64 zpool_get_total_pages(struct zpool *pool); @@ -81,15 +63,16 @@ struct zpool_driver { void *(*create)(const char *name, gfp_t gfp); void (*destroy)(void *pool); - bool malloc_support_movable; int (*malloc)(void *pool, size_t size, gfp_t gfp, unsigned long *handle); void (*free)(void *pool, unsigned long handle); - bool sleep_mapped; - void *(*map)(void *pool, unsigned long handle, - enum zpool_mapmode mm); - void (*unmap)(void *pool, unsigned long handle); + void *(*obj_read_begin)(void *pool, unsigned long handle, + void *local_copy); + void (*obj_read_end)(void *pool, unsigned long handle, + void *handle_mem); + void (*obj_write)(void *pool, unsigned long handle, + void *handle_mem, size_t mem_len); u64 (*total_pages)(void *pool); }; diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index a48cd0ffe57d..c26baf9fb331 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -16,23 +16,6 @@ #include <linux/types.h> -/* - * zsmalloc mapping modes - * - * NOTE: These only make a difference when a mapped object spans pages. 
- */ -enum zs_mapmode { - ZS_MM_RW, /* normal read-write mapping */ - ZS_MM_RO, /* read-only (no copy-out at unmap time) */ - ZS_MM_WO /* write-only (no copy-in at map time) */ - /* - * NOTE: ZS_MM_WO should only be used for initializing new - * (uninitialized) allocations. Partial writes to already - * initialized allocations should use ZS_MM_RW to preserve the - * existing data. - */ -}; - struct zs_pool_stats { /* How many pages were migrated (freed) */ atomic_long_t pages_compacted; @@ -48,14 +31,18 @@ void zs_free(struct zs_pool *pool, unsigned long obj); size_t zs_huge_class_size(struct zs_pool *pool); -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm); -void zs_unmap_object(struct zs_pool *pool, unsigned long handle); - unsigned long zs_get_total_pages(struct zs_pool *pool); unsigned long zs_compact(struct zs_pool *pool); unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size); void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); + +void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, + void *local_copy); +void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, + void *handle_mem); +void zs_obj_write(struct zs_pool *pool, unsigned long handle, + void *handle_mem, size_t mem_len); + #endif diff --git a/include/linux/zswap.h b/include/linux/zswap.h index d961ead91bf1..30c193a1207e 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -26,7 +26,7 @@ struct zswap_lruvec_state { unsigned long zswap_total_pages(void); bool zswap_store(struct folio *folio); -bool zswap_load(struct folio *folio); +int zswap_load(struct folio *folio); void zswap_invalidate(swp_entry_t swp); int zswap_swapon(int type, unsigned long nr_pages); void zswap_swapoff(int type); @@ -44,9 +44,9 @@ static inline bool zswap_store(struct folio *folio) return false; } -static inline bool zswap_load(struct folio *folio) +static inline int zswap_load(struct folio *folio) { - return false; + return -ENOENT; } static inline void zswap_invalidate(swp_entry_t swp) {} diff --git a/include/net/snmp.h b/include/net/snmp.h index 468a67836e2f..4cb4326dfebe 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -159,7 +159,7 @@ struct linux_tls_mib { #define __SNMP_ADD_STATS64(mib, field, addend) \ do { \ - __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[field] += addend; \ u64_stats_update_end(&ptr->syncp); \ @@ -176,8 +176,7 @@ struct linux_tls_mib { #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) #define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ - __typeof__(*mib) *ptr; \ - ptr = raw_cpu_ptr((mib)); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend; \ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index b37eb0a7060f..f74925a6cf69 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -342,6 +342,84 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info, __entry->nr_mapped) ); +TRACE_EVENT(mm_setup_per_zone_wmarks, + + TP_PROTO(struct zone *zone), + + TP_ARGS(zone), + + TP_STRUCT__entry( + __field(int, node_id) + __string(name, zone->name) + __field(unsigned long, watermark_min) + __field(unsigned long, watermark_low) + __field(unsigned long, watermark_high) + __field(unsigned long, watermark_promo) + ), + + TP_fast_assign( + __entry->node_id 
= zone->zone_pgdat->node_id; + __assign_str(name); + __entry->watermark_min = zone->_watermark[WMARK_MIN]; + __entry->watermark_low = zone->_watermark[WMARK_LOW]; + __entry->watermark_high = zone->_watermark[WMARK_HIGH]; + __entry->watermark_promo = zone->_watermark[WMARK_PROMO]; + ), + + TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu", + __entry->node_id, + __get_str(name), + __entry->watermark_min, + __entry->watermark_low, + __entry->watermark_high, + __entry->watermark_promo) +); + +TRACE_EVENT(mm_setup_per_zone_lowmem_reserve, + + TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve), + + TP_ARGS(zone, upper_zone, lowmem_reserve), + + TP_STRUCT__entry( + __field(int, node_id) + __string(name, zone->name) + __string(upper_name, upper_zone->name) + __field(long, lowmem_reserve) + ), + + TP_fast_assign( + __entry->node_id = zone->zone_pgdat->node_id; + __assign_str(name); + __assign_str(upper_name); + __entry->lowmem_reserve = lowmem_reserve; + ), + + TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld", + __entry->node_id, + __get_str(name), + __get_str(upper_name), + __entry->lowmem_reserve) +); + +TRACE_EVENT(mm_calculate_totalreserve_pages, + + TP_PROTO(unsigned long totalreserve_pages), + + TP_ARGS(totalreserve_pages), + + TP_STRUCT__entry( + __field(unsigned long, totalreserve_pages) + ), + + TP_fast_assign( + __entry->totalreserve_pages = totalreserve_pages; + ), + + TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages) +); + + /* * Required for uniquely and securely identifying mm in rss_stat tracepoint. */ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index a261e86e61fa..0ff388131fc9 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -629,11 +629,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, TRACE_EVENT(balance_dirty_pages, TP_PROTO(struct bdi_writeback *wb, - unsigned long thresh, - unsigned long bg_thresh, - unsigned long dirty, - unsigned long bdi_thresh, - unsigned long bdi_dirty, + struct dirty_throttle_control *dtc, unsigned long dirty_ratelimit, unsigned long task_ratelimit, unsigned long dirtied, @@ -641,7 +637,7 @@ TRACE_EVENT(balance_dirty_pages, long pause, unsigned long start_time), - TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, + TP_ARGS(wb, dtc, dirty_ratelimit, task_ratelimit, dirtied, period, pause, start_time), @@ -650,8 +646,8 @@ TRACE_EVENT(balance_dirty_pages, __field(unsigned long, limit) __field(unsigned long, setpoint) __field(unsigned long, dirty) - __field(unsigned long, bdi_setpoint) - __field(unsigned long, bdi_dirty) + __field(unsigned long, wb_setpoint) + __field(unsigned long, wb_dirty) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned int, dirtied) @@ -664,16 +660,15 @@ TRACE_EVENT(balance_dirty_pages, ), TP_fast_assign( - unsigned long freerun = (thresh + bg_thresh) / 2; + unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); - __entry->limit = global_wb_domain.dirty_limit; - __entry->setpoint = (global_wb_domain.dirty_limit + - freerun) / 2; - __entry->dirty = dirty; - __entry->bdi_setpoint = __entry->setpoint * - bdi_thresh / (thresh + 1); - __entry->bdi_dirty = bdi_dirty; + __entry->limit = dtc->limit; + __entry->setpoint = (dtc->limit + freerun) / 2; + __entry->dirty = dtc->dirty; + __entry->wb_setpoint = __entry->setpoint * + dtc->wb_thresh / (dtc->thresh + 1); + 
__entry->wb_dirty = dtc->wb_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; @@ -689,7 +684,7 @@ TRACE_EVENT(balance_dirty_pages, TP_printk("bdi %s: " "limit=%lu setpoint=%lu dirty=%lu " - "bdi_setpoint=%lu bdi_dirty=%lu " + "wb_setpoint=%lu wb_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu", @@ -697,8 +692,8 @@ TRACE_EVENT(balance_dirty_pages, __entry->limit, __entry->setpoint, __entry->dirty, - __entry->bdi_setpoint, - __entry->bdi_dirty, + __entry->wb_setpoint, + __entry->wb_dirty, __entry->dirty_ratelimit, __entry->task_ratelimit, __entry->dirtied, diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config index 20552f163930..8aafd050b754 100644 --- a/kernel/configs/debug.config +++ b/kernel/configs/debug.config @@ -73,7 +73,6 @@ CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_VM_RB=y CONFIG_DEBUG_VM_VMACACHE=y -CONFIG_GENERIC_PTDUMP=y CONFIG_KASAN=y CONFIG_KASAN_GENERIC=y CONFIG_KASAN_INLINE=y diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 70c84b9d7be3..2746791ce1e2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -173,6 +173,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; + pte_t pte; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); @@ -192,6 +193,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != pvmw.address, old_page); + pte = ptep_get(pvmw.pte); + + /* + * Handle PFN swap PTES, such as device-exclusive ones, that actually + * map pages: simply trigger GUP again to fix it up. + */ + if (unlikely(!pte_present(pte))) { + page_vma_mapped_walk_done(&pvmw); + goto unlock; + } if (new_page) { folio_get(new_folio); @@ -206,7 +217,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); + flush_cache_page(vma, addr, pte_pfn(pte)); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at(mm, addr, pvmw.pte, diff --git a/kernel/fork.c b/kernel/fork.c index 1b659b07ecd5..a8e17ba13790 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -311,11 +311,9 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node) * so memcg accounting is performed manually on assigning/releasing * stacks to tasks. Drop __GFP_ACCOUNT. 
*/ - stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, - VMALLOC_START, VMALLOC_END, + stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP & ~__GFP_ACCOUNT, - PAGE_KERNEL, - 0, node, __builtin_return_address(0)); + node, __builtin_return_address(0)); if (!stack) return -ENOMEM; @@ -436,35 +434,6 @@ static struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; -#ifdef CONFIG_PER_VMA_LOCK - -/* SLAB cache for vm_area_struct.lock */ -static struct kmem_cache *vma_lock_cachep; - -static bool vma_lock_alloc(struct vm_area_struct *vma) -{ - vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = UINT_MAX; - - return true; -} - -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - kmem_cache_free(vma_lock_cachep, vma->vm_lock); -} - -#else /* CONFIG_PER_VMA_LOCK */ - -static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } -static inline void vma_lock_free(struct vm_area_struct *vma) {} - -#endif /* CONFIG_PER_VMA_LOCK */ - struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) { struct vm_area_struct *vma; @@ -474,14 +443,46 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - kmem_cache_free(vm_area_cachep, vma); - return NULL; - } return vma; } +static void vm_area_init_from(const struct vm_area_struct *src, + struct vm_area_struct *dest) +{ + dest->vm_mm = src->vm_mm; + dest->vm_ops = src->vm_ops; + dest->vm_start = src->vm_start; + dest->vm_end = src->vm_end; + dest->anon_vma = src->anon_vma; + dest->vm_pgoff = src->vm_pgoff; + dest->vm_file = src->vm_file; + dest->vm_private_data = src->vm_private_data; + vm_flags_init(dest, src->vm_flags); + memcpy(&dest->vm_page_prot, &src->vm_page_prot, + sizeof(dest->vm_page_prot)); + /* + * src->shared.rb may be modified concurrently when called from + * dup_mmap(), but the clone will reinitialize it. + */ + data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared))); + memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx, + sizeof(dest->vm_userfaultfd_ctx)); +#ifdef CONFIG_ANON_VMA_NAME + dest->anon_name = src->anon_name; +#endif +#ifdef CONFIG_SWAP + memcpy(&dest->swap_readahead_info, &src->swap_readahead_info, + sizeof(dest->swap_readahead_info)); +#endif +#ifndef CONFIG_MMU + dest->vm_region = src->vm_region; +#endif +#ifdef CONFIG_NUMA + dest->vm_policy = src->vm_policy; +#endif +} + struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); @@ -491,15 +492,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ASSERT_EXCLUSIVE_WRITER(orig->vm_file); - /* - * orig->shared.rb may be modified concurrently, but the clone - * will be reinitialized. - */ - data_race(memcpy(new, orig, sizeof(*new))); - if (!vma_lock_alloc(new)) { - kmem_cache_free(vm_area_cachep, new); - return NULL; - } + vm_area_init_from(orig, new); + vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); dup_anon_vma_name(orig, new); @@ -511,35 +505,15 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return new; } -void __vm_area_free(struct vm_area_struct *vma) +void vm_area_free(struct vm_area_struct *vma) { + /* The vma should be detached while being destroyed. 
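	 * (vma_assert_detached() checks exactly that.  With vm_area_struct
	 * now allocated from a SLAB_TYPESAFE_BY_RCU cache, see
	 * proc_caches_init() below, an object can be recycled while lockless
	 * readers still hold a pointer to it, so only detached vmas may
	 * reach this point.)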
*/ + vma_assert_detached(vma); vma_numab_state_free(vma); free_anon_vma_name(vma); - vma_lock_free(vma); kmem_cache_free(vm_area_cachep, vma); } -#ifdef CONFIG_PER_VMA_LOCK -static void vm_area_free_rcu_cb(struct rcu_head *head) -{ - struct vm_area_struct *vma = container_of(head, struct vm_area_struct, - vm_rcu); - - /* The vma should not be locked while being destroyed. */ - VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); - __vm_area_free(vma); -} -#endif - -void vm_area_free(struct vm_area_struct *vma) -{ -#ifdef CONFIG_PER_VMA_LOCK - call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); -#else - __vm_area_free(vma); -#endif -} - static void account_kernel_stack(struct task_struct *tsk, int account) { if (IS_ENABLED(CONFIG_VMAP_STACK)) { @@ -830,6 +804,36 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ +#ifdef CONFIG_MM_ID +static DEFINE_IDA(mm_ida); + +static inline int mm_alloc_id(struct mm_struct *mm) +{ + int ret; + + ret = ida_alloc_range(&mm_ida, MM_ID_MIN, MM_ID_MAX, GFP_KERNEL); + if (ret < 0) + return ret; + mm->mm_id = ret; + return 0; +} + +static inline void mm_free_id(struct mm_struct *mm) +{ + const mm_id_t id = mm->mm_id; + + mm->mm_id = MM_ID_DUMMY; + if (id == MM_ID_DUMMY) + return; + if (WARN_ON_ONCE(id < MM_ID_MIN || id > MM_ID_MAX)) + return; + ida_free(&mm_ida, id); +} +#else /* !CONFIG_MM_ID */ +static inline int mm_alloc_id(struct mm_struct *mm) { return 0; } +static inline void mm_free_id(struct mm_struct *mm) {} +#endif /* CONFIG_MM_ID */ + static void check_mm(struct mm_struct *mm) { int i; @@ -933,6 +937,7 @@ void __mmdrop(struct mm_struct *mm) WARN_ON_ONCE(mm == current->active_mm); mm_free_pgd(mm); + mm_free_id(mm); destroy_context(mm); mmu_notifier_subscriptions_destroy(mm); check_mm(mm); @@ -1267,6 +1272,15 @@ static void mm_init_uprobes_state(struct mm_struct *mm) #endif } +static void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); + mm_lock_seqcount_init(mm); +#ifdef CONFIG_PER_VMA_LOCK + rcuwait_init(&mm->vma_writer_wait); +#endif +} + static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { @@ -1308,6 +1322,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, if (mm_alloc_pgd(mm)) goto fail_nopgd; + if (mm_alloc_id(mm)) + goto fail_noid; + if (init_new_context(p, mm)) goto fail_nocontext; @@ -1327,6 +1344,8 @@ fail_pcpu: fail_cid: destroy_context(mm); fail_nocontext: + mm_free_id(mm); +fail_noid: mm_free_pgd(mm); fail_nopgd: free_mm(mm); @@ -3183,6 +3202,11 @@ void __init mm_cache_init(void) void __init proc_caches_init(void) { + struct kmem_cache_args args = { + .use_freeptr_offset = true, + .freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr), + }; + sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| @@ -3199,11 +3223,10 @@ void __init proc_caches_init(void) sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); -#ifdef CONFIG_PER_VMA_LOCK - vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); -#endif + vm_area_cachep = kmem_cache_create("vm_area_struct", + sizeof(struct vm_area_struct), &args, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| + SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); } diff --git a/kernel/locking/percpu-rwsem.c 
b/kernel/locking/percpu-rwsem.c index 6083883c4fe0..d6964fc29f51 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read); #define per_cpu_sum(var) \ ({ \ - typeof(var) __sum = 0; \ + TYPEOF_UNQUAL(var) __sum = 0; \ int cpu; \ compiletime_assert_atomic_type(__sum); \ for_each_possible_cpu(cpu) \ diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 19b45617bdcf..1d893e313614 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) if (!mem_alloc_profiling_enabled()) return; - tag = pgalloc_tag_get(&folio->page); + tag = __pgalloc_tag_get(&folio->page); if (!tag) return; @@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old) if (!mem_alloc_profiling_enabled()) return; - tag_old = pgalloc_tag_get(&old->page); + tag_old = __pgalloc_tag_get(&old->page); if (!tag_old) return; - tag_new = pgalloc_tag_get(&new->page); + tag_new = __pgalloc_tag_get(&new->page); if (!tag_new) return; diff --git a/lib/maple_tree.c b/lib/maple_tree.c index f7153ade1be5..d0bea23fa4bc 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -584,13 +584,10 @@ static __always_inline bool ma_dead_node(const struct maple_node *node) */ static __always_inline bool mte_dead_node(const struct maple_enode *enode) { - struct maple_node *parent, *node; + struct maple_node *node; node = mte_to_node(enode); - /* Do not reorder reads from the node prior to the parent check */ - smp_rmb(); - parent = mte_parent(enode); - return (parent == node); + return ma_dead_node(node); } /* @@ -1245,7 +1242,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) if (mas->mas_flags & MA_STATE_PREALLOC) { if (allocated) return; - BUG_ON(!allocated); WARN_ON(!allocated); } @@ -1353,7 +1349,7 @@ static void mas_node_count(struct ma_state *mas, int count) * mas_start() - Sets up maple state for operations. * @mas: The maple state. * - * If mas->status == mas_start, then set the min, max and depth to + * If mas->status == ma_start, then set the min, max and depth to * defaults. * * Return: diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 056f2e411d7b..5b144bc5c4ec 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -195,7 +195,8 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp) static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) { - return container_of(page->pgmap, struct dmirror_chunk, pagemap); + return container_of(page_pgmap(page), struct dmirror_chunk, + pagemap); } static struct dmirror_device *dmirror_page_to_device(struct page *page) @@ -706,34 +707,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start, return 0; } -static int dmirror_atomic_map(unsigned long start, unsigned long end, - struct page **pages, struct dmirror *dmirror) +static int dmirror_atomic_map(unsigned long addr, struct page *page, + struct dmirror *dmirror) { - unsigned long pfn, mapped = 0; - int i; + void *entry; /* Map the migrated pages into the device's page tables. 
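	 * (The page pointer is tagged with DPT_XA_TAG_ATOMIC before being
	 * stored, so that later lookups in dmirror->pt, e.g. by
	 * dmirror_check_atomic(), can distinguish entries mapped for
	 * exclusive/atomic access from pages that were migrated to the
	 * device.)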
*/ mutex_lock(&dmirror->mutex); - for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) { - void *entry; - - if (!pages[i]) - continue; - - entry = pages[i]; - entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC); - entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); - if (xa_is_err(entry)) { - mutex_unlock(&dmirror->mutex); - return xa_err(entry); - } - - mapped++; + entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC); + entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC); + if (xa_is_err(entry)) { + mutex_unlock(&dmirror->mutex); + return xa_err(entry); } mutex_unlock(&dmirror->mutex); - return mapped; + return 0; } static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, @@ -780,10 +770,8 @@ static int dmirror_exclusive(struct dmirror *dmirror, unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; - struct page *pages[64]; struct dmirror_bounce bounce; - unsigned long next; - int ret; + int ret = 0; start = cmd->addr; end = start + size; @@ -795,36 +783,26 @@ static int dmirror_exclusive(struct dmirror *dmirror, return -EINVAL; mmap_read_lock(mm); - for (addr = start; addr < end; addr = next) { - unsigned long mapped = 0; - int i; - - next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT)); + for (addr = start; !ret && addr < end; addr += PAGE_SIZE) { + struct folio *folio; + struct page *page; - ret = make_device_exclusive_range(mm, addr, next, pages, NULL); - /* - * Do dmirror_atomic_map() iff all pages are marked for - * exclusive access to avoid accessing uninitialized - * fields of pages. - */ - if (ret == (next - addr) >> PAGE_SHIFT) - mapped = dmirror_atomic_map(addr, next, pages, dmirror); - for (i = 0; i < ret; i++) { - if (pages[i]) { - unlock_page(pages[i]); - put_page(pages[i]); - } + page = make_device_exclusive(mm, addr, NULL, &folio); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + break; } - if (addr + (mapped << PAGE_SHIFT) < next) { - mmap_read_unlock(mm); - mmput(mm); - return -EBUSY; - } + ret = dmirror_atomic_map(addr, page, dmirror); + folio_unlock(folio); + folio_put(folio); } mmap_read_unlock(mm); mmput(mm); + if (ret) + return ret; + /* Return the migrated data for verification. 
*/ ret = dmirror_bounce_init(&bounce, start, size); if (ret) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 0e865bab4a10..080a39d22e73 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1858,6 +1858,54 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_destroy(xa); } +static void check_split_2(struct xarray *xa, unsigned long index, + unsigned int order, unsigned int new_order) +{ + XA_STATE_ORDER(xas, xa, index, new_order); + unsigned int i, found; + void *entry; + + xa_store_order(xa, index, order, xa, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_1); + + /* allocate a node for xas_try_split() */ + xas_set_err(&xas, -ENOMEM); + XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL)); + + xas_lock(&xas); + xas_try_split(&xas, xa, order); + if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) && + new_order < order - 1) { + XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL); + xas_unlock(&xas); + goto out; + } + for (i = 0; i < (1 << order); i += (1 << new_order)) + __xa_store(xa, index + i, xa_mk_index(index + i), 0); + xas_unlock(&xas); + + for (i = 0; i < (1 << order); i++) { + unsigned int val = index + (i & ~((1 << new_order) - 1)); + XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); + } + + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + + xas_set_order(&xas, index, 0); + found = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { + found++; + XA_BUG_ON(xa, xa_is_internal(entry)); + } + rcu_read_unlock(); + XA_BUG_ON(xa, found != 1 << (order - new_order)); +out: + xas_destroy(&xas); + xa_destroy(xa); +} + static noinline void check_split(struct xarray *xa) { unsigned int order, new_order; @@ -1869,6 +1917,10 @@ static noinline void check_split(struct xarray *xa) check_split_1(xa, 0, order, new_order); check_split_1(xa, 1UL << order, order, new_order); check_split_1(xa, 3UL << order, order, new_order); + + check_split_2(xa, 0, order, new_order); + check_split_2(xa, 1UL << order, order, new_order); + check_split_2(xa, 3UL << order, order, new_order); } } } diff --git a/lib/xarray.c b/lib/xarray.c index 116e9286c64e..9644b18af18d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas) xas->xa_alloc = node = next; } } +EXPORT_SYMBOL_GPL(xas_destroy); /** * xas_nomem() - Allocate memory if needed. @@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset, } } +static void __xas_init_node_for_split(struct xa_state *xas, + struct xa_node *node, void *entry) +{ + unsigned int i; + void *sibling = NULL; + unsigned int mask = xas->xa_sibs; + + if (!node) + return; + node->array = xas->xa; + for (i = 0; i < XA_CHUNK_SIZE; i++) { + if ((i & mask) == 0) { + RCU_INIT_POINTER(node->slots[i], entry); + sibling = xa_mk_sibling(i); + } else { + RCU_INIT_POINTER(node->slots[i], sibling); + } + } +} + /** * xas_split_alloc() - Allocate memory for splitting an entry. * @xas: XArray operation state. 
@@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, gfp_t gfp) { unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; - unsigned int mask = xas->xa_sibs; /* XXX: no support for splitting really large entries yet */ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order)) @@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, return; do { - unsigned int i; - void *sibling = NULL; struct xa_node *node; node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!node) goto nomem; - node->array = xas->xa; - for (i = 0; i < XA_CHUNK_SIZE; i++) { - if ((i & mask) == 0) { - RCU_INIT_POINTER(node->slots[i], entry); - sibling = xa_mk_sibling(i); - } else { - RCU_INIT_POINTER(node->slots[i], sibling); - } - } + + __xas_init_node_for_split(xas, node, entry); RCU_INIT_POINTER(node->parent, xas->xa_alloc); xas->xa_alloc = node; } while (sibs-- > 0); @@ -1122,6 +1133,128 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); + +/** + * xas_try_split_min_order() - Minimal split order xas_try_split() can accept + * @order: Current entry order. + * + * xas_try_split() can split a multi-index entry to smaller than @order - 1 if + * no new xa_node is needed. This function provides the minimal order + * xas_try_split() supports. + * + * Return: the minimal order xas_try_split() supports + * + * Context: Any context. + * + */ +unsigned int xas_try_split_min_order(unsigned int order) +{ + if (order % XA_CHUNK_SHIFT == 0) + return order == 0 ? 0 : order - 1; + + return order - (order % XA_CHUNK_SHIFT); +} +EXPORT_SYMBOL_GPL(xas_try_split_min_order); + +/** + * xas_try_split() - Try to split a multi-index entry. + * @xas: XArray operation state. + * @entry: New entry to store in the array. + * @order: Current entry order. + * + * The size of the new entries is set in @xas. The value in @entry is + * copied to all the replacement entries. If and only if one new xa_node is + * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is + * NULL. If more new xa_node are needed, the function gives EINVAL error. + * + * NOTE: use xas_try_split_min_order() to get next split order instead of + * @order - 1 if you want to minmize xas_try_split() calls. + * + * Context: Any context. The caller should hold the xa_lock. + */ +void xas_try_split(struct xa_state *xas, void *entry, unsigned int order) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int offset, marks; + struct xa_node *node; + void *curr = xas_load(xas); + int values = 0; + gfp_t gfp = GFP_NOWAIT; + + node = xas->xa_node; + if (xas_top(node)) + return; + + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + + marks = node_get_marks(node, xas->xa_offset); + + offset = xas->xa_offset + sibs; + + if (xas->xa_shift < node->shift) { + struct xa_node *child = xas->xa_alloc; + unsigned int expected_sibs = + (1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1; + + /* + * No support for splitting sibling entries + * (horizontally) or cascade split (vertically), which + * requires two or more new xa_nodes. + * Since if one xa_node allocation fails, + * it is hard to free the prior allocations. 
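		 * (For example, assuming the default XA_CHUNK_SHIFT of 6: an
		 * order-9 entry can be split down to order 6 without any
		 * allocation, and an order-6 entry can be split to order 5
		 * with exactly one new node, but splitting order 9 straight
		 * to order 5 would need several new nodes and is rejected
		 * here with -EINVAL.)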
+ */ + if (sibs || xas->xa_sibs != expected_sibs) { + xas_destroy(xas); + xas_set_err(xas, -EINVAL); + return; + } + + if (!child) { + child = kmem_cache_alloc_lru(radix_tree_node_cachep, + xas->xa_lru, gfp); + if (!child) { + xas_destroy(xas); + xas_set_err(xas, -ENOMEM); + return; + } + RCU_INIT_POINTER(child->parent, xas->xa_alloc); + } + __xas_init_node_for_split(xas, child, entry); + + xas->xa_alloc = rcu_dereference_raw(child->parent); + child->shift = node->shift - XA_CHUNK_SHIFT; + child->offset = offset; + child->count = XA_CHUNK_SIZE; + child->nr_values = xa_is_value(entry) ? + XA_CHUNK_SIZE : 0; + RCU_INIT_POINTER(child->parent, node); + node_set_marks(node, offset, child, xas->xa_sibs, + marks); + rcu_assign_pointer(node->slots[offset], + xa_mk_node(child)); + if (xa_is_value(curr)) + values--; + xas_update(xas, child); + + } else { + do { + unsigned int canon = offset - xas->xa_sibs; + + node_set_marks(node, canon, NULL, 0, marks); + rcu_assign_pointer(node->slots[canon], entry); + while (offset > canon) + rcu_assign_pointer(node->slots[offset--], + xa_mk_sibling(canon)); + values += (xa_is_value(entry) - xa_is_value(curr)) * + (xas->xa_sibs + 1); + } while (offset-- > xas->xa_offset); + } + + node->nr_values += values; + xas_update(xas, node); +} +EXPORT_SYMBOL_GPL(xas_try_split); #endif /** diff --git a/mm/Kconfig b/mm/Kconfig index 0b7f4bb5cb80..d3fb3762887b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -129,7 +129,6 @@ choice prompt "Default allocator" depends on ZSWAP default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU - default ZSWAP_ZPOOL_DEFAULT_ZBUD help Selects the default allocator for the compressed cache for swap pages. @@ -140,21 +139,6 @@ choice The selection made here can be overridden by using the kernel command line 'zswap.zpool=' option. -config ZSWAP_ZPOOL_DEFAULT_ZBUD - bool "zbud" - select ZBUD - help - Use the zbud allocator as the default allocator. - -config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED - bool "z3foldi (DEPRECATED)" - select Z3FOLD_DEPRECATED - help - Use the z3fold allocator as the default allocator. - - Deprecated and scheduled for removal in a few cycles, - see CONFIG_Z3FOLD_DEPRECATED. - config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC bool "zsmalloc" select ZSMALLOC @@ -165,40 +149,9 @@ endchoice config ZSWAP_ZPOOL_DEFAULT string depends on ZSWAP - default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD - default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC default "" -config ZBUD - tristate "2:1 compression allocator (zbud)" - depends on ZSWAP - help - A special purpose allocator for storing compressed pages. - It is designed to store up to two compressed pages per physical - page. While this design limits storage density, it has simple and - deterministic reclaim properties that make it preferable to a higher - density approach when reclaim will be used. - -config Z3FOLD_DEPRECATED - tristate "3:1 compression allocator (z3fold) (DEPRECATED)" - depends on ZSWAP - help - Deprecated and scheduled for removal in a few cycles. If you have - a good reason for using Z3FOLD over ZSMALLOC, please contact - linux-mm@kvack.org and the zswap maintainers. - - A special purpose allocator for storing compressed pages. - It is designed to store up to three compressed pages per physical - page. It is a ZBUD derivative so the simplicity and determinism are - still there. 
- -config Z3FOLD - tristate - default y if Z3FOLD_DEPRECATED=y - default m if Z3FOLD_DEPRECATED=m - depends on Z3FOLD_DEPRECATED - config ZSMALLOC tristate prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM) @@ -493,6 +446,9 @@ config SPARSEMEM_VMEMMAP SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise pfn_to_page and page_to_pfn operations. This is the most efficient option when sufficient kernel resources are available. + +config SPARSEMEM_VMEMMAP_PREINIT + bool # # Select this config option from the architecture Kconfig, if it is preferred # to enable the feature of HugeTLB/dev_dax vmemmap optimization. @@ -503,6 +459,9 @@ config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP bool +config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT + bool + config HAVE_MEMBLOCK_PHYS_MAP bool @@ -860,11 +819,15 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_WANTS_THP_SWAP def_bool n +config MM_ID + def_bool n + menuconfig TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT select COMPACTION select XARRAY_MULTI + select MM_ID help Transparent Hugepages allows the kernel to use huge pages and huge tlb transparently to the applications whenever possible. @@ -928,8 +891,25 @@ config READ_ONLY_THP_FOR_FS support of file THPs will be developed in the next few release cycles. +config NO_PAGE_MAPCOUNT + bool "No per-page mapcount (EXPERIMENTAL)" + help + Do not maintain per-page mapcounts for pages part of larger + allocations, such as transparent huge pages. + + When this config option is enabled, some interfaces that relied on + this information will rely on less-precise per-allocation information + instead: for example, using the average per-page mapcount in such + a large allocation instead of the per-page mapcount. + + EXPERIMENTAL because the impact of some changes is still unclear. + endif # TRANSPARENT_HUGEPAGE +# simple helper to make the code a bit easier to read +config PAGE_MAPCOUNT + def_bool !NO_PAGE_MAPCOUNT + # # The architecture supports pgtable leaves that is larger than PAGE_SIZE # diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 41a58536531d..32b65073d0cc 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -186,8 +186,9 @@ config ARCH_HAS_DEBUG_WX config DEBUG_WX bool "Warn on W+X mappings at boot" depends on ARCH_HAS_DEBUG_WX + depends on ARCH_HAS_PTDUMP depends on MMU - select PTDUMP_CORE + select PTDUMP help Generate a warning if any W+X mappings are found at boot. @@ -212,18 +213,18 @@ config DEBUG_WX If in doubt, say "Y". -config GENERIC_PTDUMP +config ARCH_HAS_PTDUMP bool -config PTDUMP_CORE +config PTDUMP bool config PTDUMP_DEBUGFS bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL depends on DEBUG_FS - depends on GENERIC_PTDUMP - select PTDUMP_CORE + depends on ARCH_HAS_PTDUMP + select PTDUMP help Say Y here if you want to show the kernel pagetable layout in a debugfs file. 
This information is only useful for kernel developers diff --git a/mm/Makefile b/mm/Makefile index 850386a67b3e..e7f6bbf8ae5f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -75,10 +75,13 @@ ifdef CONFIG_MMU obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o endif -obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o +obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o +ifdef CONFIG_CMA +obj-$(CONFIG_HUGETLBFS) += hugetlb_cma.o +endif obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o @@ -113,9 +116,7 @@ obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o obj-$(CONFIG_PAGE_OWNER) += page_owner.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o -obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o -obj-$(CONFIG_Z3FOLD) += z3fold.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_NUMA) += numa.o @@ -138,7 +139,7 @@ obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_HMM_MIRROR) += hmm.o obj-$(CONFIG_MEMFD_CREATE) += memfd.o obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o obj-$(CONFIG_IO_MAPPING) += io-mapping.o obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 6597ebea8ae2..d3e00731e262 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -24,6 +24,7 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, balloon_page_insert(b_dev_info, page); unlock_page(page); __count_vm_event(BALLOON_INFLATE); + inc_node_page_state(page, NR_BALLOON_PAGES); } /** @@ -103,6 +104,7 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, __count_vm_event(BALLOON_DEFLATE); list_add(&page->lru, pages); unlock_page(page); + dec_node_page_state(page, NR_BALLOON_PAGES); n_pages++; } spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c index 95f288169a38..b0e2a9fa641f 100644 --- a/mm/bootmem_info.c +++ b/mm/bootmem_info.c @@ -88,7 +88,9 @@ static void __init register_page_bootmem_info_section(unsigned long start_pfn) memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); - register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); + if (!preinited_vmemmap_section(ms)) + register_page_bootmem_memmap(section_nr, memmap, + PAGES_PER_SECTION); usage = ms->usage; page = virt_to_page(usage); @@ -18,6 +18,7 @@ #include <linux/memblock.h> #include <linux/err.h> +#include <linux/list.h> #include <linux/mm.h> #include <linux/sizes.h> #include <linux/slab.h> @@ -33,11 +34,17 @@ struct cma cma_areas[MAX_CMA_AREAS]; unsigned int cma_area_count; -static DEFINE_MUTEX(cma_mutex); + +static int __init __cma_declare_contiguous_nid(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma, + int nid); phys_addr_t cma_get_base(const struct cma *cma) { - return PFN_PHYS(cma->base_pfn); + WARN_ON_ONCE(cma->nranges != 1); + return PFN_PHYS(cma->ranges[0].base_pfn); } unsigned long cma_get_size(const struct cma *cma) @@ -63,9 +70,10 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, * The value 
returned is represented in order_per_bits. */ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, + const struct cma_memrange *cmr, unsigned int align_order) { - return (cma->base_pfn & ((1UL << align_order) - 1)) + return (cmr->base_pfn & ((1UL << align_order) - 1)) >> cma->order_per_bit; } @@ -75,65 +83,122 @@ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; } -static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, - unsigned long count) +static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr, + unsigned long pfn, unsigned long count) { unsigned long bitmap_no, bitmap_count; unsigned long flags; - bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; + bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit; bitmap_count = cma_bitmap_pages_to_bits(cma, count); spin_lock_irqsave(&cma->lock, flags); - bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); + bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count); + cma->available_count += count; spin_unlock_irqrestore(&cma->lock, flags); } -static void __init cma_activate_area(struct cma *cma) +/* + * Check if a CMA area contains no ranges that intersect with + * multiple zones. Store the result in the flags in case + * this gets called more than once. + */ +bool cma_validate_zones(struct cma *cma) { - unsigned long base_pfn = cma->base_pfn, pfn; - struct zone *zone; - - cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); - if (!cma->bitmap) - goto out_error; + int r; + unsigned long base_pfn; + struct cma_memrange *cmr; + bool valid_bit_set; /* - * alloc_contig_range() requires the pfn range specified to be in the - * same zone. Simplify by forcing the entire CMA resv range to be in the - * same zone. + * If already validated, return result of previous check. + * Either the valid or invalid bit will be set if this + * check has already been done. If neither is set, the + * check has not been performed yet. */ - WARN_ON_ONCE(!pfn_valid(base_pfn)); - zone = page_zone(pfn_to_page(base_pfn)); - for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { - WARN_ON_ONCE(!pfn_valid(pfn)); - if (page_zone(pfn_to_page(pfn)) != zone) - goto not_in_zone; + valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags); + if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags)) + return valid_bit_set; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + base_pfn = cmr->base_pfn; + + /* + * alloc_contig_range() requires the pfn range specified + * to be in the same zone. Simplify by forcing the entire + * CMA resv range to be in the same zone. 
+ */ + WARN_ON_ONCE(!pfn_valid(base_pfn)); + if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) { + set_bit(CMA_ZONES_INVALID, &cma->flags); + return false; + } + } + + set_bit(CMA_ZONES_VALID, &cma->flags); + + return true; +} + +static void __init cma_activate_area(struct cma *cma) +{ + unsigned long pfn, end_pfn; + int allocrange, r; + struct cma_memrange *cmr; + unsigned long bitmap_count, count; + + for (allocrange = 0; allocrange < cma->nranges; allocrange++) { + cmr = &cma->ranges[allocrange]; + cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), + GFP_KERNEL); + if (!cmr->bitmap) + goto cleanup; } - for (pfn = base_pfn; pfn < base_pfn + cma->count; - pfn += pageblock_nr_pages) - init_cma_reserved_pageblock(pfn_to_page(pfn)); + if (!cma_validate_zones(cma)) + goto cleanup; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + if (cmr->early_pfn != cmr->base_pfn) { + count = cmr->early_pfn - cmr->base_pfn; + bitmap_count = cma_bitmap_pages_to_bits(cma, count); + bitmap_set(cmr->bitmap, 0, bitmap_count); + } + + for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count; + pfn += pageblock_nr_pages) + init_cma_reserved_pageblock(pfn_to_page(pfn)); + } spin_lock_init(&cma->lock); + mutex_init(&cma->alloc_mutex); + #ifdef CONFIG_CMA_DEBUGFS INIT_HLIST_HEAD(&cma->mem_head); spin_lock_init(&cma->mem_head_lock); #endif + set_bit(CMA_ACTIVATED, &cma->flags); return; -not_in_zone: - bitmap_free(cma->bitmap); -out_error: +cleanup: + for (r = 0; r < allocrange; r++) + bitmap_free(cma->ranges[r].bitmap); + /* Expose all pages to the buddy, they are useless for CMA. */ - if (!cma->reserve_pages_on_error) { - for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) - free_reserved_page(pfn_to_page(pfn)); + if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { + for (r = 0; r < allocrange; r++) { + cmr = &cma->ranges[r]; + end_pfn = cmr->base_pfn + cmr->count; + for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++) + free_reserved_page(pfn_to_page(pfn)); + } } totalcma_pages -= cma->count; - cma->count = 0; + cma->available_count = cma->count = 0; pr_err("CMA area %s could not be activated\n", cma->name); } @@ -150,7 +215,44 @@ core_initcall(cma_init_reserved_areas); void __init cma_reserve_pages_on_error(struct cma *cma) { - cma->reserve_pages_on_error = true; + set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags); +} + +static int __init cma_new_area(const char *name, phys_addr_t size, + unsigned int order_per_bit, + struct cma **res_cma) +{ + struct cma *cma; + + if (cma_area_count == ARRAY_SIZE(cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
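A note for readers following the bitmap rework above: each cma_memrange now has its own allocation bitmap, and cma->available_count is adjusted together with it (under cma->lock in the real code). The standalone model below shows just that bookkeeping, with one byte standing in for one bitmap bit and the locking omitted.

#include <stdio.h>

#define ORDER_PER_BIT	2			/* one bit covers 4 pages */
#define RANGE_PAGES	64
#define RANGE_BITS	(RANGE_PAGES >> ORDER_PER_BIT)

static unsigned char bitmap[RANGE_BITS];	/* one byte per bit, for clarity */
static unsigned long available_count = RANGE_PAGES;

/* cma_bitmap_pages_to_bits(): round pages up to whole bits */
static unsigned long pages_to_bits(unsigned long pages)
{
	unsigned long per_bit = 1UL << ORDER_PER_BIT;

	return (pages + per_bit - 1) / per_bit;
}

static void mark(unsigned long first_page, unsigned long pages, int allocated)
{
	unsigned long bit = first_page >> ORDER_PER_BIT;
	unsigned long nbits = pages_to_bits(pages);

	for (unsigned long i = 0; i < nbits; i++)
		bitmap[bit + i] = allocated;

	if (allocated)
		available_count -= pages;	/* bitmap_set() path in cma_range_alloc() */
	else
		available_count += pages;	/* cma_clear_bitmap() */
}

int main(void)
{
	mark(0, 16, 1);
	mark(32, 8, 1);
	printf("available after allocs: %lu\n", available_count);	/* 40 */
	mark(0, 16, 0);
	printf("available after free:   %lu\n", available_count);	/* 56 */
	return 0;
}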
+ */ + cma = &cma_areas[cma_area_count]; + cma_area_count++; + + if (name) + snprintf(cma->name, CMA_MAX_NAME, "%s", name); + else + snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); + + cma->available_count = cma->count = size >> PAGE_SHIFT; + cma->order_per_bit = order_per_bit; + *res_cma = cma; + totalcma_pages += cma->count; + + return 0; +} + +static void __init cma_drop_area(struct cma *cma) +{ + totalcma_pages -= cma->count; + cma_area_count--; } /** @@ -171,13 +273,9 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma **res_cma) { struct cma *cma; + int ret; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { - pr_err("Not enough slots for CMA reserved regions!\n"); - return -ENOSPC; - } - if (!size || !memblock_is_region_reserved(base, size)) return -EINVAL; @@ -194,25 +292,264 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES)) return -EINVAL; + ret = cma_new_area(name, size, order_per_bit, &cma); + if (ret != 0) + return ret; + + cma->ranges[0].base_pfn = PFN_DOWN(base); + cma->ranges[0].early_pfn = PFN_DOWN(base); + cma->ranges[0].count = cma->count; + cma->nranges = 1; + cma->nid = NUMA_NO_NODE; + + *res_cma = cma; + + return 0; +} + +/* + * Structure used while walking physical memory ranges and finding out + * which one(s) to use for a CMA area. + */ +struct cma_init_memrange { + phys_addr_t base; + phys_addr_t size; + struct list_head list; +}; + +/* + * Work array used during CMA initialization. + */ +static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata; + +static bool __init revsizecmp(struct cma_init_memrange *mlp, + struct cma_init_memrange *mrp) +{ + return mlp->size > mrp->size; +} + +static bool __init basecmp(struct cma_init_memrange *mlp, + struct cma_init_memrange *mrp) +{ + return mlp->base < mrp->base; +} + +/* + * Helper function to create sorted lists. + */ +static void __init list_insert_sorted( + struct list_head *ranges, + struct cma_init_memrange *mrp, + bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh)) +{ + struct list_head *mp; + struct cma_init_memrange *mlp; + + if (list_empty(ranges)) + list_add(&mrp->list, ranges); + else { + list_for_each(mp, ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + if (cmp(mlp, mrp)) + break; + } + __list_add(&mrp->list, mlp->list.prev, &mlp->list); + } +} + +/* + * Create CMA areas with a total size of @total_size. A normal allocation + * for one area is tried first. If that fails, the biggest memblock + * ranges above 4G are selected, and allocated bottom up. + * + * The complexity here is not great, but this function will only be + * called during boot, and the lists operated on have fewer than + * CMA_MAX_RANGES elements (default value: 8). + */ +int __init cma_declare_contiguous_multi(phys_addr_t total_size, + phys_addr_t align, unsigned int order_per_bit, + const char *name, struct cma **res_cma, int nid) +{ + phys_addr_t start, end; + phys_addr_t size, sizesum, sizeleft; + struct cma_init_memrange *mrp, *mlp, *failed; + struct cma_memrange *cmrp; + LIST_HEAD(ranges); + LIST_HEAD(final_ranges); + struct list_head *mp, *next; + int ret, nr = 1; + u64 i; + struct cma *cma; + /* - * Each reserved area must be initialised later, when more kernel - * subsystems (like slab allocator) are available. + * First, try it the normal way, producing just one range. 
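The range-selection fallback implemented below is easier to follow in isolation, so here is a compilable userspace sketch with made-up memblock ranges. It uses qsort() where the kernel has to maintain a bounded sorted list (CMA_MAX_RANGES entries) this early in boot: pick the largest candidate ranges until they cover the request, then lay them out bottom-up and trim the last one.

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long long base, size; };

static int by_size_desc(const void *a, const void *b)
{
	const struct range *l = a, *r = b;

	return (l->size < r->size) - (l->size > r->size);
}

static int by_base_asc(const void *a, const void *b)
{
	const struct range *l = a, *r = b;

	return (l->base > r->base) - (l->base < r->base);
}

int main(void)
{
	struct range cand[] = {				/* free ranges above 4G (made up) */
		{ 0x100000000ULL, 0x20000000ULL },	/* 512 MiB */
		{ 0x180000000ULL, 0x60000000ULL },	/* 1.5 GiB */
		{ 0x240000000ULL, 0x40000000ULL },	/*   1 GiB */
	};
	unsigned long long want = 0x80000000ULL;	/* 2 GiB requested */
	unsigned long long sum = 0, left = want;
	size_t n = sizeof(cand) / sizeof(cand[0]), used = 0;

	qsort(cand, n, sizeof(cand[0]), by_size_desc);	/* largest first */
	while (used < n && sum < want)
		sum += cand[used++].size;
	if (sum < want) {
		fprintf(stderr, "not enough memory in candidate ranges\n");
		return 1;
	}

	qsort(cand, used, sizeof(cand[0]), by_base_asc);	/* bottom-up layout */
	for (size_t i = 0; i < used && left; i++) {
		unsigned long long take = left < cand[i].size ? left : cand[i].size;

		printf("range %zu: base 0x%llx, %llu MiB\n", i, cand[i].base, take >> 20);
		left -= take;
	}
	return 0;
}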
*/ - cma = &cma_areas[cma_area_count]; + ret = __cma_declare_contiguous_nid(0, total_size, 0, align, + order_per_bit, false, name, res_cma, nid); + if (ret != -ENOMEM) + goto out; - if (name) - snprintf(cma->name, CMA_MAX_NAME, name); - else - snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); + /* + * Couldn't find one range that fits our needs, so try multiple + * ranges. + * + * No need to do the alignment checks here, the call to + * cma_declare_contiguous_nid above would have caught + * any issues. With the checks, we know that: + * + * - @align is a power of 2 + * - @align is >= pageblock alignment + * - @size is aligned to @align and to @order_per_bit + * + * So, as long as we create ranges that have a base + * aligned to @align, and a size that is aligned to + * both @align and @order_to_bit, things will work out. + */ + nr = 0; + sizesum = 0; + failed = NULL; - cma->base_pfn = PFN_DOWN(base); - cma->count = size >> PAGE_SHIFT; - cma->order_per_bit = order_per_bit; + ret = cma_new_area(name, total_size, order_per_bit, &cma); + if (ret != 0) + goto out; + + align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES); + /* + * Create a list of ranges above 4G, largest range first. + */ + for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { + if (upper_32_bits(start) == 0) + continue; + + start = ALIGN(start, align); + if (start >= end) + continue; + + end = ALIGN_DOWN(end, align); + if (end <= start) + continue; + + size = end - start; + size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit)); + if (!size) + continue; + sizesum += size; + + pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end); + + /* + * If we don't yet have used the maximum number of + * areas, grab a new one. + * + * If we can't use anymore, see if this range is not + * smaller than the smallest one already recorded. If + * not, re-use the smallest element. + */ + if (nr < CMA_MAX_RANGES) + mrp = &memranges[nr++]; + else { + mrp = list_last_entry(&ranges, + struct cma_init_memrange, list); + if (size < mrp->size) + continue; + list_del(&mrp->list); + sizesum -= mrp->size; + pr_debug("deleted %016llx - %016llx from the list\n", + (u64)mrp->base, (u64)mrp->base + size); + } + mrp->base = start; + mrp->size = size; + + /* + * Now do a sorted insert. + */ + list_insert_sorted(&ranges, mrp, revsizecmp); + pr_debug("added %016llx - %016llx to the list\n", + (u64)mrp->base, (u64)mrp->base + size); + pr_debug("total size now %llu\n", (u64)sizesum); + } + + /* + * There is not enough room in the CMA_MAX_RANGES largest + * ranges, so bail out. + */ + if (sizesum < total_size) { + cma_drop_area(cma); + ret = -ENOMEM; + goto out; + } + + /* + * Found ranges that provide enough combined space. + * Now, sorted them by address, smallest first, because we + * want to mimic a bottom-up memblock allocation. + */ + sizesum = 0; + list_for_each_safe(mp, next, &ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + list_del(mp); + list_insert_sorted(&final_ranges, mlp, basecmp); + sizesum += mlp->size; + if (sizesum >= total_size) + break; + } + + /* + * Walk the final list, and add a CMA range for + * each range, possibly not using the last one fully. + */ + nr = 0; + sizeleft = total_size; + list_for_each(mp, &final_ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + size = min(sizeleft, mlp->size); + if (memblock_reserve(mlp->base, size)) { + /* + * Unexpected error. Could go on to + * the next one, but just abort to + * be safe. 
+ */ + failed = mlp; + break; + } + + pr_debug("created region %d: %016llx - %016llx\n", + nr, (u64)mlp->base, (u64)mlp->base + size); + cmrp = &cma->ranges[nr++]; + cmrp->base_pfn = PHYS_PFN(mlp->base); + cmrp->early_pfn = cmrp->base_pfn; + cmrp->count = size >> PAGE_SHIFT; + + sizeleft -= size; + if (sizeleft == 0) + break; + } + + if (failed) { + list_for_each(mp, &final_ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + if (mlp == failed) + break; + memblock_phys_free(mlp->base, mlp->size); + } + cma_drop_area(cma); + ret = -ENOMEM; + goto out; + } + + cma->nranges = nr; + cma->nid = nid; *res_cma = cma; - cma_area_count++; - totalcma_pages += cma->count; - return 0; +out: + if (ret != 0) + pr_err("Failed to reserve %lu MiB\n", + (unsigned long)total_size / SZ_1M); + else + pr_info("Reserved %lu MiB in %d range%s\n", + (unsigned long)total_size / SZ_1M, nr, + nr > 1 ? "s" : ""); + return ret; } /** @@ -241,6 +578,26 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, bool fixed, const char *name, struct cma **res_cma, int nid) { + int ret; + + ret = __cma_declare_contiguous_nid(base, size, limit, alignment, + order_per_bit, fixed, name, res_cma, nid); + if (ret != 0) + pr_err("Failed to reserve %ld MiB\n", + (unsigned long)size / SZ_1M); + else + pr_info("Reserved %ld MiB at %pa\n", + (unsigned long)size / SZ_1M, &base); + + return ret; +} + +static int __init __cma_declare_contiguous_nid(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma, + int nid) +{ phys_addr_t memblock_end = memblock_end_of_DRAM(); phys_addr_t highmem_start; int ret; @@ -272,10 +629,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, /* Sanitise input arguments. */ alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); if (fixed && base & (alignment - 1)) { - ret = -EINVAL; pr_err("Region at %pa must be aligned to %pa bytes\n", &base, &alignment); - goto err; + return -EINVAL; } base = ALIGN(base, alignment); size = ALIGN(size, alignment); @@ -293,10 +649,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, * low/high memory boundary. 
*/ if (fixed && base < highmem_start && base + size > highmem_start) { - ret = -EINVAL; pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", &base, &highmem_start); - goto err; + return -EINVAL; } /* @@ -308,18 +663,16 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, limit = memblock_end; if (base + size > limit) { - ret = -EINVAL; pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", &size, &base, &limit); - goto err; + return -EINVAL; } /* Reserve memory */ if (fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { - ret = -EBUSY; - goto err; + return -EBUSY; } } else { phys_addr_t addr = 0; @@ -356,10 +709,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, if (!addr) { addr = memblock_alloc_range_nid(size, alignment, base, limit, nid, true); - if (!addr) { - ret = -ENOMEM; - goto err; - } + if (!addr) + return -ENOMEM; } /* @@ -372,86 +723,89 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma); if (ret) - goto free_mem; + memblock_phys_free(base, size); - pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M, - &base, nid); - return 0; + (*res_cma)->nid = nid; -free_mem: - memblock_phys_free(base, size); -err: - pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M, - nid); return ret; } static void cma_debug_show_areas(struct cma *cma) { unsigned long next_zero_bit, next_set_bit, nr_zero; - unsigned long start = 0; - unsigned long nr_part, nr_total = 0; - unsigned long nbits = cma_bitmap_maxno(cma); + unsigned long start; + unsigned long nr_part; + unsigned long nbits; + int r; + struct cma_memrange *cmr; spin_lock_irq(&cma->lock); pr_info("number of available pages: "); - for (;;) { - next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); - if (next_zero_bit >= nbits) - break; - next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); - nr_zero = next_set_bit - next_zero_bit; - nr_part = nr_zero << cma->order_per_bit; - pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part, - next_zero_bit); - nr_total += nr_part; - start = next_zero_bit + nr_zero; + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + + start = 0; + nbits = cma_bitmap_maxno(cma, cmr); + + pr_info("range %d: ", r); + for (;;) { + next_zero_bit = find_next_zero_bit(cmr->bitmap, + nbits, start); + if (next_zero_bit >= nbits) + break; + next_set_bit = find_next_bit(cmr->bitmap, nbits, + next_zero_bit); + nr_zero = next_set_bit - next_zero_bit; + nr_part = nr_zero << cma->order_per_bit; + pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, + next_zero_bit); + start = next_zero_bit + nr_zero; + } + pr_info("\n"); } - pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); + pr_cont("=> %lu free of %lu total pages\n", cma->available_count, + cma->count); spin_unlock_irq(&cma->lock); } -static struct page *__cma_alloc(struct cma *cma, unsigned long count, - unsigned int align, gfp_t gfp) +static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr, + unsigned long count, unsigned int align, + struct page **pagep, gfp_t gfp) { unsigned long mask, offset; unsigned long pfn = -1; unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; - unsigned long i; + int ret = -EBUSY; struct page *page = NULL; - int ret = -ENOMEM; - const char *name = cma ? 
cma->name : NULL; - - trace_cma_alloc_start(name, count, align); - - if (!cma || !cma->count || !cma->bitmap) - return page; - - pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, - (void *)cma, cma->name, count, align); - - if (!count) - return page; mask = cma_bitmap_aligned_mask(cma, align); - offset = cma_bitmap_aligned_offset(cma, align); - bitmap_maxno = cma_bitmap_maxno(cma); + offset = cma_bitmap_aligned_offset(cma, cmr, align); + bitmap_maxno = cma_bitmap_maxno(cma, cmr); bitmap_count = cma_bitmap_pages_to_bits(cma, count); if (bitmap_count > bitmap_maxno) - return page; + goto out; for (;;) { spin_lock_irq(&cma->lock); - bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, + /* + * If the request is larger than the available number + * of pages, stop right away. + */ + if (count > cma->available_count) { + spin_unlock_irq(&cma->lock); + break; + } + bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap, bitmap_maxno, start, bitmap_count, mask, offset); if (bitmap_no >= bitmap_maxno) { spin_unlock_irq(&cma->lock); break; } - bitmap_set(cma->bitmap, bitmap_no, bitmap_count); + bitmap_set(cmr->bitmap, bitmap_no, bitmap_count); + cma->available_count -= count; /* * It's safe to drop the lock here. We've marked this region for * our exclusive use. If the migration fails we will take the @@ -459,16 +813,16 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, */ spin_unlock_irq(&cma->lock); - pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); - mutex_lock(&cma_mutex); + pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); + mutex_lock(&cma->alloc_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp); - mutex_unlock(&cma_mutex); + mutex_unlock(&cma->alloc_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; } - cma_clear_bitmap(cma, pfn, count); + cma_clear_bitmap(cma, cmr, pfn, count); if (ret != -EBUSY) break; @@ -480,6 +834,38 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, /* try again with a bit different memory target */ start = bitmap_no + mask + 1; } +out: + *pagep = page; + return ret; +} + +static struct page *__cma_alloc(struct cma *cma, unsigned long count, + unsigned int align, gfp_t gfp) +{ + struct page *page = NULL; + int ret = -ENOMEM, r; + unsigned long i; + const char *name = cma ? cma->name : NULL; + + trace_cma_alloc_start(name, count, align); + + if (!cma || !cma->count) + return page; + + pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, + (void *)cma, cma->name, count, align); + + if (!count) + return page; + + for (r = 0; r < cma->nranges; r++) { + page = NULL; + + ret = cma_range_alloc(cma, &cma->ranges[r], count, align, + &page, gfp); + if (ret != -EBUSY || page) + break; + } /* * CMA can allocate multiple page blocks, which results in different @@ -498,7 +884,8 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, } pr_debug("%s(): returned %p\n", __func__, page); - trace_cma_alloc_finish(name, pfn, page, count, align, ret); + trace_cma_alloc_finish(name, page ? 
page_to_pfn(page) : 0, + page, count, align, ret); if (page) { count_vm_event(CMA_ALLOC_SUCCESS); cma_sysfs_account_success_pages(cma, count); @@ -541,20 +928,31 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count) { - unsigned long pfn; + unsigned long pfn, end; + int r; + struct cma_memrange *cmr; + bool ret; - if (!cma || !pages) + if (!cma || !pages || count > cma->count) return false; pfn = page_to_pfn(pages); + ret = false; - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { - pr_debug("%s(page %p, count %lu)\n", __func__, - (void *)pages, count); - return false; + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + end = cmr->base_pfn + cmr->count; + if (pfn >= cmr->base_pfn && pfn < end) { + ret = pfn + count <= end; + break; + } } - return true; + if (!ret) + pr_debug("%s(page %p, count %lu)\n", + __func__, (void *)pages, count); + + return ret; } /** @@ -570,19 +968,32 @@ bool cma_pages_valid(struct cma *cma, const struct page *pages, bool cma_release(struct cma *cma, const struct page *pages, unsigned long count) { - unsigned long pfn; + struct cma_memrange *cmr; + unsigned long pfn, end_pfn; + int r; + + pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); if (!cma_pages_valid(cma, pages, count)) return false; - pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); - pfn = page_to_pfn(pages); + end_pfn = pfn + count; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + if (pfn >= cmr->base_pfn && + pfn < (cmr->base_pfn + cmr->count)) { + VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count); + break; + } + } - VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); + if (r == cma->nranges) + return false; free_contig_range(pfn, count); - cma_clear_bitmap(cma, pfn, count); + cma_clear_bitmap(cma, cmr, pfn, count); cma_sysfs_account_release_pages(cma, count); trace_cma_release(cma->name, pfn, pages, count); @@ -610,3 +1021,86 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) return 0; } + +bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end) +{ + int r; + struct cma_memrange *cmr; + unsigned long rstart, rend; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + + rstart = PFN_PHYS(cmr->base_pfn); + rend = PFN_PHYS(cmr->base_pfn + cmr->count); + if (end < rstart) + continue; + if (start >= rend) + continue; + return true; + } + + return false; +} + +/* + * Very basic function to reserve memory from a CMA area that has not + * yet been activated. This is expected to be called early, when the + * system is single-threaded, so there is no locking. The alignment + * checking is restrictive - only pageblock-aligned areas + * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function. + * This keeps things simple, and is enough for the current use case. + * + * The CMA bitmaps have not yet been allocated, so just start + * reserving from the bottom up, using a PFN to keep track + * of what has been reserved. Unreserving is not possible. + * + * The caller is responsible for initializing the page structures + * in the area properly, since this just points to memblock-allocated + * memory. The caller should subsequently use init_cma_pageblock to + * set the migrate type and CMA stats the pageblocks that were reserved. 
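The bump-pointer reservation this comment describes is small enough to model directly; the sketch below is userspace C with PFNs standing in for the phys_to_virt() translation, and with the alignment and activation checks of the real function left out.

#include <stdio.h>

struct memrange { unsigned long base_pfn, early_pfn, count; };

struct cma_model {
	unsigned long available_count;
	int nranges;
	struct memrange ranges[2];
};

/* Returns the first reserved PFN, or 0 on failure. */
static unsigned long reserve_early(struct cma_model *cma, unsigned long pages)
{
	for (int r = 0; r < cma->nranges; r++) {
		struct memrange *cmr = &cma->ranges[r];
		unsigned long avail = cmr->count - (cmr->early_pfn - cmr->base_pfn);

		if (pages <= avail) {
			unsigned long pfn = cmr->early_pfn;

			cmr->early_pfn += pages;	/* bump the pointer */
			cma->available_count -= pages;
			return pfn;
		}
	}
	return 0;
}

int main(void)
{
	struct cma_model cma = {
		.available_count = 768,
		.nranges = 2,
		.ranges = { { 1000, 1000, 512 }, { 4000, 4000, 256 } },
	};

	printf("got pfn %lu\n", reserve_early(&cma, 512));	/* fills range 0 */
	printf("got pfn %lu\n", reserve_early(&cma, 128));	/* falls over to range 1 */
	printf("available: %lu\n", cma.available_count);	/* 128 */
	return 0;
}

When the area is activated later, pages handed out this way are accounted for by pre-setting the corresponding bitmap bits (the early_pfn != base_pfn case in cma_activate_area()).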
+ * + * If the CMA area fails to activate later, memory obtained through + * this interface is not handed to the page allocator, this is + * the responsibility of the caller (e.g. like normal memblock-allocated + * memory). + */ +void __init *cma_reserve_early(struct cma *cma, unsigned long size) +{ + int r; + struct cma_memrange *cmr; + unsigned long available; + void *ret = NULL; + + if (!cma || !cma->count) + return NULL; + /* + * Can only be called early in init. + */ + if (test_bit(CMA_ACTIVATED, &cma->flags)) + return NULL; + + if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES)) + return NULL; + + if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit))) + return NULL; + + size >>= PAGE_SHIFT; + + if (size > cma->available_count) + return NULL; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + available = cmr->count - (cmr->early_pfn - cmr->base_pfn); + if (size <= available) { + ret = phys_to_virt(PFN_PHYS(cmr->early_pfn)); + cmr->early_pfn += size; + cma->available_count -= size; + return ret; + } + } + + return ret; +} @@ -10,18 +10,43 @@ struct cma_kobject { struct cma *cma; }; +/* + * Multi-range support. This can be useful if the size of the allocation + * is not expected to be larger than the alignment (like with hugetlb_cma), + * and the total amount of memory requested, while smaller than the total + * amount of memory available, is large enough that it doesn't fit in a + * single physical memory range because of memory holes. + * + * Fields: + * @base_pfn: physical address of range + * @early_pfn: first PFN not reserved through cma_reserve_early + * @count: size of range + * @bitmap: bitmap of allocated (1 << order_per_bit)-sized chunks. + */ +struct cma_memrange { + unsigned long base_pfn; + unsigned long early_pfn; + unsigned long count; + unsigned long *bitmap; +#ifdef CONFIG_CMA_DEBUGFS + struct debugfs_u32_array dfs_bitmap; +#endif +}; +#define CMA_MAX_RANGES 8 + struct cma { - unsigned long base_pfn; unsigned long count; - unsigned long *bitmap; + unsigned long available_count; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + struct mutex alloc_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; - struct debugfs_u32_array dfs_bitmap; #endif char name[CMA_MAX_NAME]; + int nranges; + struct cma_memrange ranges[CMA_MAX_RANGES]; #ifdef CONFIG_CMA_SYSFS /* the number of CMA page successful allocations */ atomic64_t nr_pages_succeeded; @@ -32,15 +57,25 @@ struct cma { /* kobject requires dynamic object */ struct cma_kobject *cma_kobj; #endif - bool reserve_pages_on_error; + unsigned long flags; + /* NUMA node (NUMA_NO_NODE if unspecified) */ + int nid; +}; + +enum cma_flags { + CMA_RESERVE_PAGES_ON_ERROR, + CMA_ZONES_VALID, + CMA_ZONES_INVALID, + CMA_ACTIVATED, }; extern struct cma cma_areas[MAX_CMA_AREAS]; extern unsigned int cma_area_count; -static inline unsigned long cma_bitmap_maxno(struct cma *cma) +static inline unsigned long cma_bitmap_maxno(struct cma *cma, + struct cma_memrange *cmr) { - return cma->count >> cma->order_per_bit; + return cmr->count >> cma->order_per_bit; } #ifdef CONFIG_CMA_SYSFS diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 602fff89b15f..fdf899532ca0 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -34,13 +34,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n"); static int cma_used_get(void *data, u64 *val) { struct cma *cma = data; - unsigned long used; spin_lock_irq(&cma->lock); - /* pages counter is smaller than 
sizeof(int) */ - used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); + *val = cma->count - cma->available_count; spin_unlock_irq(&cma->lock); - *val = (u64)used << cma->order_per_bit; return 0; } @@ -49,17 +46,26 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n"); static int cma_maxchunk_get(void *data, u64 *val) { struct cma *cma = data; + struct cma_memrange *cmr; unsigned long maxchunk = 0; - unsigned long start, end = 0; - unsigned long bitmap_maxno = cma_bitmap_maxno(cma); + unsigned long start, end; + unsigned long bitmap_maxno; + int r; spin_lock_irq(&cma->lock); - for (;;) { - start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); - if (start >= bitmap_maxno) - break; - end = find_next_bit(cma->bitmap, bitmap_maxno, start); - maxchunk = max(end - start, maxchunk); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + bitmap_maxno = cma_bitmap_maxno(cma, cmr); + end = 0; + for (;;) { + start = find_next_zero_bit(cmr->bitmap, + bitmap_maxno, end); + if (start >= bitmap_maxno) + break; + end = find_next_bit(cmr->bitmap, bitmap_maxno, + start); + maxchunk = max(end - start, maxchunk); + } } spin_unlock_irq(&cma->lock); *val = (u64)maxchunk << cma->order_per_bit; @@ -162,24 +168,41 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n"); static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) { - struct dentry *tmp; + struct dentry *tmp, *dir, *rangedir; + int r; + char rdirname[12]; + struct cma_memrange *cmr; tmp = debugfs_create_dir(cma->name, root_dentry); debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops); debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops); - debugfs_create_file("base_pfn", 0444, tmp, - &cma->base_pfn, &cma_debugfs_fops); debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops); debugfs_create_file("order_per_bit", 0444, tmp, &cma->order_per_bit, &cma_debugfs_fops); debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); - cma->dfs_bitmap.array = (u32 *)cma->bitmap; - cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), - BITS_PER_BYTE * sizeof(u32)); - debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); + rangedir = debugfs_create_dir("ranges", tmp); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + snprintf(rdirname, sizeof(rdirname), "%d", r); + dir = debugfs_create_dir(rdirname, rangedir); + debugfs_create_file("base_pfn", 0444, dir, + &cmr->base_pfn, &cma_debugfs_fops); + cmr->dfs_bitmap.array = (u32 *)cmr->bitmap; + cmr->dfs_bitmap.n_elements = + DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr), + BITS_PER_BYTE * sizeof(u32)); + debugfs_create_u32_array("bitmap", 0444, dir, + &cmr->dfs_bitmap); + } + + /* + * Backward compatible symlinks to range 0 for base_pfn and bitmap. 
+ */ + debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn"); + debugfs_create_symlink("bitmap", tmp, "ranges/0/bitmap"); } static int __init cma_debugfs_init(void) diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c index f50db3973171..97acd3e5a6a5 100644 --- a/mm/cma_sysfs.c +++ b/mm/cma_sysfs.c @@ -62,6 +62,24 @@ static ssize_t release_pages_success_show(struct kobject *kobj, } CMA_ATTR_RO(release_pages_success); +static ssize_t total_pages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%lu\n", cma->count); +} +CMA_ATTR_RO(total_pages); + +static ssize_t available_pages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%lu\n", cma->available_count); +} +CMA_ATTR_RO(available_pages); + static void cma_kobj_release(struct kobject *kobj) { struct cma *cma = cma_from_kobj(kobj); @@ -75,6 +93,8 @@ static struct attribute *cma_attrs[] = { &alloc_pages_success_attr.attr, &alloc_pages_fail_attr.attr, &release_pages_success_attr.attr, + &total_pages_attr.attr, + &available_pages_attr.attr, NULL, }; ATTRIBUTE_GROUPS(cma); diff --git a/mm/compaction.c b/mm/compaction.c index a3203d97123e..139f00c0308a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2328,11 +2328,27 @@ static enum compact_result __compact_finished(struct compact_control *cc) if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE; + /* + * When defrag_mode is enabled, make kcompactd target + * watermarks in whole pageblocks. Because they can be stolen + * without polluting, no further fallback checks are needed. + */ + if (defrag_mode && !cc->direct_compaction) { + if (__zone_watermark_ok(cc->zone, cc->order, + high_wmark_pages(cc->zone), + cc->highest_zoneidx, cc->alloc_flags, + zone_page_state(cc->zone, + NR_FREE_PAGES_BLOCKS))) + return COMPACT_SUCCESS; + + return COMPACT_CONTINUE; + } + /* Direct compactor: Is a suitable page free? */ ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &cc->zone->free_area[order]; - bool can_steal; + bool claim_block; /* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) @@ -2349,7 +2365,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) * other migratetype buddy lists. */ if (find_suitable_fallback(area, order, migratetype, - true, &can_steal) != -1) + true, &claim_block) != -1) /* * Movable pages are OK in any pageblock. If we are * stealing for a non-movable allocation, make sure @@ -2381,40 +2397,42 @@ static enum compact_result compact_finished(struct compact_control *cc) } static bool __compaction_suitable(struct zone *zone, int order, - int highest_zoneidx, - unsigned long wmark_target) + unsigned long watermark, int highest_zoneidx, + unsigned long free_pages) { - unsigned long watermark; /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the - * watermark and alloc_flags have to match, or be more pessimistic than - * the check in __isolate_free_page(). We don't use the direct - * compactor's alloc_flags, as they are not relevant for freepage - * isolation. We however do use the direct compactor's highest_zoneidx - * to skip over zones where lowmem reserves would prevent allocation - * even if compaction succeeds. 
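Since the watermark handling is now split between the caller (which chooses min, low or high) and this helper (which only adds the compaction gap plus, for costly orders, the low-min delta), a small compilable sketch of the resulting arithmetic may help. It assumes compact_gap(order) == 2UL << order and PAGE_ALLOC_COSTLY_ORDER == 3; both values come from current kernels, not from this patch.

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3

static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;		/* assumed definition */
}

static unsigned long compaction_watermark(unsigned long base, unsigned int order,
					  unsigned long min_wmark, unsigned long low_wmark)
{
	unsigned long watermark = base + compact_gap(order);

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		watermark += low_wmark - min_wmark;
	return watermark;
}

int main(void)
{
	unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;

	/* Callers on the reclaim/compaction path pass the min watermark in ... */
	printf("order 2: %lu\n", compaction_watermark(min_wmark, 2, min_wmark, low_wmark));
	printf("order 9: %lu\n", compaction_watermark(min_wmark, 9, min_wmark, low_wmark));
	/* ... while kcompactd under defrag_mode passes the high watermark. */
	printf("order 9, defrag_mode: %lu\n",
	       compaction_watermark(high_wmark, 9, min_wmark, low_wmark));
	return 0;
}

For a min-watermark caller this reproduces the old behaviour (min + gap for cheap orders, low + gap for costly ones); the point of the change is that callers such as kcompactd in defrag_mode can target a different base watermark through the same check.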
- * For costly orders, we require low watermark instead of min for - * compaction to proceed to increase its chances. + * watermark have to match, or be more pessimistic than the check in + * __isolate_free_page(). + * + * For costly orders, we require a higher watermark for compaction to + * proceed to increase its chances. + * + * We use the direct compactor's highest_zoneidx to skip over zones + * where lowmem reserves would prevent allocation even if compaction + * succeeds. + * * ALLOC_CMA is used, as pages in CMA pageblocks are considered - * suitable migration targets + * suitable migration targets. */ - watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? - low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); + if (order > PAGE_ALLOC_COSTLY_ORDER) + watermark += low_wmark_pages(zone) - min_wmark_pages(zone); return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - ALLOC_CMA, wmark_target); + ALLOC_CMA, free_pages); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? */ -bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) +bool compaction_suitable(struct zone *zone, int order, unsigned long watermark, + int highest_zoneidx) { enum compact_result compact_result; bool suitable; - suitable = __compaction_suitable(zone, order, highest_zoneidx, + suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to @@ -2452,6 +2470,7 @@ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) return suitable; } +/* Used by direct reclaimers */ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags) { @@ -2474,8 +2493,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); - if (__compaction_suitable(zone, order, ac->highest_zoneidx, - available)) + if (__compaction_suitable(zone, order, min_wmark_pages(zone), + ac->highest_zoneidx, available)) return true; } @@ -2492,13 +2511,19 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, static enum compact_result compaction_suit_allocation_order(struct zone *zone, unsigned int order, int highest_zoneidx, unsigned int alloc_flags, - bool async) + bool async, bool kcompactd) { + unsigned long free_pages; unsigned long watermark; + if (kcompactd && defrag_mode) + free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); + else + free_pages = zone_page_state(zone, NR_FREE_PAGES); + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, - alloc_flags)) + if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx, + alloc_flags, free_pages)) return COMPACT_SUCCESS; /* @@ -2512,13 +2537,13 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order, */ if (order > PAGE_ALLOC_COSTLY_ORDER && async && !(alloc_flags & ALLOC_CMA)) { - watermark = low_wmark_pages(zone) + compact_gap(order); - if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - 0, zone_page_state(zone, NR_FREE_PAGES))) + if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order), + highest_zoneidx, 0, + zone_page_state(zone, NR_FREE_PAGES))) return COMPACT_SKIPPED; } - if (!compaction_suitable(zone, order, highest_zoneidx)) + if (!compaction_suitable(zone, order, watermark, 
highest_zoneidx)) return COMPACT_SKIPPED; return COMPACT_CONTINUE; @@ -2554,7 +2579,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) ret = compaction_suit_allocation_order(cc->zone, cc->order, cc->highest_zoneidx, cc->alloc_flags, - cc->mode == MIGRATE_ASYNC); + cc->mode == MIGRATE_ASYNC, + !cc->direct_compaction); if (ret != COMPACT_CONTINUE) return ret; } @@ -3048,6 +3074,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) struct zone *zone; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; enum compact_result ret; + unsigned int alloc_flags = defrag_mode ? + ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN; for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { zone = &pgdat->node_zones[zoneid]; @@ -3057,8 +3085,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) ret = compaction_suit_allocation_order(zone, pgdat->kcompactd_max_order, - highest_zoneidx, ALLOC_WMARK_MIN, - false); + highest_zoneidx, alloc_flags, + false, true); if (ret == COMPACT_CONTINUE) return true; } @@ -3081,6 +3109,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, + .alloc_flags = defrag_mode ? ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN, }; enum compact_result ret; @@ -3099,8 +3128,8 @@ static void kcompactd_do_work(pg_data_t *pgdat) continue; ret = compaction_suit_allocation_order(zone, - cc.order, zoneid, ALLOC_WMARK_MIN, - false); + cc.order, zoneid, cc.alloc_flags, + false, true); if (ret != COMPACT_CONTINUE) continue; diff --git a/mm/damon/core.c b/mm/damon/core.c index 384935ef4e65..fc1eba3da419 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -281,9 +281,31 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return filter; } +/** + * damos_filter_for_ops() - Return if the filter is ops-hndled one. + * @type: type of the filter. + * + * Return: true if the filter of @type needs to be handled by ops layer, false + * otherwise. 
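How this helper is used is visible in damos_add_filter() further down: filters are now kept on two lists, and only the core-handled types stay on the old one. A tiny standalone model, restricted to the filter types that appear in this series:

#include <stdio.h>

enum filter_type {
	FILTER_ADDR,
	FILTER_TARGET,
	FILTER_HUGEPAGE_SIZE,
};

static int filter_for_ops(enum filter_type type)
{
	switch (type) {
	case FILTER_ADDR:
	case FILTER_TARGET:
		return 0;		/* core-handled: &scheme->filters */
	default:
		return 1;		/* ops-handled: &scheme->ops_filters */
	}
}

int main(void)
{
	enum filter_type types[] = { FILTER_ADDR, FILTER_TARGET, FILTER_HUGEPAGE_SIZE };
	const char *names[] = { "addr", "target", "hugepage_size" };

	for (int i = 0; i < 3; i++)
		printf("%-14s -> %s\n", names[i],
		       filter_for_ops(types[i]) ? "ops_filters" : "filters");
	return 0;
}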
+ */ +bool damos_filter_for_ops(enum damos_filter_type type) +{ + switch (type) { + case DAMOS_FILTER_TYPE_ADDR: + case DAMOS_FILTER_TYPE_TARGET: + return false; + default: + break; + } + return true; +} + void damos_add_filter(struct damos *s, struct damos_filter *f) { - list_add_tail(&f->list, &s->filters); + if (damos_filter_for_ops(f->type)) + list_add_tail(&f->list, &s->ops_filters); + else + list_add_tail(&f->list, &s->filters); } static void damos_del_filter(struct damos_filter *f) @@ -375,6 +397,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, scheme->next_apply_sis = 0; scheme->walk_completed = false; INIT_LIST_HEAD(&scheme->filters); + INIT_LIST_HEAD(&scheme->ops_filters); scheme->stat = (struct damos_stat){}; INIT_LIST_HEAD(&scheme->list); @@ -502,7 +525,7 @@ struct damon_ctx *damon_new_ctx(void) ctx->attrs.ops_update_interval = 60 * 1000 * 1000; ctx->passed_sample_intervals = 0; - /* These will be set from kdamond_init_intervals_sis() */ + /* These will be set from kdamond_init_ctx() */ ctx->next_aggregation_sis = 0; ctx->next_ops_update_sis = 0; @@ -580,11 +603,25 @@ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, } static void damon_update_monitoring_result(struct damon_region *r, - struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) + struct damon_attrs *old_attrs, struct damon_attrs *new_attrs, + bool aggregating) { - r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses, - old_attrs, new_attrs); - r->nr_accesses_bp = r->nr_accesses * 10000; + if (!aggregating) { + r->nr_accesses = damon_nr_accesses_for_new_attrs( + r->nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->nr_accesses * 10000; + } else { + /* + * if this is called in the middle of the aggregation, reset + * the aggregations we made so far for this aggregation + * interval. In other words, make the status like + * kdamond_reset_aggregated() is called. + */ + r->last_nr_accesses = damon_nr_accesses_for_new_attrs( + r->last_nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->last_nr_accesses * 10000; + r->nr_accesses = 0; + } r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); } @@ -597,7 +634,7 @@ static void damon_update_monitoring_result(struct damon_region *r, * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs. */ static void damon_update_monitoring_results(struct damon_ctx *ctx, - struct damon_attrs *new_attrs) + struct damon_attrs *new_attrs, bool aggregating) { struct damon_attrs *old_attrs = &ctx->attrs; struct damon_target *t; @@ -612,7 +649,26 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, damon_for_each_target(t, ctx) damon_for_each_region(r, t) damon_update_monitoring_result( - r, old_attrs, new_attrs); + r, old_attrs, new_attrs, aggregating); +} + +/* + * damon_valid_intervals_goal() - return if the intervals goal of @attrs is + * valid. 
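Since the auto-tuning goal introduced here constrains the other monitoring attributes, a compact userspace restatement of the validation that follows may be useful: tuning is a no-op when aggrs is zero, and otherwise the configured sampling interval must already sit inside [min_sample_us, max_sample_us].

#include <stdbool.h>
#include <stdio.h>

struct intervals_goal { unsigned long access_bp, aggrs, min_sample_us, max_sample_us; };
struct attrs { unsigned long sample_interval; struct intervals_goal goal; };

static bool valid_intervals_goal(const struct attrs *attrs)
{
	const struct intervals_goal *goal = &attrs->goal;

	if (!goal->aggrs)		/* tuning disabled: nothing to check */
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
	    goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

int main(void)
{
	struct attrs ok  = { 100000, { 400, 3, 5000, 10000000 } };
	struct attrs bad = {   1000, { 400, 3, 5000, 10000000 } };	/* below min_sample_us */

	printf("ok:  %d\nbad: %d\n", valid_intervals_goal(&ok), valid_intervals_goal(&bad));
	return 0;
}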
+ */ +static bool damon_valid_intervals_goal(struct damon_attrs *attrs) +{ + struct damon_intervals_goal *goal = &attrs->intervals_goal; + + /* tuning is disabled */ + if (!goal->aggrs) + return true; + if (goal->min_sample_us > goal->max_sample_us) + return false; + if (attrs->sample_interval < goal->min_sample_us || + goal->max_sample_us < attrs->sample_interval) + return false; + return true; } /** @@ -620,10 +676,10 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, * @ctx: monitoring context * @attrs: monitoring attributes * - * This function should be called while the kdamond is not running, or an - * access check results aggregation is not ongoing (e.g., from - * &struct damon_callback->after_aggregation or - * &struct damon_callback->after_wmarks_check callbacks). + * This function should be called while the kdamond is not running, an access + * check results aggregation is not ongoing (e.g., from &struct + * damon_callback->after_aggregation or &struct + * damon_callback->after_wmarks_check callbacks), or from damon_call(). * * Every time interval is in micro-seconds. * @@ -634,6 +690,11 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) unsigned long sample_interval = attrs->sample_interval ? attrs->sample_interval : 1; struct damos *s; + bool aggregating = ctx->passed_sample_intervals < + ctx->next_aggregation_sis; + + if (!damon_valid_intervals_goal(attrs)) + return -EINVAL; if (attrs->min_nr_regions < 3) return -EINVAL; @@ -642,12 +703,16 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) if (attrs->sample_interval > attrs->aggr_interval) return -EINVAL; + /* calls from core-external doesn't set this. */ + if (!attrs->aggr_samples) + attrs->aggr_samples = attrs->aggr_interval / sample_interval; + ctx->next_aggregation_sis = ctx->passed_sample_intervals + attrs->aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->passed_sample_intervals + attrs->ops_update_interval / sample_interval; - damon_update_monitoring_results(ctx, attrs); + damon_update_monitoring_results(ctx, attrs, aggregating); ctx->attrs = *attrs; damon_for_each_scheme(s, ctx) @@ -777,6 +842,9 @@ static void damos_commit_filter_arg( case DAMOS_FILTER_TYPE_TARGET: dst->target_idx = src->target_idx; break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + dst->sz_range = src->sz_range; + break; default: break; } @@ -790,7 +858,7 @@ static void damos_commit_filter( damos_commit_filter_arg(dst, src); } -static int damos_commit_filters(struct damos *dst, struct damos *src) +static int damos_commit_core_filters(struct damos *dst, struct damos *src) { struct damos_filter *dst_filter, *next, *src_filter, *new_filter; int i = 0, j = 0; @@ -818,6 +886,74 @@ static int damos_commit_filters(struct damos *dst, struct damos *src) return 0; } +static int damos_commit_ops_filters(struct damos *dst, struct damos *src) +{ + struct damos_filter *dst_filter, *next, *src_filter, *new_filter; + int i = 0, j = 0; + + damos_for_each_ops_filter_safe(dst_filter, next, dst) { + src_filter = damos_nth_filter(i++, src); + if (src_filter) + damos_commit_filter(dst_filter, src_filter); + else + damos_destroy_filter(dst_filter); + } + + damos_for_each_ops_filter_safe(src_filter, next, src) { + if (j++ < i) + continue; + + new_filter = damos_new_filter( + src_filter->type, src_filter->matching, + src_filter->allow); + if (!new_filter) + return -ENOMEM; + damos_commit_filter_arg(new_filter, src_filter); + damos_add_filter(dst, new_filter); + } + return 0; +} + +/** + * 
damos_filters_default_reject() - decide whether to reject memory that didn't + * match with any given filter. + * @filters: Given DAMOS filters of a group. + */ +static bool damos_filters_default_reject(struct list_head *filters) +{ + struct damos_filter *last_filter; + + if (list_empty(filters)) + return false; + last_filter = list_last_entry(filters, struct damos_filter, list); + return last_filter->allow; +} + +static void damos_set_filters_default_reject(struct damos *s) +{ + if (!list_empty(&s->ops_filters)) + s->core_filters_default_reject = false; + else + s->core_filters_default_reject = + damos_filters_default_reject(&s->filters); + s->ops_filters_default_reject = + damos_filters_default_reject(&s->ops_filters); +} + +static int damos_commit_filters(struct damos *dst, struct damos *src) +{ + int err; + + err = damos_commit_core_filters(dst, src); + if (err) + return err; + err = damos_commit_ops_filters(dst, src); + if (err) + return err; + damos_set_filters_default_reject(dst); + return 0; +} + static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) { struct damos *s; @@ -1276,6 +1412,65 @@ static void kdamond_reset_aggregated(struct damon_ctx *c) } } +static unsigned long damon_get_intervals_score(struct damon_ctx *c) +{ + struct damon_target *t; + struct damon_region *r; + unsigned long sz_region, max_access_events = 0, access_events = 0; + unsigned long target_access_events; + unsigned long goal_bp = c->attrs.intervals_goal.access_bp; + + damon_for_each_target(t, c) { + damon_for_each_region(r, t) { + sz_region = damon_sz_region(r); + max_access_events += sz_region * c->attrs.aggr_samples; + access_events += sz_region * r->nr_accesses; + } + } + target_access_events = max_access_events * goal_bp / 10000; + return access_events * 10000 / target_access_events; +} + +static unsigned long damon_feed_loop_next_input(unsigned long last_input, + unsigned long score); + +static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) +{ + unsigned long score_bp, adaptation_bp; + + score_bp = damon_get_intervals_score(c); + adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / + 10000; + /* + * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of + * the intervals by rescaling [1,10,000] to [5000, 10,000]. 
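Once adaptation_bp has been derived, the retuning in kdamond_tune_intervals() below is a multiply followed by clamping against the goal's sampling-interval bounds, with the aggregation interval kept at aggr_samples times the new sampling interval. A standalone sketch (the feedback loop itself, damon_feed_loop_next_input(), is not modelled here):

#include <stdio.h>

struct goal { unsigned long min_sample_us, max_sample_us; };

static unsigned long tune_sample_interval(unsigned long sample_us,
					  unsigned long adaptation_bp,
					  const struct goal *goal)
{
	unsigned long new_us = sample_us * adaptation_bp / 10000;

	if (new_us > goal->max_sample_us)
		new_us = goal->max_sample_us;
	if (new_us < goal->min_sample_us)
		new_us = goal->min_sample_us;
	return new_us;
}

int main(void)
{
	struct goal goal = { .min_sample_us = 5000, .max_sample_us = 10000000 };
	unsigned long sample_us = 100000;	/* 100 ms */
	unsigned long aggr_samples = 20;

	/* A factor below 10000 shrinks the intervals ... */
	sample_us = tune_sample_interval(sample_us, 6000, &goal);
	printf("sample %lu us, aggr %lu us\n", sample_us, sample_us * aggr_samples);

	/* ... and a factor above 10000 grows them, within the goal's bounds. */
	sample_us = tune_sample_interval(sample_us, 15000, &goal);
	printf("sample %lu us, aggr %lu us\n", sample_us, sample_us * aggr_samples);
	return 0;
}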
+ */ + if (adaptation_bp <= 10000) + adaptation_bp = 5000 + adaptation_bp / 2; + return adaptation_bp; +} + +static void kdamond_tune_intervals(struct damon_ctx *c) +{ + unsigned long adaptation_bp; + struct damon_attrs new_attrs; + struct damon_intervals_goal *goal; + + adaptation_bp = damon_get_intervals_adaptation_bp(c); + if (adaptation_bp == 10000) + return; + + new_attrs = c->attrs; + goal = &c->attrs.intervals_goal; + new_attrs.sample_interval = min(goal->max_sample_us, + c->attrs.sample_interval * adaptation_bp / 10000); + new_attrs.sample_interval = max(goal->min_sample_us, + new_attrs.sample_interval); + new_attrs.aggr_interval = new_attrs.sample_interval * + c->attrs.aggr_samples; + damon_set_attrs(c, &new_attrs); +} + static void damon_split_region_at(struct damon_target *t, struct damon_region *r, unsigned long sz_r); @@ -1438,7 +1633,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, return !filter->allow; } } - return false; + return s->core_filters_default_reject; } /* @@ -1458,11 +1653,13 @@ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, { struct damos_walk_control *control; - mutex_lock(&ctx->walk_control_lock); + if (s->walk_completed) + return; + control = ctx->walk_control; - mutex_unlock(&ctx->walk_control_lock); if (!control) return; + control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); } @@ -1482,9 +1679,7 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) struct damos *siter; struct damos_walk_control *control; - mutex_lock(&ctx->walk_control_lock); control = ctx->walk_control; - mutex_unlock(&ctx->walk_control_lock); if (!control) return; @@ -1494,10 +1689,11 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) if (!siter->walk_completed) return; } + damon_for_each_scheme(siter, ctx) + siter->walk_completed = false; + complete(&control->completion); - mutex_lock(&ctx->walk_control_lock); ctx->walk_control = NULL; - mutex_unlock(&ctx->walk_control_lock); } /* @@ -1535,7 +1731,6 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, struct timespec64 begin, end; unsigned long sz_applied = 0; unsigned long sz_ops_filter_passed = 0; - int err = 0; /* * We plan to support multiple context per kdamond, as DAMON sysfs * implies with 'nr_contexts' file. Nevertheless, only single context @@ -1575,14 +1770,10 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, if (damos_filter_out(c, t, r, s)) return; ktime_get_coarse_ts64(&begin); - if (c->callback.before_damos_apply) - err = c->callback.before_damos_apply(c, t, r, s); - if (!err) { - trace_damos_before_apply(cidx, sidx, tidx, r, - damon_nr_regions(t), do_trace); - sz_applied = c->ops.apply_scheme(c, t, r, s, - &sz_ops_filter_passed); - } + trace_damos_before_apply(cidx, sidx, tidx, r, + damon_nr_regions(t), do_trace); + sz_applied = c->ops.apply_scheme(c, t, r, s, + &sz_ops_filter_passed); damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - @@ -1844,6 +2035,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) if (!has_schemes_to_apply) return; + mutex_lock(&c->walk_control_lock); damon_for_each_target(t, c) { damon_for_each_region_safe(r, next_r, t) damon_do_apply_schemes(c, t, r); @@ -1856,7 +2048,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c) s->next_apply_sis = c->passed_sample_intervals + (s->apply_interval_us ? 
s->apply_interval_us : c->attrs.aggr_interval) / sample_interval; + s->last_applied = NULL; } + mutex_unlock(&c->walk_control_lock); } /* @@ -2169,7 +2363,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) return -EBUSY; } -static void kdamond_init_intervals_sis(struct damon_ctx *ctx) +static void kdamond_init_ctx(struct damon_ctx *ctx) { unsigned long sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; @@ -2180,11 +2374,14 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx) ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / sample_interval; + ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * + ctx->attrs.intervals_goal.aggrs; damon_for_each_scheme(scheme, ctx) { apply_interval = scheme->apply_interval_us ? scheme->apply_interval_us : ctx->attrs.aggr_interval; scheme->next_apply_sis = apply_interval / sample_interval; + damos_set_filters_default_reject(scheme); } } @@ -2202,12 +2399,10 @@ static int kdamond_fn(void *data) pr_debug("kdamond (%d) starts\n", current->pid); complete(&ctx->kdamond_started); - kdamond_init_intervals_sis(ctx); + kdamond_init_ctx(ctx); if (ctx->ops.init) ctx->ops.init(ctx); - if (ctx->callback.before_start && ctx->callback.before_start(ctx)) - goto done; ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, sizeof(*ctx->regions_score_histogram), GFP_KERNEL); if (!ctx->regions_score_histogram) @@ -2232,10 +2427,6 @@ static int kdamond_fn(void *data) if (ctx->ops.prepare_access_checks) ctx->ops.prepare_access_checks(ctx); - if (ctx->callback.after_sampling && - ctx->callback.after_sampling(ctx)) - break; - kdamond_call(ctx, false); kdamond_usleep(sample_interval); ctx->passed_sample_intervals++; @@ -2253,9 +2444,10 @@ static int kdamond_fn(void *data) } /* - * do kdamond_apply_schemes() after kdamond_merge_regions() if - * possible, to reduce overhead + * do kdamond_call() and kdamond_apply_schemes() after + * kdamond_merge_regions() if possible, to reduce overhead */ + kdamond_call(ctx, false); if (!list_empty(&ctx->schemes)) kdamond_apply_schemes(ctx); else @@ -2264,13 +2456,40 @@ static int kdamond_fn(void *data) sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; if (ctx->passed_sample_intervals >= next_aggregation_sis) { + if (ctx->attrs.intervals_goal.aggrs && + ctx->passed_sample_intervals >= + ctx->next_intervals_tune_sis) { + /* + * ctx->next_aggregation_sis might be updated + * from kdamond_call(). In the case, + * damon_set_attrs() which will be called from + * kdamond_tune_interval() may wrongly think + * this is in the middle of the current + * aggregation, and make aggregation + * information reset for all regions. Then, + * following kdamond_reset_aggregated() call + * will make the region information invalid, + * particularly for ->nr_accesses_bp. + * + * Reset ->next_aggregation_sis to avoid that. + * It will anyway correctly updated after this + * if caluse. + */ + ctx->next_aggregation_sis = + next_aggregation_sis; + ctx->next_intervals_tune_sis += + ctx->attrs.aggr_samples * + ctx->attrs.intervals_goal.aggrs; + kdamond_tune_intervals(ctx); + sample_interval = ctx->attrs.sample_interval ? 
+ ctx->attrs.sample_interval : 1; + + } ctx->next_aggregation_sis = next_aggregation_sis + ctx->attrs.aggr_interval / sample_interval; kdamond_reset_aggregated(ctx); kdamond_split_regions(ctx); - if (ctx->ops.reset_aggregated) - ctx->ops.reset_aggregated(ctx); } if (ctx->passed_sample_intervals >= next_ops_update_sis) { diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d25d99cb5f2b..0db1fc70c84d 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -9,6 +9,8 @@ #include <linux/page_idle.h> #include <linux/pagemap.h> #include <linux/rmap.h> +#include <linux/swap.h> +#include <linux/swapops.h> #include "ops-common.h" @@ -24,7 +26,7 @@ struct folio *damon_get_folio(unsigned long pfn) struct page *page = pfn_to_online_page(pfn); struct folio *folio; - if (!page || PageTail(page)) + if (!page) return NULL; folio = page_folio(page); @@ -39,12 +41,29 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); + pte_t pteval = ptep_get(pte); + struct folio *folio; + bool young = false; + unsigned long pfn; + + if (likely(pte_present(pteval))) + pfn = pte_pfn(pteval); + else + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + folio = damon_get_folio(pfn); if (!folio) return; - if (ptep_clear_young_notify(vma, addr, pte)) + /* + * PFN swap PTEs, such as device-exclusive ones, that actually map pages + * are "old" from a CPU perspective. The MMU notifier takes care of any + * device aspects. + */ + if (likely(pte_present(pteval))) + young |= ptep_test_and_clear_young(vma, addr, pte); + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); + if (young) folio_set_young(folio); folio_set_idle(folio); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index c834aa217835..1b70d3f36046 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -92,12 +92,20 @@ static bool damon_folio_young_one(struct folio *folio, { bool *accessed = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); + pte_t pte; *accessed = false; while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(ptep_get(pvmw.pte)) || + pte = ptep_get(pvmw.pte); + + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. 
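+			 * Hence only present PTEs are checked with pte_young()
+			 * below; device access is still reported via
+			 * mmu_notifier_test_young().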
+ */ + *accessed = (pte_present(pte) && pte_young(pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { @@ -203,11 +211,15 @@ static bool damos_pa_filter_match(struct damos_filter *filter, { bool matched = false; struct mem_cgroup *memcg; + size_t folio_sz; switch (filter->type) { case DAMOS_FILTER_TYPE_ANON: matched = folio_test_anon(folio); break; + case DAMOS_FILTER_TYPE_ACTIVE: + matched = folio_test_active(folio); + break; case DAMOS_FILTER_TYPE_MEMCG: rcu_read_lock(); memcg = folio_memcg_check(folio); @@ -222,6 +234,14 @@ static bool damos_pa_filter_match(struct damos_filter *filter, if (matched) damon_folio_mkold(folio); break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + folio_sz = folio_size(folio); + matched = filter->sz_range.min <= folio_sz && + folio_sz <= filter->sz_range.max; + break; + case DAMOS_FILTER_TYPE_UNMAPPED: + matched = !folio_mapped(folio) || !folio_raw_mapping(folio); + break; default: break; } @@ -239,10 +259,21 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) if (scheme->core_filters_allowed) return false; - damos_for_each_filter(filter, scheme) { + damos_for_each_ops_filter(filter, scheme) { if (damos_pa_filter_match(filter, folio)) return !filter->allow; } + return scheme->ops_filters_default_reject; +} + +static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s) +{ + if (!folio) + return true; + if (folio == s->last_applied) { + folio_put(folio); + return true; + } return false; } @@ -253,9 +284,10 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, LIST_HEAD(folio_list); bool install_young_filter = true; struct damos_filter *filter; + struct folio *folio; /* check access in page level again by default */ - damos_for_each_filter(filter, s) { + damos_for_each_ops_filter(filter, s) { if (filter->type == DAMOS_FILTER_TYPE_YOUNG) { install_young_filter = false; break; @@ -269,11 +301,13 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, damos_add_filter(s, filter); } - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -289,12 +323,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, else list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } if (install_young_filter) damos_destroy_filter(filter); applied = reclaim_pages(&folio_list); cond_resched(); + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -303,12 +339,15 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( unsigned long *sz_filter_passed) { unsigned long addr, applied = 0; + struct folio *folio; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -321,8 +360,10 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( folio_deactivate(folio); applied += folio_nr_pages(folio); put_folio: + addr += folio_size(folio); 
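+		/* drop the reference taken by damon_get_folio() */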
folio_put(folio); } + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -466,12 +507,15 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, { unsigned long addr, applied; LIST_HEAD(folio_list); + struct folio *folio; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) + addr = r->ar.start; + while (addr < r->ar.end) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -482,10 +526,12 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, goto put_folio; list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } applied = damon_pa_migrate_pages(&folio_list, s->target_nid); cond_resched(); + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -493,7 +539,7 @@ static bool damon_pa_scheme_has_filter(struct damos *s) { struct damos_filter *f; - damos_for_each_filter(f, s) + damos_for_each_ops_filter(f, s) return true; return false; } @@ -503,15 +549,15 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, { unsigned long addr; LIST_HEAD(folio_list); + struct folio *folio; if (!damon_pa_scheme_has_filter(s)) return 0; addr = r->ar.start; while (addr < r->ar.end) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { addr += PAGE_SIZE; continue; } @@ -521,6 +567,7 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, addr += folio_size(folio); folio_put(folio); } + s->last_applied = folio; return 0; } @@ -577,7 +624,6 @@ static int __init damon_pa_initcall(void) .update = NULL, .prepare_access_checks = damon_pa_prepare_access_checks, .check_accesses = damon_pa_check_accesses, - .reset_aggregated = NULL, .target_valid = NULL, .cleanup = NULL, .apply_scheme = damon_pa_apply_scheme, diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 98f93ae9f59e..23b562df0839 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -309,26 +309,46 @@ static const struct kobj_type damon_sysfs_stats_ktype = { * filter directory */ +/* + * enum damos_sysfs_filter_handle_layer - Layers handling filters of a dir. 
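+ *
+ * @DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE: for DAMON core layer-handled
+ * filter types only.
+ * @DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS: for operations set layer-handled
+ * filter types only.
+ * @DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH: for both kinds of filter types.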
+ */ +enum damos_sysfs_filter_handle_layer { + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, +}; + struct damon_sysfs_scheme_filter { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; enum damos_filter_type type; bool matching; bool allow; char *memcg_path; struct damon_addr_range addr_range; + struct damon_size_range sz_range; int target_idx; }; -static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void) +static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc( + enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + struct damon_sysfs_scheme_filter *filter; + + filter = kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + if (filter) + filter->handle_layer = layer; + return filter; } /* Should match with enum damos_filter_type */ static const char * const damon_sysfs_scheme_filter_type_strs[] = { "anon", + "active", "memcg", "young", + "hugepage_size", + "unmapped", "addr", "target", }; @@ -343,6 +363,23 @@ static ssize_t type_show(struct kobject *kobj, damon_sysfs_scheme_filter_type_strs[filter->type]); } +static bool damos_sysfs_scheme_filter_valid_type( + enum damos_sysfs_filter_handle_layer layer, + enum damos_filter_type type) +{ + switch (layer) { + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH: + return true; + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE: + return !damos_filter_for_ops(type); + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS: + return damos_filter_for_ops(type); + default: + break; + } + return false; +} + static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { @@ -354,6 +391,9 @@ static ssize_t type_store(struct kobject *kobj, for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) { if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[ type])) { + if (!damos_sysfs_scheme_filter_valid_type( + filter->handle_layer, type)) + break; filter->type = type; ret = count; break; @@ -473,6 +513,44 @@ static ssize_t addr_end_store(struct kobject *kobj, return err ? err : count; } +static ssize_t min_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.min); +} + +static ssize_t min_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.min); + + return err ? err : count; +} + +static ssize_t max_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.max); +} + +static ssize_t max_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.max); + + return err ? 
err : count; +} + static ssize_t damon_target_idx_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -519,6 +597,12 @@ static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr = static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr = __ATTR_RW_MODE(addr_end, 0600); +static struct kobj_attribute damon_sysfs_scheme_filter_min_attr = + __ATTR_RW_MODE(min, 0600); + +static struct kobj_attribute damon_sysfs_scheme_filter_max_attr = + __ATTR_RW_MODE(max, 0600); + static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr = __ATTR_RW_MODE(damon_target_idx, 0600); @@ -529,6 +613,8 @@ static struct attribute *damon_sysfs_scheme_filter_attrs[] = { &damon_sysfs_scheme_filter_memcg_path_attr.attr, &damon_sysfs_scheme_filter_addr_start_attr.attr, &damon_sysfs_scheme_filter_addr_end_attr.attr, + &damon_sysfs_scheme_filter_min_attr.attr, + &damon_sysfs_scheme_filter_max_attr.attr, &damon_sysfs_scheme_filter_damon_target_idx_attr.attr, NULL, }; @@ -546,14 +632,20 @@ static const struct kobj_type damon_sysfs_scheme_filter_ktype = { struct damon_sysfs_scheme_filters { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; struct damon_sysfs_scheme_filter **filters_arr; int nr; }; static struct damon_sysfs_scheme_filters * -damon_sysfs_scheme_filters_alloc(void) +damon_sysfs_scheme_filters_alloc(enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + struct damon_sysfs_scheme_filters *filters; + + filters = kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + if (filters) + filters->handle_layer = layer; + return filters; } static void damon_sysfs_scheme_filters_rm_dirs( @@ -586,7 +678,8 @@ static int damon_sysfs_scheme_filters_add_dirs( filters->filters_arr = filters_arr; for (i = 0; i < nr_filters; i++) { - filter = damon_sysfs_scheme_filter_alloc(); + filter = damon_sysfs_scheme_filter_alloc( + filters->handle_layer); if (!filter) { damon_sysfs_scheme_filters_rm_dirs(filters); return -ENOMEM; @@ -1379,7 +1472,7 @@ static int damon_sysfs_access_pattern_add_range_dir( if (!range) return -ENOMEM; err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype, - &access_pattern->kobj, name); + &access_pattern->kobj, "%s", name); if (err) kobject_put(&range->kobj); else @@ -1455,6 +1548,8 @@ struct damon_sysfs_scheme { unsigned long apply_interval_us; struct damon_sysfs_quotas *quotas; struct damon_sysfs_watermarks *watermarks; + struct damon_sysfs_scheme_filters *core_filters; + struct damon_sysfs_scheme_filters *ops_filters; struct damon_sysfs_scheme_filters *filters; struct damon_sysfs_stats *stats; struct damon_sysfs_scheme_regions *tried_regions; @@ -1555,21 +1650,53 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme) return err; } -static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme) +static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme, + enum damos_sysfs_filter_handle_layer layer, const char *name, + struct damon_sysfs_scheme_filters **filters_ptr) { struct damon_sysfs_scheme_filters *filters = - damon_sysfs_scheme_filters_alloc(); + damon_sysfs_scheme_filters_alloc(layer); int err; if (!filters) return -ENOMEM; err = kobject_init_and_add(&filters->kobj, &damon_sysfs_scheme_filters_ktype, &scheme->kobj, - "filters"); + "%s", name); if (err) kobject_put(&filters->kobj); else - scheme->filters = filters; + *filters_ptr = filters; + return err; +} + +static int 
damos_sysfs_set_filter_dirs(struct damon_sysfs_scheme *scheme) +{ + int err; + + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, "filters", + &scheme->filters); + if (err) + return err; + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, "core_filters", + &scheme->core_filters); + if (err) + goto put_filters_out; + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, "ops_filters", + &scheme->ops_filters); + if (err) + goto put_core_filters_out; + return 0; + +put_core_filters_out: + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; +put_filters_out: + kobject_put(&scheme->filters->kobj); + scheme->filters = NULL; return err; } @@ -1621,7 +1748,7 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) err = damon_sysfs_scheme_set_watermarks(scheme); if (err) goto put_quotas_access_pattern_out; - err = damon_sysfs_scheme_set_filters(scheme); + err = damos_sysfs_set_filter_dirs(scheme); if (err) goto put_watermarks_quotas_access_pattern_out; err = damon_sysfs_scheme_set_stats(scheme); @@ -1636,6 +1763,10 @@ put_tried_regions_out: kobject_put(&scheme->tried_regions->kobj); scheme->tried_regions = NULL; put_filters_watermarks_quotas_access_pattern_out: + kobject_put(&scheme->ops_filters->kobj); + scheme->ops_filters = NULL; + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; kobject_put(&scheme->filters->kobj); scheme->filters = NULL; put_watermarks_quotas_access_pattern_out: @@ -1659,6 +1790,10 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme) kobject_put(&scheme->watermarks->kobj); damon_sysfs_scheme_filters_rm_dirs(scheme->filters); kobject_put(&scheme->filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->core_filters); + kobject_put(&scheme->core_filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->ops_filters); + kobject_put(&scheme->ops_filters->kobj); kobject_put(&scheme->stats->kobj); damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions); kobject_put(&scheme->tried_regions->kobj); @@ -1953,6 +2088,13 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme, filter->addr_range = sysfs_filter->addr_range; } else if (filter->type == DAMOS_FILTER_TYPE_TARGET) { filter->target_idx = sysfs_filter->target_idx; + } else if (filter->type == DAMOS_FILTER_TYPE_HUGEPAGE_SIZE) { + if (sysfs_filter->sz_range.min > + sysfs_filter->sz_range.max) { + damos_destroy_filter(filter); + return -EINVAL; + } + filter->sz_range = sysfs_filter->sz_range; } damos_add_filter(scheme, filter); @@ -2048,8 +2190,6 @@ static struct damos *damon_sysfs_mk_scheme( struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas; struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights; struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks; - struct damon_sysfs_scheme_filters *sysfs_filters = - sysfs_scheme->filters; struct damos *scheme; int err; @@ -2089,7 +2229,17 @@ static struct damos *damon_sysfs_mk_scheme( return NULL; } - err = damon_sysfs_add_scheme_filters(scheme, sysfs_filters); + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->core_filters); + if (err) { + damon_destroy_scheme(scheme); + return NULL; + } + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->ops_filters); + if (err) { + damon_destroy_scheme(scheme); + return NULL; + } + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->filters); if (err) { damon_destroy_scheme(scheme); 
return NULL; @@ -2192,7 +2342,6 @@ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, } } -/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ int damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes) { diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index deeab04d3b46..1af6aff35d84 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -409,6 +409,164 @@ static const struct kobj_type damon_sysfs_targets_ktype = { }; /* + * intervals goal directory + */ + +struct damon_sysfs_intervals_goal { + struct kobject kobj; + unsigned long access_bp; + unsigned long aggrs; + unsigned long min_sample_us; + unsigned long max_sample_us; +}; + +static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc( + unsigned long access_bp, unsigned long aggrs, + unsigned long min_sample_us, unsigned long max_sample_us) +{ + struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal), + GFP_KERNEL); + + if (!goal) + return NULL; + + goal->kobj = (struct kobject){}; + goal->access_bp = access_bp; + goal->aggrs = aggrs; + goal->min_sample_us = min_sample_us; + goal->max_sample_us = max_sample_us; + return goal; +} + +static ssize_t access_bp_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->access_bp); +} + +static ssize_t access_bp_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->access_bp = nr; + return count; +} + +static ssize_t aggrs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->aggrs); +} + +static ssize_t aggrs_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->aggrs = nr; + return count; +} + +static ssize_t min_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->min_sample_us); +} + +static ssize_t min_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->min_sample_us = nr; + return count; +} + +static ssize_t max_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->max_sample_us); +} + +static ssize_t max_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct 
damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->max_sample_us = nr; + return count; +} + +static void damon_sysfs_intervals_goal_release(struct kobject *kobj) +{ + kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj)); +} + +static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr = + __ATTR_RW_MODE(access_bp, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr = + __ATTR_RW_MODE(aggrs, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr = + __ATTR_RW_MODE(min_sample_us, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr = + __ATTR_RW_MODE(max_sample_us, 0600); + +static struct attribute *damon_sysfs_intervals_goal_attrs[] = { + &damon_sysfs_intervals_goal_access_bp_attr.attr, + &damon_sysfs_intervals_goal_aggrs_attr.attr, + &damon_sysfs_intervals_goal_min_sample_us_attr.attr, + &damon_sysfs_intervals_goal_max_sample_us_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal); + +static const struct kobj_type damon_sysfs_intervals_goal_ktype = { + .release = damon_sysfs_intervals_goal_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = damon_sysfs_intervals_goal_groups, +}; + +/* * intervals directory */ @@ -417,6 +575,7 @@ struct damon_sysfs_intervals { unsigned long sample_us; unsigned long aggr_us; unsigned long update_us; + struct damon_sysfs_intervals_goal *intervals_goal; }; static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( @@ -436,6 +595,32 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( return intervals; } +static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals) +{ + struct damon_sysfs_intervals_goal *goal; + int err; + + goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0); + if (!goal) + return -ENOMEM; + + err = kobject_init_and_add(&goal->kobj, + &damon_sysfs_intervals_goal_ktype, &intervals->kobj, + "intervals_goal"); + if (err) { + kobject_put(&goal->kobj); + intervals->intervals_goal = NULL; + return err; + } + intervals->intervals_goal = goal; + return 0; +} + +static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals) +{ + kobject_put(&intervals->intervals_goal->kobj); +} + static ssize_t sample_us_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -571,6 +756,9 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs) "intervals"); if (err) goto put_intervals_out; + err = damon_sysfs_intervals_add_dirs(intervals); + if (err) + goto put_intervals_out; attrs->intervals = intervals; nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000); @@ -599,6 +787,7 @@ put_intervals_out: static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs) { kobject_put(&attrs->nr_regions_range->kobj); + damon_sysfs_intervals_rm_dirs(attrs->intervals); kobject_put(&attrs->intervals->kobj); } @@ -1025,6 +1214,11 @@ enum damon_sysfs_cmd { */ DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS, /* + * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring + * intevals. + */ + DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS, + /* * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands. 
*/ NR_DAMON_SYSFS_CMDS, @@ -1041,27 +1235,9 @@ static const char * const damon_sysfs_cmd_strs[] = { "update_schemes_tried_regions", "clear_schemes_tried_regions", "update_schemes_effective_quotas", + "update_tuned_intervals", }; -/* - * struct damon_sysfs_cmd_request - A request to the DAMON callback. - * @cmd: The command that needs to be handled by the callback. - * @kdamond: The kobject wrapper that associated to the kdamond thread. - * - * This structure represents a sysfs command request that need to access some - * DAMON context-internal data. Because DAMON context-internal data can be - * safely accessed from DAMON callbacks without additional synchronization, the - * request will be handled by the DAMON callback. None-``NULL`` @kdamond means - * the request is valid. - */ -struct damon_sysfs_cmd_request { - enum damon_sysfs_cmd cmd; - struct damon_sysfs_kdamond *kdamond; -}; - -/* Current DAMON callback request. Protected by damon_sysfs_lock. */ -static struct damon_sysfs_cmd_request damon_sysfs_cmd_request; - static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -1084,11 +1260,18 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx, struct damon_sysfs_attrs *sys_attrs) { struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals; + struct damon_sysfs_intervals_goal *sys_goal = + sys_intervals->intervals_goal; struct damon_sysfs_ul_range *sys_nr_regions = sys_attrs->nr_regions_range; struct damon_attrs attrs = { .sample_interval = sys_intervals->sample_us, .aggr_interval = sys_intervals->aggr_us, + .intervals_goal = { + .access_bp = sys_goal->access_bp, + .aggrs = sys_goal->aggrs, + .min_sample_us = sys_goal->min_sample_us, + .max_sample_us = sys_goal->max_sample_us}, .ops_update_interval = sys_intervals->update_us, .min_nr_regions = sys_nr_regions->min, .max_nr_regions = sys_nr_regions->max, @@ -1247,11 +1430,12 @@ static struct damon_ctx *damon_sysfs_build_ctx( * damon_sysfs_commit_input() - Commit user inputs to a running kdamond. * @kdamond: The kobject wrapper for the associated kdamond. * - * If the sysfs input is wrong, the kdamond will be terminated. + * Returns error if the sysfs input is wrong. */ -static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_commit_input(void *data) { - struct damon_ctx *param_ctx; + struct damon_sysfs_kdamond *kdamond = data; + struct damon_ctx *param_ctx, *test_ctx; int err; if (!damon_sysfs_kdamond_running(kdamond)) @@ -1263,7 +1447,15 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]); if (IS_ERR(param_ctx)) return PTR_ERR(param_ctx); + test_ctx = damon_new_ctx(); + err = damon_commit_ctx(test_ctx, param_ctx); + if (err) { + damon_sysfs_destroy_targets(test_ctx); + damon_destroy_ctx(test_ctx); + goto out; + } err = damon_commit_ctx(kdamond->damon_ctx, param_ctx); +out: damon_sysfs_destroy_targets(param_ctx); damon_destroy_ctx(param_ctx); return err; @@ -1306,69 +1498,16 @@ static int damon_sysfs_upd_schemes_effective_quotas(void *data) return 0; } - -/* - * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests. - * @c: The DAMON context of the callback. - * @active: Whether @c is not deactivated due to watermarks. - * @after_aggr: Whether this is called from after_aggregation() callback. - * - * This function is periodically called back from the kdamond thread for @c. 
- * Then, it checks if there is a waiting DAMON sysfs request and handles it. - */ -static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, - bool after_aggregation) -{ - struct damon_sysfs_kdamond *kdamond; - int err = 0; - - /* avoid deadlock due to concurrent state_store('off') */ - if (!mutex_trylock(&damon_sysfs_lock)) - return 0; - kdamond = damon_sysfs_cmd_request.kdamond; - if (!kdamond || kdamond->damon_ctx != c) - goto out; - switch (damon_sysfs_cmd_request.cmd) { - case DAMON_SYSFS_CMD_COMMIT: - if (!after_aggregation) - goto out; - err = damon_sysfs_commit_input(kdamond); - break; - default: - break; - } - /* Mark the request as invalid now. */ - damon_sysfs_cmd_request.kdamond = NULL; -out: - mutex_unlock(&damon_sysfs_lock); - return err; -} - -static int damon_sysfs_after_wmarks_check(struct damon_ctx *c) +static int damon_sysfs_upd_tuned_intervals(void *data) { - /* - * after_wmarks_check() is called back while the context is deactivated - * by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, false, false); -} - -static int damon_sysfs_after_sampling(struct damon_ctx *c) -{ - /* - * after_sampling() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, false); -} + struct damon_sysfs_kdamond *kdamond = data; + struct damon_ctx *ctx = kdamond->damon_ctx; -static int damon_sysfs_after_aggregation(struct damon_ctx *c) -{ - /* - * after_aggregation() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, true); + kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us = + ctx->attrs.sample_interval; + kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us = + ctx->attrs.aggr_interval; + return 0; } static struct damon_ctx *damon_sysfs_build_ctx( @@ -1386,9 +1525,6 @@ static struct damon_ctx *damon_sysfs_build_ctx( return ERR_PTR(err); } - ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check; - ctx->callback.after_sampling = damon_sysfs_after_sampling; - ctx->callback.after_aggregation = damon_sysfs_after_aggregation; ctx->callback.before_terminate = damon_sysfs_before_terminate; return ctx; } @@ -1400,8 +1536,6 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) if (damon_sysfs_kdamond_running(kdamond)) return -EBUSY; - if (damon_sysfs_cmd_request.kdamond == kdamond) - return -EBUSY; /* TODO: support multiple contexts per kdamond */ if (kdamond->contexts->nr != 1) return -EINVAL; @@ -1491,24 +1625,21 @@ static int damon_sysfs_update_schemes_tried_regions( * @cmd: The command to handle. * @kdamond: The kobject wrapper for the associated kdamond. * - * This function handles a DAMON sysfs command for a kdamond. For commands - * that need to access running DAMON context-internal data, it requests - * handling of the command to the DAMON callback - * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled, - * or the context is completed. + * This function handles a DAMON sysfs command for a kdamond. * * Return: 0 on success, negative error code otherwise. 
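+ * Context: Called with damon_sysfs_lock held.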
*/ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, struct damon_sysfs_kdamond *kdamond) { - bool need_wait = true; - switch (cmd) { case DAMON_SYSFS_CMD_ON: return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_COMMIT: + return damon_sysfs_damon_call( + damon_sysfs_commit_input, kdamond); case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: return damon_sysfs_damon_call( damon_sysfs_commit_schemes_quota_goals, @@ -1527,39 +1658,12 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_damon_call( damon_sysfs_upd_schemes_effective_quotas, kdamond); + case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: + return damon_sysfs_damon_call( + damon_sysfs_upd_tuned_intervals, kdamond); default: - break; - } - - /* Pass the command to DAMON callback for safe DAMON context access */ - if (damon_sysfs_cmd_request.kdamond) - return -EBUSY; - if (!damon_sysfs_kdamond_running(kdamond)) return -EINVAL; - damon_sysfs_cmd_request.cmd = cmd; - damon_sysfs_cmd_request.kdamond = kdamond; - - /* - * wait until damon_sysfs_cmd_request_callback() handles the request - * from kdamond context - */ - mutex_unlock(&damon_sysfs_lock); - while (need_wait) { - schedule_timeout_idle(msecs_to_jiffies(100)); - if (!mutex_trylock(&damon_sysfs_lock)) - continue; - if (!damon_sysfs_cmd_request.kdamond) { - /* damon_sysfs_cmd_request_callback() handled */ - need_wait = false; - } else if (!damon_sysfs_kdamond_running(kdamond)) { - /* kdamond has already finished */ - need_wait = false; - damon_sysfs_cmd_request.kdamond = NULL; - } - mutex_unlock(&damon_sysfs_lock); } - mutex_lock(&damon_sysfs_lock); - return 0; } static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, @@ -1671,8 +1775,7 @@ static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds, int i; for (i = 0; i < nr_kdamonds; i++) { - if (damon_sysfs_kdamond_running(kdamonds[i]) || - damon_sysfs_cmd_request.kdamond == kdamonds[i]) + if (damon_sysfs_kdamond_running(kdamonds[i])) return true; } diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index 532c6a6f21f9..be0fea9ee5fc 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -348,19 +348,19 @@ static void damon_test_update_monitoring_result(struct kunit *test) new_attrs = (struct damon_attrs){ .sample_interval = 100, .aggr_interval = 10000,}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 1000}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 100}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 20); diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index a6174f725bd7..e6d99106a7f9 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -710,7 +710,6 @@ static int __init damon_va_initcall(void) .update = damon_va_update, .prepare_access_checks = damon_va_prepare_access_checks, .check_accesses = damon_va_check_accesses, - 
.reset_aggregated = NULL, .target_valid = damon_va_target_valid, .cleanup = NULL, .apply_scheme = damon_va_apply_scheme, diff --git a/mm/debug.c b/mm/debug.c index 8d2acf432385..db83e381a8ae 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -79,12 +79,17 @@ static void __dump_folio(struct folio *folio, struct page *page, folio_ref_count(folio), mapcount, mapping, folio->index + idx, pfn); if (folio_test_large(folio)) { + int pincount = 0; + + if (folio_has_pincount(folio)) + pincount = atomic_read(&folio->_pincount); + pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", folio_order(folio), folio_mapcount(folio), folio_entire_mapcount(folio), folio_nr_pages_mapped(folio), - atomic_read(&folio->_pincount)); + pincount); } #ifdef CONFIG_MEMCG @@ -146,6 +151,9 @@ again: if (idx < MAX_FOLIO_NR_PAGES) { memcpy(&folio, foliop, 2 * sizeof(struct page)); nr_pages = folio_nr_pages(&folio); + if (nr_pages > 1) + memcpy(&folio.__page_2, &foliop->__page_2, + sizeof(struct page)); foliop = &folio; } @@ -165,7 +173,7 @@ dump: void dump_page(const struct page *page, const char *reason) { if (PagePoisoned(page)) - pr_warn("page:%p is uninitialized and poisoned", page); + pr_warn("page:%p is uninitialized and poisoned\n", page); else __dump_page(page); if (reason) @@ -181,11 +189,17 @@ void dump_vma(const struct vm_area_struct *vma) pr_emerg("vma %px start %px end %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" +#ifdef CONFIG_PER_VMA_LOCK + "refcnt %x\n" +#endif "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, +#ifdef CONFIG_PER_VMA_LOCK + refcount_read(&vma->vm_refcnt), +#endif vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); @@ -261,16 +275,19 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px state: mm %px pgoff %lx\n" "vmi %px [%lx,%lx)\n" - "prev %px next %px vma %px\n" + "prev %px middle %px next %px target %px\n" "start %lx end %lx flags %lx\n" "file %px anon_vma %px policy %px\n" "uffd_ctx %px\n" "anon_name %px\n" - "merge_flags %x state %x\n", + "state %x\n" + "just_expand %d\n" + "__adjust_middle_start %d __adjust_next_start %d\n" + "__remove_middle %d __remove_next %d\n", vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ? 
vma_iter_end(vmg->vmi) : 0, - vmg->prev, vmg->next, vmg->vma, + vmg->prev, vmg->middle, vmg->next, vmg->target, vmg->start, vmg->end, vmg->flags, vmg->file, vmg->anon_vma, vmg->policy, #ifdef CONFIG_USERFAULTFD @@ -279,7 +296,10 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) (void *)0, #endif vmg->anon_name, - (int)vmg->merge_flags, (int)vmg->state); + (int)vmg->state, + vmg->just_expand, + vmg->__adjust_middle_start, vmg->__adjust_next_start, + vmg->__remove_middle, vmg->__remove_next); if (vmg->mm) { pr_warn("vmg %px mm:\n", vmg); @@ -288,13 +308,6 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px mm: (NULL)\n", vmg); } - if (vmg->vma) { - pr_warn("vmg %px vma:\n", vmg); - dump_vma(vmg->vma); - } else { - pr_warn("vmg %px vma: (NULL)\n", vmg); - } - if (vmg->prev) { pr_warn("vmg %px prev:\n", vmg); dump_vma(vmg->prev); @@ -302,6 +315,13 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px prev: (NULL)\n", vmg); } + if (vmg->middle) { + pr_warn("vmg %px middle:\n", vmg); + dump_vma(vmg->middle); + } else { + pr_warn("vmg %px middle: (NULL)\n", vmg); + } + if (vmg->next) { pr_warn("vmg %px next:\n", vmg); dump_vma(vmg->next); diff --git a/mm/filemap.c b/mm/filemap.c index cd586203e299..b5e784f34d98 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -227,15 +227,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow) void filemap_free_folio(struct address_space *mapping, struct folio *folio) { void (*free_folio)(struct folio *); - int refs = 1; free_folio = mapping->a_ops->free_folio; if (free_folio) free_folio(folio); - if (folio_test_large(folio)) - refs = folio_nr_pages(folio); - folio_put_refs(folio, refs); + folio_put_refs(folio, folio_nr_pages(folio)); } /** @@ -860,11 +857,10 @@ EXPORT_SYMBOL_GPL(replace_page_cache_folio); noinline int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) { - XA_STATE(xas, &mapping->i_pages, index); - void *alloced_shadow = NULL; - int alloced_order = 0; + XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); bool huge; long nr; + unsigned int forder = folio_order(folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); @@ -873,7 +869,6 @@ noinline int __filemap_add_folio(struct address_space *mapping, mapping_set_update(&xas, mapping); VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); - xas_set_order(&xas, index, folio_order(folio)); huge = folio_test_hugetlb(folio); nr = folio_nr_pages(folio); @@ -883,7 +878,7 @@ noinline int __filemap_add_folio(struct address_space *mapping, folio->index = xas.xa_index; for (;;) { - int order = -1, split_order = 0; + int order = -1; void *entry, *old = NULL; xas_lock_irq(&xas); @@ -901,21 +896,25 @@ noinline int __filemap_add_folio(struct address_space *mapping, order = xas_get_order(&xas); } - /* entry may have changed before we re-acquire the lock */ - if (alloced_order && (old != alloced_shadow || order != alloced_order)) { - xas_destroy(&xas); - alloced_order = 0; - } - if (old) { - if (order > 0 && order > folio_order(folio)) { + if (order > 0 && order > forder) { + unsigned int split_order = max(forder, + xas_try_split_min_order(order)); + /* How to handle large swap entries? 
*/ BUG_ON(shmem_mapping(mapping)); - if (!alloced_order) { - split_order = order; - goto unlock; + + while (order > forder) { + xas_set_order(&xas, index, split_order); + xas_try_split(&xas, old, order); + if (xas_error(&xas)) + goto unlock; + order = split_order; + split_order = + max(xas_try_split_min_order( + split_order), + forder); } - xas_split(&xas, old, order); xas_reset(&xas); } if (shadowp) @@ -939,17 +938,6 @@ noinline int __filemap_add_folio(struct address_space *mapping, unlock: xas_unlock_irq(&xas); - /* split needed, alloc here and retry. */ - if (split_order) { - xas_split_alloc(&xas, old, split_order, gfp); - if (xas_error(&xas)) - goto error; - alloced_shadow = old; - alloced_order = split_order; - xas_reset(&xas); - continue; - } - if (!xas_nomem(&xas, gfp)) break; } @@ -96,8 +96,7 @@ retry: * belongs to this folio. */ if (unlikely(page_folio(page) != folio)) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); goto retry; } @@ -110,14 +109,13 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) if (is_zero_folio(folio)) return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) atomic_sub(refs, &folio->_pincount); else refs *= GUP_PIN_COUNTING_BIAS; } - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); } /** @@ -166,7 +164,7 @@ int __must_check try_grab_folio(struct folio *folio, int refs, * Increment the normal page refcount field at least once, * so that the page really is pinned. */ - if (folio_test_large(folio)) { + if (folio_has_pincount(folio)) { folio_ref_add(folio, refs); atomic_add(refs, &folio->_pincount); } else { @@ -225,7 +223,7 @@ void folio_add_pin(struct folio *folio) * page refcount field at least once, so that the page really is * pinned. */ - if (folio_test_large(folio)) { + if (folio_has_pincount(folio)) { WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); folio_ref_inc(folio); atomic_inc(&folio->_pincount); @@ -565,8 +563,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, */ if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); return NULL; } @@ -578,7 +575,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, * is pinned. That's why the refcount from the earlier * try_get_folio() is left intact. */ - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) atomic_add(refs, &folio->_pincount); else folio_ref_add(folio, @@ -1283,6 +1280,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) return -EOPNOTSUPP; + if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma)) + return -EOPNOTSUPP; + if (vma_is_secretmem(vma)) return -EFAULT; @@ -2757,7 +2757,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * * *) ptes can be read atomically by the architecture. * - * *) access_ok is sufficient to validate userspace address ranges. + * *) valid user addesses are below TASK_MAX_SIZE * * The last two assumptions can be relaxed by the addition of helper functions. 
* @@ -3010,11 +3010,6 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, break; } - if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { - gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); - break; - } - folio = try_grab_folio_fast(page, 1, flags); if (!folio) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); @@ -3411,8 +3406,6 @@ static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; - if (unlikely(!access_ok((void __user *)start, len))) - return -EFAULT; nr_pinned = gup_fast(start, end, gup_flags, pages); if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) @@ -248,7 +248,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, * just report the PFN. */ if (is_device_private_entry(entry) && - pfn_swap_entry_to_page(entry)->pgmap->owner == + page_pgmap(pfn_swap_entry_to_page(entry))->owner == range->dev_private_owner) { cpu_flags = HMM_PFN_VALID; if (is_writable_device_private_entry(entry)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 373781b21e5c..2a47682d1ab7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1309,8 +1309,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm, struct folio *zero_folio) { pmd_t entry; - if (!pmd_none(*pmd)) - return; entry = mk_pmd(&zero_folio->page, vma->vm_page_prot); entry = pmd_mkhuge(entry); pgtable_trans_huge_deposit(mm, pmd, pgtable); @@ -1375,20 +1373,20 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) return __do_huge_pmd_anonymous_page(vmf); } -static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, +static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) { struct mm_struct *mm = vma->vm_mm; pmd_t entry; - spinlock_t *ptl; - ptl = pmd_lock(mm, pmd); + lockdep_assert_held(pmd_lockptr(mm, pmd)); + if (!pmd_none(*pmd)) { if (write) { if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); - goto out_unlock; + return -EEXIST; } entry = pmd_mkyoung(*pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); @@ -1396,7 +1394,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache_pmd(vma, addr, pmd); } - goto out_unlock; + return -EEXIST; } entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); @@ -1412,16 +1410,11 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, if (pgtable) { pgtable_trans_huge_deposit(mm, pmd, pgtable); mm_inc_nr_ptes(mm); - pgtable = NULL; } set_pmd_at(mm, addr, pmd, entry); update_mmu_cache_pmd(vma, addr, pmd); - -out_unlock: - spin_unlock(ptl); - if (pgtable) - pte_free(mm, pgtable); + return 0; } /** @@ -1440,6 +1433,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; + spinlock_t *ptl; + int error; /* * If we had pmd_special, we could avoid all these restrictions, @@ -1462,12 +1457,56 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) } track_pfn_insert(vma, &pgprot, pfn); + ptl = pmd_lock(vma->vm_mm, vmf->pmd); + error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, + pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(vma->vm_mm, pgtable); - insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } 
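A minimal sketch of how a fault handler that already holds a reference on a PMD-sized folio might use the new vmf_insert_folio_pmd() helper added below (illustrative only, not part of this patch; example_map_pmd_folio() is a made-up name):

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Map a PMD-sized folio at the faulting address, or ask for fallback. */
static vm_fault_t example_map_pmd_folio(struct vm_fault *vmf,
					struct folio *folio)
{
	if (!folio || folio_order(folio) != PMD_ORDER)
		return VM_FAULT_FALLBACK;
	/* installs a single PMD entry covering the whole folio */
	return vmf_insert_folio_pmd(vmf, folio, vmf->flags & FAULT_FLAG_WRITE);
}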
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address & PMD_MASK; + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pgtable_t pgtable = NULL; + int error; + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + + if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) + return VM_FAULT_SIGBUS; + + if (arch_needs_pgtable_deposit()) { + pgtable = pte_alloc_one(vma->vm_mm); + if (!pgtable) + return VM_FAULT_OOM; + } + + ptl = pmd_lock(mm, vmf->pmd); + if (pmd_none(*vmf->pmd)) { + folio_get(folio); + folio_add_file_rmap_pmd(folio, &folio->page, vma); + add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR); + } + error = insert_pfn_pmd(vma, addr, vmf->pmd, + pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot, + write, pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(mm, pgtable); + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd); + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) { @@ -1482,19 +1521,17 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, struct mm_struct *mm = vma->vm_mm; pgprot_t prot = vma->vm_page_prot; pud_t entry; - spinlock_t *ptl; - ptl = pud_lock(mm, pud); if (!pud_none(*pud)) { if (write) { if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn))) - goto out_unlock; + return; entry = pud_mkyoung(*pud); entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); if (pudp_set_access_flags(vma, addr, pud, entry, 1)) update_mmu_cache_pud(vma, addr, pud); } - goto out_unlock; + return; } entry = pud_mkhuge(pfn_t_pud(pfn, prot)); @@ -1508,9 +1545,6 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, } set_pud_at(mm, addr, pud, entry); update_mmu_cache_pud(vma, addr, pud); - -out_unlock: - spin_unlock(ptl); } /** @@ -1528,6 +1562,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) unsigned long addr = vmf->address & PUD_MASK; struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; + spinlock_t *ptl; /* * If we had pud_special, we could avoid all these restrictions, @@ -1545,10 +1580,57 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) track_pfn_insert(vma, &pgprot, pfn); + ptl = pud_lock(vma->vm_mm, vmf->pud); insert_pfn_pud(vma, addr, vmf->pud, pfn, write); + spin_unlock(ptl); + return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); + +/** + * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry + * @vmf: Structure describing the fault + * @folio: folio to insert + * @write: whether it's a write fault + * + * Return: vm_fault_t value. + */ +vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, + bool write) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address & PUD_MASK; + pud_t *pud = vmf->pud; + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + + if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER)) + return VM_FAULT_SIGBUS; + + ptl = pud_lock(mm, pud); + + /* + * If there is already an entry present we assume the folio is + * already mapped, hence no need to take another reference. We + * still call insert_pfn_pud() though in case the mapping needs + * upgrading to writeable. 
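+	 * In that case insert_pfn_pud() only marks the existing entry young,
+	 * dirty and writable (for write faults) and does not install a new
+	 * mapping.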
+ */ + if (pud_none(*vmf->pud)) { + folio_get(folio); + folio_add_file_rmap_pud(folio, &folio->page, vma); + add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR); + } + insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)), + write); + spin_unlock(ptl); + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_folio_pud); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ void touch_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -1698,7 +1780,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, src_folio = page_folio(src_page); folio_get(src_folio); - if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) { /* Page maybe pinned: split and retry the fault on PTEs. */ folio_put(src_folio); pte_free(dst_mm, pgtable); @@ -2071,7 +2153,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) goto out; if (!folio_trylock(folio)) @@ -2141,12 +2223,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb->fullmm); arch_check_zapped_pmd(vma, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_special_huge(vma)) { + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else if (is_huge_zero_pmd(orig_pmd)) { - zap_deposited_table(tlb->mm, pmd); + if (!vma_is_dax(vma) || arch_needs_pgtable_deposit()) + zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else { struct folio *folio = NULL; @@ -2646,12 +2729,24 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); arch_check_zapped_pud(vma, orig_pud); tlb_remove_pud_tlb_entry(tlb, pud, addr); - if (vma_is_special_huge(vma)) { + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { spin_unlock(ptl); /* No zero page support yet */ } else { - /* No support for anonymous PUD pages yet */ - BUG(); + struct page *page = NULL; + struct folio *folio; + + /* No support for anonymous PUD pages or migration yet */ + VM_WARN_ON_ONCE(vma_is_anonymous(vma) || + !pud_present(orig_pud)); + + page = pud_page(orig_pud); + folio = page_folio(page); + folio_remove_rmap_pud(folio, page, vma); + add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR); + + spin_unlock(ptl); + tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE); } return 1; } @@ -2659,6 +2754,10 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, unsigned long haddr) { + struct folio *folio; + struct page *page; + pud_t old_pud; + VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); @@ -2666,7 +2765,22 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, count_vm_event(THP_SPLIT_PUD); - pudp_huge_clear_flush(vma, haddr, pud); + old_pud = pudp_huge_clear_flush(vma, haddr, pud); + + if (!vma_is_dax(vma)) + return; + + page = pud_page(old_pud); + folio = page_folio(page); + + if (!folio_test_dirty(folio) && pud_dirty(old_pud)) + folio_mark_dirty(folio); + if (!folio_test_referenced(folio) && 
pud_young(old_pud)) + folio_set_referenced(folio); + folio_remove_rmap_pud(folio, page, vma); + folio_put(folio); + add_mm_counter(vma->vm_mm, mm_counter_file(folio), + -HPAGE_PUD_NR); } void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, @@ -2766,13 +2880,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ if (arch_needs_pgtable_deposit()) zap_deposited_table(mm, pmd); - if (vma_is_special_huge(vma)) + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) return; if (unlikely(is_pmd_migration_entry(old_pmd))) { swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); folio = pfn_swap_entry_folio(entry); + } else if (is_huge_zero_pmd(old_pmd)) { + return; } else { page = pmd_page(old_pmd); folio = page_folio(page); @@ -3017,9 +3133,9 @@ static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned } void vma_adjust_trans_huge(struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - long adjust_next) + unsigned long start, + unsigned long end, + struct vm_area_struct *next) { /* Check if we need to split start first. */ split_huge_pmd_if_needed(vma, start); @@ -3027,16 +3143,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, /* Check if we need to split end next. */ split_huge_pmd_if_needed(vma, end); - /* - * If we're also updating the next vma vm_start, - * check if we need to split it. - */ - if (adjust_next > 0) { - struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); - unsigned long nstart = next->vm_start; - nstart += adjust_next; - split_huge_pmd_if_needed(next, nstart); - } + /* If we're incrementing next->vm_start, we might need to split it. */ + if (next) + split_huge_pmd_if_needed(next, end); } static void unmap_folio(struct folio *folio) @@ -3070,8 +3179,12 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, int ref_count, map_count; pmd_t orig_pmd = *pmdp; - if (folio_test_dirty(folio) || pmd_dirty(orig_pmd)) + if (pmd_dirty(orig_pmd)) + folio_set_dirty(folio); + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + folio_set_swapbacked(folio); return false; + } orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); @@ -3098,8 +3211,15 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, * * The only folio refs must be one from isolation plus the rmap(s). 
*/ - if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) || - ref_count != map_count + 1) { + if (pmd_dirty(orig_pmd)) + folio_set_dirty(folio); + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + folio_set_swapbacked(folio); + set_pmd_at(mm, addr, pmdp, orig_pmd); + return false; + } + + if (ref_count != map_count + 1) { set_pmd_at(mm, addr, pmdp, orig_pmd); return false; } @@ -3119,12 +3239,11 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, { VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio); VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); - if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) - return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); - - return false; + return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); } static void remap_page(struct folio *folio, unsigned long nr, int flags) @@ -3143,225 +3262,378 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags) } } -static void lru_add_page_tail(struct folio *folio, struct page *tail, +static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, struct lruvec *lruvec, struct list_head *list) { - VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); - VM_BUG_ON_FOLIO(PageLRU(tail), folio); + VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio); lockdep_assert_held(&lruvec->lru_lock); if (list) { /* page reclaim is reclaiming a huge page */ VM_WARN_ON(folio_test_lru(folio)); - get_page(tail); - list_add_tail(&tail->lru, list); + folio_get(new_folio); + list_add_tail(&new_folio->lru, list); } else { /* head is still on lru (and we have it frozen) */ VM_WARN_ON(!folio_test_lru(folio)); if (folio_test_unevictable(folio)) - tail->mlock_count = 0; + new_folio->mlock_count = 0; else - list_add_tail(&tail->lru, &folio->lru); - SetPageLRU(tail); + list_add_tail(&new_folio->lru, &folio->lru); + folio_set_lru(new_folio); } } -static void __split_huge_page_tail(struct folio *folio, int tail, - struct lruvec *lruvec, struct list_head *list, - unsigned int new_order) +/* Racy check whether the huge page can be split */ +bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) { - struct page *head = &folio->page; - struct page *page_tail = head + tail; - /* - * Careful: new_folio is not a "real" folio before we cleared PageTail. - * Don't pass it around before clear_compound_head(). - */ - struct folio *new_folio = (struct folio *)page_tail; + int extra_pins; + + /* Additional pins from page cache */ + if (folio_test_anon(folio)) + extra_pins = folio_test_swapcache(folio) ? + folio_nr_pages(folio) : 0; + else + extra_pins = folio_nr_pages(folio); + if (pextra_pins) + *pextra_pins = extra_pins; + return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - + caller_pins; +} - VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); +/* + * It splits @folio into @new_order folios and copies the @folio metadata to + * all the resulting folios. + */ +static void __split_folio_to_order(struct folio *folio, int old_order, + int new_order) +{ + long new_nr_pages = 1 << new_order; + long nr_pages = 1 << old_order; + long i; /* - * Clone page flags before unfreezing refcount. - * - * After successful get_page_unless_zero() might follow flags change, - * for example lock_page() which set PG_waiters. 
- * - * Note that for mapped sub-pages of an anonymous THP, - * PG_anon_exclusive has been cleared in unmap_folio() and is stored in - * the migration entry instead from where remap_page() will restore it. - * We can still have PG_anon_exclusive set on effectively unmapped and - * unreferenced sub-pages of an anonymous THP: we can simply drop - * PG_anon_exclusive (-> PG_mappedtodisk) for these here. + * Skip the first new_nr_pages, since the new folio from them have all + * the flags from the original folio. */ - page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; - page_tail->flags |= (head->flags & - ((1L << PG_referenced) | - (1L << PG_swapbacked) | - (1L << PG_swapcache) | - (1L << PG_mlocked) | - (1L << PG_uptodate) | - (1L << PG_active) | - (1L << PG_workingset) | - (1L << PG_locked) | - (1L << PG_unevictable) | + for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) { + struct page *new_head = &folio->page + i; + + /* + * Careful: new_folio is not a "real" folio before we cleared PageTail. + * Don't pass it around before clear_compound_head(). + */ + struct folio *new_folio = (struct folio *)new_head; + + VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head); + + /* + * Clone page flags before unfreezing refcount. + * + * After successful get_page_unless_zero() might follow flags change, + * for example lock_page() which set PG_waiters. + * + * Note that for mapped sub-pages of an anonymous THP, + * PG_anon_exclusive has been cleared in unmap_folio() and is stored in + * the migration entry instead from where remap_page() will restore it. + * We can still have PG_anon_exclusive set on effectively unmapped and + * unreferenced sub-pages of an anonymous THP: we can simply drop + * PG_anon_exclusive (-> PG_mappedtodisk) for these here. + */ + new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + new_folio->flags |= (folio->flags & + ((1L << PG_referenced) | + (1L << PG_swapbacked) | + (1L << PG_swapcache) | + (1L << PG_mlocked) | + (1L << PG_uptodate) | + (1L << PG_active) | + (1L << PG_workingset) | + (1L << PG_locked) | + (1L << PG_unevictable) | #ifdef CONFIG_ARCH_USES_PG_ARCH_2 - (1L << PG_arch_2) | + (1L << PG_arch_2) | #endif #ifdef CONFIG_ARCH_USES_PG_ARCH_3 - (1L << PG_arch_3) | + (1L << PG_arch_3) | #endif - (1L << PG_dirty) | - LRU_GEN_MASK | LRU_REFS_MASK)); + (1L << PG_dirty) | + LRU_GEN_MASK | LRU_REFS_MASK)); - /* ->mapping in first and second tail page is replaced by other uses */ - VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, - page_tail); - new_folio->mapping = folio->mapping; - new_folio->index = folio->index + tail; + new_folio->mapping = folio->mapping; + new_folio->index = folio->index + i; - /* - * page->private should not be set in tail pages. Fix up and warn once - * if private is unexpectedly set. - */ - if (unlikely(page_tail->private)) { - VM_WARN_ON_ONCE_PAGE(true, page_tail); - page_tail->private = 0; - } - if (folio_test_swapcache(folio)) - new_folio->swap.val = folio->swap.val + tail; + /* + * page->private should not be set in tail pages. Fix up and warn once + * if private is unexpectedly set. + */ + if (unlikely(new_folio->private)) { + VM_WARN_ON_ONCE_PAGE(true, new_head); + new_folio->private = NULL; + } - /* Page flags must be visible before we make the page non-compound. */ - smp_wmb(); + if (folio_test_swapcache(folio)) + new_folio->swap.val = folio->swap.val + i; - /* - * Clear PageTail before unfreezing page refcount. 
- * - * After successful get_page_unless_zero() might follow put_page() - * which needs correct compound_head(). - */ - clear_compound_head(page_tail); - if (new_order) { - prep_compound_page(page_tail, new_order); - folio_set_large_rmappable(new_folio); - } + /* Page flags must be visible before we make the page non-compound. */ + smp_wmb(); - /* Finally unfreeze refcount. Additional reference from page cache. */ - page_ref_unfreeze(page_tail, - 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ? - folio_nr_pages(new_folio) : 0)); + /* + * Clear PageTail before unfreezing page refcount. + * + * After successful get_page_unless_zero() might follow put_page() + * which needs correct compound_head(). + */ + clear_compound_head(new_head); + if (new_order) { + prep_compound_page(new_head, new_order); + folio_set_large_rmappable(new_folio); + } - if (folio_test_young(folio)) - folio_set_young(new_folio); - if (folio_test_idle(folio)) - folio_set_idle(new_folio); + if (folio_test_young(folio)) + folio_set_young(new_folio); + if (folio_test_idle(folio)) + folio_set_idle(new_folio); +#ifdef CONFIG_MEMCG + new_folio->memcg_data = folio->memcg_data; +#endif - folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); + folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); + } - /* - * always add to the tail because some iterators expect new - * pages to show after the currently processed elements - e.g. - * migrate_pages - */ - lru_add_page_tail(folio, page_tail, lruvec, list); + if (new_order) + folio_set_order(folio, new_order); + else + ClearPageCompound(&folio->page); } -static void __split_huge_page(struct page *page, struct list_head *list, - pgoff_t end, unsigned int new_order) +/* + * It splits an unmapped @folio to lower order smaller folios in two ways. + * @folio: the to-be-split folio + * @new_order: the smallest order of the after split folios (since buddy + * allocator like split generates folios with orders from @folio's + * order - 1 to new_order). + * @split_at: in buddy allocator like split, the folio containing @split_at + * will be split until its order becomes @new_order. + * @lock_at: the folio containing @lock_at is left locked for caller. + * @list: the after split folios will be added to @list if it is not NULL, + * otherwise to LRU lists. + * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory. + * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller + * @mapping: @folio->mapping + * @uniform_split: if the split is uniform or not (buddy allocator like split) + * + * + * 1. uniform split: the given @folio into multiple @new_order small folios, + * where all small folios have the same order. This is done when + * uniform_split is true. + * 2. buddy allocator like (non-uniform) split: the given @folio is split into + * half and one of the half (containing the given page) is split into half + * until the given @page's order becomes @new_order. This is done when + * uniform_split is false. + * + * The high level flow for these two methods are: + * 1. uniform split: a single __split_folio_to_order() is called to split the + * @folio into @new_order, then we traverse all the resulting folios one by + * one in PFN ascending order and perform stats, unfreeze, adding to list, + * and file mapping index operations. + * 2. non-uniform split: in general, folio_order - @new_order calls to + * __split_folio_to_order() are made in a for loop to split the @folio + * to one lower order at a time. 
The resulting small folios are processed + * like what is done during the traversal in 1, except the one containing + * @page, which is split in next for loop. + * + * After splitting, the caller's folio reference will be transferred to the + * folio containing @page. The other folios may be freed if they are not mapped. + * + * In terms of locking, after splitting, + * 1. uniform split leaves @page (or the folio contains it) locked; + * 2. buddy allocator like (non-uniform) split leaves @folio locked. + * + * + * For !uniform_split, when -ENOMEM is returned, the original folio might be + * split. The caller needs to check the input folio. + */ +static int __split_unmapped_folio(struct folio *folio, int new_order, + struct page *split_at, struct page *lock_at, + struct list_head *list, pgoff_t end, + struct xa_state *xas, struct address_space *mapping, + bool uniform_split) { - struct folio *folio = page_folio(page); - struct page *head = &folio->page; struct lruvec *lruvec; struct address_space *swap_cache = NULL; - unsigned long offset = 0; - int i, nr_dropped = 0; - unsigned int new_nr = 1 << new_order; + struct folio *origin_folio = folio; + struct folio *next_folio = folio_next(folio); + struct folio *new_folio; + struct folio *next; int order = folio_order(folio); - unsigned int nr = 1 << order; + int split_order; + int start_order = uniform_split ? new_order : order - 1; + int nr_dropped = 0; + int ret = 0; + bool stop_split = false; + + if (folio_test_swapcache(folio)) { + VM_BUG_ON(mapping); - /* complete memcg works before add pages to LRU */ - split_page_memcg(head, order, new_order); + /* a swapcache folio can only be uniformly split to order-0 */ + if (!uniform_split || new_order != 0) + return -EINVAL; - if (folio_test_anon(folio) && folio_test_swapcache(folio)) { - offset = swap_cache_index(folio->swap); swap_cache = swap_address_space(folio->swap); xa_lock(&swap_cache->i_pages); } + if (folio_test_anon(folio)) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = folio_lruvec_lock(folio); folio_clear_has_hwpoisoned(folio); - for (i = nr - new_nr; i >= new_nr; i -= new_nr) { - struct folio *tail; - __split_huge_page_tail(folio, i, lruvec, list, new_order); - tail = page_folio(head + i); - /* Some pages can be beyond EOF: drop them from page cache */ - if (tail->index >= end) { - if (shmem_mapping(folio->mapping)) - nr_dropped++; - else if (folio_test_clear_dirty(tail)) - folio_account_cleaned(tail, - inode_to_wb(folio->mapping->host)); - __filemap_remove_folio(tail, NULL); - folio_put_refs(tail, folio_nr_pages(tail)); - } else if (!folio_test_anon(folio)) { - __xa_store(&folio->mapping->i_pages, tail->index, - tail, 0); - } else if (swap_cache) { - __xa_store(&swap_cache->i_pages, offset + i, - tail, 0); + /* + * split to new_order one order at a time. For uniform split, + * folio is split to new_order directly. + */ + for (split_order = start_order; + split_order >= new_order && !stop_split; + split_order--) { + int old_order = folio_order(folio); + struct folio *release; + struct folio *end_folio = folio_next(folio); + + /* order-1 anonymous folio is not supported */ + if (folio_test_anon(folio) && split_order == 1) + continue; + if (uniform_split && split_order != new_order) + continue; + + if (mapping) { + /* + * uniform split has xas_split_alloc() called before + * irq is disabled to allocate enough memory, whereas + * non-uniform split can handle ENOMEM. 
+ */ + if (uniform_split) + xas_split(xas, folio, old_order); + else { + xas_set_order(xas, folio->index, split_order); + xas_try_split(xas, folio, old_order); + if (xas_error(xas)) { + ret = xas_error(xas); + stop_split = true; + goto after_split; + } + } } - } - if (!new_order) - ClearPageCompound(head); - else { - struct folio *new_folio = (struct folio *)head; + folio_split_memcg_refs(folio, old_order, split_order); + split_page_owner(&folio->page, old_order, split_order); + pgalloc_tag_split(folio, old_order, split_order); - folio_set_order(new_folio, new_order); - } - unlock_page_lruvec(lruvec); - /* Caller disabled irqs, so they are still disabled here */ + __split_folio_to_order(folio, old_order, split_order); - split_page_owner(head, order, new_order); - pgalloc_tag_split(folio, order, new_order); +after_split: + /* + * Iterate through after-split folios and perform related + * operations. But in buddy allocator like split, the folio + * containing the specified page is skipped until its order + * is new_order, since the folio will be worked on in next + * iteration. + */ + for (release = folio; release != end_folio; release = next) { + next = folio_next(release); + /* + * for buddy allocator like split, the folio containing + * page will be split next and should not be released, + * until the folio's order is new_order or stop_split + * is set to true by the above xas_split() failure. + */ + if (release == page_folio(split_at)) { + folio = release; + if (split_order != new_order && !stop_split) + continue; + } + if (folio_test_anon(release)) { + mod_mthp_stat(folio_order(release), + MTHP_STAT_NR_ANON, 1); + } - /* See comment in __split_huge_page_tail() */ - if (folio_test_anon(folio)) { - /* Additional pin to swap cache */ - if (folio_test_swapcache(folio)) { - folio_ref_add(folio, 1 + new_nr); - xa_unlock(&swap_cache->i_pages); - } else { - folio_ref_inc(folio); + /* + * origin_folio should be kept frozon until page cache + * entries are updated with all the other after-split + * folios to prevent others seeing stale page cache + * entries. + */ + if (release == origin_folio) + continue; + + folio_ref_unfreeze(release, 1 + + ((mapping || swap_cache) ? + folio_nr_pages(release) : 0)); + + lru_add_split_folio(origin_folio, release, lruvec, + list); + + /* Some pages can be beyond EOF: drop them from cache */ + if (release->index >= end) { + if (shmem_mapping(mapping)) + nr_dropped += folio_nr_pages(release); + else if (folio_test_clear_dirty(release)) + folio_account_cleaned(release, + inode_to_wb(mapping->host)); + __filemap_remove_folio(release, NULL); + folio_put_refs(release, folio_nr_pages(release)); + } else if (mapping) { + __xa_store(&mapping->i_pages, + release->index, release, 0); + } else if (swap_cache) { + __xa_store(&swap_cache->i_pages, + swap_cache_index(release->swap), + release, 0); + } } - } else { - /* Additional pin to page cache */ - folio_ref_add(folio, 1 + new_nr); - xa_unlock(&folio->mapping->i_pages); } + + /* + * Unfreeze origin_folio only after all page cache entries, which used + * to point to it, have been updated with new folios. Otherwise, + * a parallel folio_try_get() can grab origin_folio and its caller can + * see stale page cache entries. + */ + folio_ref_unfreeze(origin_folio, 1 + + ((mapping || swap_cache) ? 
folio_nr_pages(origin_folio) : 0)); + + unlock_page_lruvec(lruvec); + + if (swap_cache) + xa_unlock(&swap_cache->i_pages); + if (mapping) + xa_unlock(&mapping->i_pages); + + /* Caller disabled irqs, so they are still disabled here */ local_irq_enable(); if (nr_dropped) - shmem_uncharge(folio->mapping->host, nr_dropped); - remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0); + shmem_uncharge(mapping->host, nr_dropped); + + remap_page(origin_folio, 1 << order, + folio_test_anon(origin_folio) ? + RMP_USE_SHARED_ZEROPAGE : 0); /* - * set page to its compound_head when split to non order-0 pages, so - * we can skip unlocking it below, since PG_locked is transferred to - * the compound_head of the page and the caller will unlock it. + * At this point, folio should contain the specified page. + * For uniform split, it is left for caller to unlock. + * For buddy allocator like split, the first after-split folio is left + * for caller to unlock. */ - if (new_order) - page = compound_head(page); - - for (i = 0; i < nr; i += new_nr) { - struct page *subpage = head + i; - struct folio *new_folio = page_folio(subpage); - if (subpage == page) + for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) { + next = folio_next(new_folio); + if (new_folio == page_folio(lock_at)) continue; - folio_unlock(new_folio); + folio_unlock(new_folio); /* * Subpages may be freed if there wasn't any mapping * like if add_to_swap() is running on a lru page that @@ -3369,81 +3641,90 @@ static void __split_huge_page(struct page *page, struct list_head *list, * requires taking the lru_lock so we do the put_page * of the tail pages after the split is complete. */ - free_page_and_swap_cache(subpage); + free_page_and_swap_cache(&new_folio->page); } + return ret; } -/* Racy check whether the huge page can be split */ -bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) +bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns) { - int extra_pins; + if (folio_test_anon(folio)) { + /* order-1 is not supported for anonymous THP. */ + VM_WARN_ONCE(warns && new_order == 1, + "Cannot split to order-1 folio"); + return new_order != 1; + } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + !mapping_large_folio_support(folio->mapping)) { + /* + * No split if the file system does not support large folio. + * Note that we might still have THPs in such mappings due to + * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping + * does not actually support large folios properly. + */ + VM_WARN_ONCE(warns, + "Cannot split file folio to non-0 order"); + return false; + } - /* Additional pins from page cache */ - if (folio_test_anon(folio)) - extra_pins = folio_test_swapcache(folio) ? 
- folio_nr_pages(folio) : 0; - else - extra_pins = folio_nr_pages(folio); - if (pextra_pins) - *pextra_pins = extra_pins; - return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - - caller_pins; + /* Only swapping a whole PMD-mapped folio is supported */ + if (folio_test_swapcache(folio)) { + VM_WARN_ONCE(warns, + "Cannot split swapcache folio to non-0 order"); + return false; + } + + return true; +} + +/* See comments in non_uniform_split_supported() */ +bool uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns) +{ + if (folio_test_anon(folio)) { + VM_WARN_ONCE(warns && new_order == 1, + "Cannot split to order-1 folio"); + return new_order != 1; + } else if (new_order) { + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + !mapping_large_folio_support(folio->mapping)) { + VM_WARN_ONCE(warns, + "Cannot split file folio to non-0 order"); + return false; + } + } + + if (new_order && folio_test_swapcache(folio)) { + VM_WARN_ONCE(warns, + "Cannot split swapcache folio to non-0 order"); + return false; + } + + return true; } /* - * This function splits a large folio into smaller folios of order @new_order. - * @page can point to any page of the large folio to split. The split operation - * does not change the position of @page. - * - * Prerequisites: - * - * 1) The caller must hold a reference on the @page's owning folio, also known - * as the large folio. + * __folio_split: split a folio at @split_at to a @new_order folio + * @folio: folio to split + * @new_order: the order of the new folio + * @split_at: a page within the new folio + * @lock_at: a page within @folio to be left locked to caller + * @list: after-split folios will be put on it if non NULL + * @uniform_split: perform uniform split or not (non-uniform split) * - * 2) The large folio must be locked. + * It calls __split_unmapped_folio() to perform uniform and non-uniform split. + * It is in charge of checking whether the split is supported or not and + * preparing @folio for __split_unmapped_folio(). * - * 3) The folio must not be pinned. Any unexpected folio references, including - * GUP pins, will result in the folio not getting split; instead, the caller - * will receive an -EAGAIN. - * - * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not - * supported for non-file-backed folios, because folio->_deferred_list, which - * is used by partially mapped folios, is stored in subpage 2, but an order-1 - * folio only has subpages 0 and 1. File-backed order-1 folios are supported, - * since they do not use _deferred_list. - * - * After splitting, the caller's folio reference will be transferred to @page, - * resulting in a raised refcount of @page after this call. The other pages may - * be freed if they are not mapped. - * - * If @list is null, tail pages will be added to LRU list, otherwise, to @list. - * - * Pages in @new_order will inherit the mapping, flags, and so on from the - * huge page. - * - * Returns 0 if the huge page was split successfully. - * - * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if - * the folio was concurrently removed from the page cache. - * - * Returns -EBUSY when trying to split the huge zeropage, if the folio is - * under writeback, if fs-specific folio metadata cannot currently be - * released, or if some unexpected race happened (e.g., anon VMA disappeared, - * truncation). - * - * Callers should ensure that the order respects the address space mapping - * min-order if one is set for non-anonymous folios. 
- * - * Returns -EINVAL when trying to split to an order that is incompatible - * with the folio. Splitting to order 0 is compatible with all folios. + * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be + * split but not to @new_order, the caller needs to check) */ -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, - unsigned int new_order) +static int __folio_split(struct folio *folio, unsigned int new_order, + struct page *split_at, struct page *lock_at, + struct list_head *list, bool uniform_split) { - struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); - /* reset xarray order to new order after split */ - XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); + XA_STATE(xas, &folio->mapping->i_pages, folio->index); bool is_anon = folio_test_anon(folio); struct address_space *mapping = NULL; struct anon_vma *anon_vma = NULL; @@ -3455,38 +3736,17 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + if (folio != page_folio(split_at) || folio != page_folio(lock_at)) + return -EINVAL; + if (new_order >= folio_order(folio)) return -EINVAL; - if (is_anon) { - /* order-1 is not supported for anonymous THP. */ - if (new_order == 1) { - VM_WARN_ONCE(1, "Cannot split to order-1 folio"); - return -EINVAL; - } - } else if (new_order) { - /* Split shmem folio to non-zero order not supported */ - if (shmem_mapping(folio->mapping)) { - VM_WARN_ONCE(1, - "Cannot split shmem folio to non-0 order"); - return -EINVAL; - } - /* - * No split if the file system does not support large folio. - * Note that we might still have THPs in such mappings due to - * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping - * does not actually support large folios properly. - */ - if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && - !mapping_large_folio_support(folio->mapping)) { - VM_WARN_ONCE(1, - "Cannot split file folio to non-0 order"); - return -EINVAL; - } - } + if (uniform_split && !uniform_split_supported(folio, new_order, true)) + return -EINVAL; - /* Only swapping a whole PMD-mapped folio is supported */ - if (folio_test_swapcache(folio) && new_order) + if (!uniform_split && + !non_uniform_split_supported(folio, new_order, true)) return -EINVAL; is_hzp = is_huge_zero_folio(folio); @@ -3522,6 +3782,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, mapping = folio->mapping; /* Truncated ? */ + /* + * TODO: add support for large shmem folio in swap cache. + * When shmem is in swap cache, mapping is NULL and + * folio_test_swapcache() is true. + */ if (!mapping) { ret = -EBUSY; goto out; @@ -3543,21 +3808,24 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, goto out; } - xas_split_alloc(&xas, folio, folio_order(folio), gfp); - if (xas_error(&xas)) { - ret = xas_error(&xas); - goto out; + if (uniform_split) { + xas_set_order(&xas, folio->index, new_order); + xas_split_alloc(&xas, folio, folio_order(folio), gfp); + if (xas_error(&xas)) { + ret = xas_error(&xas); + goto out; + } } anon_vma = NULL; i_mmap_lock_read(mapping); /* - *__split_huge_page() may need to trim off pages beyond EOF: - * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, - * which cannot be nested inside the page tree lock. 
So note - * end now: i_size itself may be changed at any moment, but - * folio lock is good enough to serialize the trimming. + *__split_unmapped_folio() may need to trim off pages beyond + * EOF: but on 32-bit, i_size_read() takes an irq-unsafe + * seqlock, which cannot be nested inside the page tree lock. + * So note end now: i_size itself may be changed at any moment, + * but folio lock is good enough to serialize the trimming. */ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); if (shmem_mapping(mapping)) @@ -3611,7 +3879,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, if (mapping) { int nr = folio_nr_pages(folio); - xas_split(&xas, folio, folio_order(folio)); if (folio_test_pmd_mappable(folio) && new_order < HPAGE_PMD_ORDER) { if (folio_test_swapbacked(folio)) { @@ -3625,12 +3892,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, } } - if (is_anon) { - mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); - mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order)); - } - __split_huge_page(page, list, end, new_order); - ret = 0; + ret = __split_unmapped_folio(folio, new_order, + split_at, lock_at, list, end, &xas, mapping, + uniform_split); } else { spin_unlock(&ds_queue->split_queue_lock); fail: @@ -3656,6 +3920,90 @@ out: return ret; } +/* + * This function splits a large folio into smaller folios of order @new_order. + * @page can point to any page of the large folio to split. The split operation + * does not change the position of @page. + * + * Prerequisites: + * + * 1) The caller must hold a reference on the @page's owning folio, also known + * as the large folio. + * + * 2) The large folio must be locked. + * + * 3) The folio must not be pinned. Any unexpected folio references, including + * GUP pins, will result in the folio not getting split; instead, the caller + * will receive an -EAGAIN. + * + * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not + * supported for non-file-backed folios, because folio->_deferred_list, which + * is used by partially mapped folios, is stored in subpage 2, but an order-1 + * folio only has subpages 0 and 1. File-backed order-1 folios are supported, + * since they do not use _deferred_list. + * + * After splitting, the caller's folio reference will be transferred to @page, + * resulting in a raised refcount of @page after this call. The other pages may + * be freed if they are not mapped. + * + * If @list is null, tail pages will be added to LRU list, otherwise, to @list. + * + * Pages in @new_order will inherit the mapping, flags, and so on from the + * huge page. + * + * Returns 0 if the huge page was split successfully. + * + * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if + * the folio was concurrently removed from the page cache. + * + * Returns -EBUSY when trying to split the huge zeropage, if the folio is + * under writeback, if fs-specific folio metadata cannot currently be + * released, or if some unexpected race happened (e.g., anon VMA disappeared, + * truncation). + * + * Callers should ensure that the order respects the address space mapping + * min-order if one is set for non-anonymous folios. + * + * Returns -EINVAL when trying to split to an order that is incompatible + * with the folio. Splitting to order 0 is compatible with all folios. 
+ */ +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order) +{ + struct folio *folio = page_folio(page); + + return __folio_split(folio, new_order, &folio->page, page, list, true); +} + +/* + * folio_split: split a folio at @split_at to a @new_order folio + * @folio: folio to split + * @new_order: the order of the new folio + * @split_at: a page within the new folio + * + * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be + * split but not to @new_order, the caller needs to check) + * + * It has the same prerequisites and returns as + * split_huge_page_to_list_to_order(). + * + * Split a folio at @split_at to a new_order folio, leave the + * remaining subpages of the original folio as large as possible. For example, + * in the case of splitting an order-9 folio at its third order-3 subpages to + * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio. + * After the split, there will be a group of folios with different orders and + * the new folio containing @split_at is marked in bracket: + * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8]. + * + * After split, folio is left locked for caller. + */ +int folio_split(struct folio *folio, unsigned int new_order, + struct page *split_at, struct list_head *list) +{ + return __folio_split(folio, new_order, split_at, &folio->page, list, + false); +} + int min_order_for_split(struct folio *folio) { if (folio_test_anon(folio)) @@ -3740,7 +4088,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped) /* * Exclude swapcache: originally to avoid a corrupt deferred split - * queue. Nowadays that is fully prevented by mem_cgroup_swapout(); + * queue. Nowadays that is fully prevented by memcg1_swapout(); * but if page reclaim is already handling the same folio, it is * unnecessary to handle it again in the shrinker, so excluding * swapcache here may still be a useful optimization. 
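
The non-uniform split documented above halves the folio that contains @split_at one order at a time, releasing the half that does not contain it at each step. A minimal, self-contained sketch (illustrative values only, not kernel code; it ignores special cases such as the order-1 anonymous-folio restriction and swapcache handling) that reproduces the order layout given in the folio_split() comment:

#include <stdio.h>

/* Simulate the buddy-allocator-like split of __split_unmapped_folio(). */
int main(void)
{
	int order = 9;       /* order of the original folio */
	int new_order = 3;   /* target order for the folio containing split_at */
	long split_at = 16;  /* page offset of split_at: the third order-3 chunk */
	long start = 0;      /* page offset of the folio currently being split */

	for (int split_order = order - 1; split_order >= new_order; split_order--) {
		long half = 1L << split_order;

		if (split_at < start + half) {
			/* split_at lies in the low half; the high half is released. */
			printf("released: order-%d at page offset %ld\n",
			       split_order, start + half);
		} else {
			/* split_at lies in the high half; the low half is released. */
			printf("released: order-%d at page offset %ld\n",
			       split_order, start);
			start += half;
		}
	}
	printf("kept (contains split_at): order-%d at page offset %ld\n",
	       new_order, start);
	return 0;
}

Listed in ascending page-offset order, the output corresponds to [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8], matching the example in the folio_split() documentation above.
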
@@ -3975,7 +4323,8 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) } static int split_huge_pages_pid(int pid, unsigned long vaddr_start, - unsigned long vaddr_end, unsigned int new_order) + unsigned long vaddr_end, unsigned int new_order, + long in_folio_offset) { int ret = 0; struct task_struct *task; @@ -4059,8 +4408,16 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, if (!folio_test_anon(folio) && folio->mapping != mapping) goto unlock; - if (!split_folio_to_order(folio, target_order)) - split++; + if (in_folio_offset < 0 || + in_folio_offset >= folio_nr_pages(folio)) { + if (!split_folio_to_order(folio, target_order)) + split++; + } else { + struct page *split_at = folio_page(folio, + in_folio_offset); + if (!folio_split(folio, target_order, split_at, NULL)) + split++; + } unlock: @@ -4083,7 +4440,8 @@ out: } static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, - pgoff_t off_end, unsigned int new_order) + pgoff_t off_end, unsigned int new_order, + long in_folio_offset) { struct filename *file; struct file *candidate; @@ -4132,8 +4490,15 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, if (folio->mapping != mapping) goto unlock; - if (!split_folio_to_order(folio, target_order)) - split++; + if (in_folio_offset < 0 || in_folio_offset >= nr_pages) { + if (!split_folio_to_order(folio, target_order)) + split++; + } else { + struct page *split_at = folio_page(folio, + in_folio_offset); + if (!folio_split(folio, target_order, split_at, NULL)) + split++; + } unlock: folio_unlock(folio); @@ -4166,6 +4531,7 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, int pid; unsigned long vaddr_start, vaddr_end; unsigned int new_order = 0; + long in_folio_offset = -1; ret = mutex_lock_interruptible(&split_debug_mutex); if (ret) @@ -4194,30 +4560,33 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, goto out; } - ret = sscanf(tok_buf, "0x%lx,0x%lx,%d", &off_start, - &off_end, &new_order); - if (ret != 2 && ret != 3) { + ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end, + &new_order, &in_folio_offset); + if (ret != 2 && ret != 3 && ret != 4) { ret = -EINVAL; goto out; } - ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order); + ret = split_huge_pages_in_file(file_path, off_start, off_end, + new_order, in_folio_offset); if (!ret) ret = input_len; goto out; } - ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order); + ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start, + &vaddr_end, &new_order, &in_folio_offset); if (ret == 1 && pid == 1) { split_huge_pages_all(); ret = strlen(input_buf); goto out; - } else if (ret != 3 && ret != 4) { + } else if (ret != 3 && ret != 4 && ret != 5) { ret = -EINVAL; goto out; } - ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order); + ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order, + in_folio_offset); if (!ret) ret = strlen(input_buf); out: diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 318624c96584..6fccfe6d046c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -14,9 +14,11 @@ #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/compiler.h> +#include <linux/cpumask.h> #include <linux/cpuset.h> #include <linux/mutex.h> #include <linux/memblock.h> +#include <linux/minmax.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/sched/mm.h> @@ -40,6 +42,7 @@ 
#include <asm/page.h> #include <asm/pgalloc.h> #include <asm/tlb.h> +#include <asm/setup.h> #include <linux/io.h> #include <linux/hugetlb.h> @@ -48,19 +51,33 @@ #include <linux/page_owner.h> #include "internal.h" #include "hugetlb_vmemmap.h" +#include "hugetlb_cma.h" #include <linux/page-isolation.h> int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; -#ifdef CONFIG_CMA -static struct cma *hugetlb_cma[MAX_NUMNODES]; -static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; -#endif -static unsigned long hugetlb_cma_size __initdata; - __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; +static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata; + +/* + * Due to ordering constraints across the init code for various + * architectures, hugetlb hstate cmdline parameters can't simply + * be early_param. early_param might call the setup function + * before valid hugetlb page sizes are determined, leading to + * incorrect rejection of valid hugepagesz= options. + * + * So, record the parameters early and consume them whenever the + * init code is ready for them, by calling hugetlb_parse_params(). + */ + +/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */ +#define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1) +struct hugetlb_cmdline { + char *val; + int (*setup)(char *val); +}; /* for command line parsing */ static struct hstate * __initdata parsed_hstate; @@ -68,6 +85,21 @@ static unsigned long __initdata default_hstate_max_huge_pages; static bool __initdata parsed_valid_hugepagesz = true; static bool __initdata parsed_default_hugepagesz; static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; +static unsigned long hugepage_allocation_threads __initdata; + +static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata; +static int hstate_cmdline_index __initdata; +static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata; +static int hugetlb_param_index __initdata; +static __init int hugetlb_add_param(char *s, int (*setup)(char *val)); +static __init void hugetlb_parse_params(void); + +#define hugetlb_early_param(str, func) \ +static __init int func##args(char *s) \ +{ \ + return hugetlb_add_param(s, func); \ +} \ +early_param(str, func##args) /* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, @@ -93,12 +125,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma); static void hugetlb_free_folio(struct folio *folio) { -#ifdef CONFIG_CMA - int nid = folio_nid(folio); - - if (cma_free_folio(hugetlb_cma[nid], folio)) + if (folio_test_hugetlb_cma(folio)) { + hugetlb_cma_free_folio(folio); return; -#endif + } + folio_put(folio); } @@ -1455,27 +1486,11 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, if (nid == NUMA_NO_NODE) nid = numa_mem_id(); retry: - folio = NULL; -#ifdef CONFIG_CMA - { - int node; - - if (hugetlb_cma[nid]) - folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); - - if (!folio && !(gfp_mask & __GFP_THISNODE)) { - for_each_node_mask(node, *nodemask) { - if (node == nid || !hugetlb_cma[node]) - continue; - - folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); - if (folio) - break; - } - } - } -#endif + folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask); if (!folio) { + if (hugetlb_cma_exclusive_alloc()) + return NULL; + folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); if (!folio) return NULL; @@ -1634,7 +1649,6 @@ static 
void __update_and_free_hugetlb_folio(struct hstate *h, folio_ref_unfreeze(folio, 1); - INIT_LIST_HEAD(&folio->_deferred_list); hugetlb_free_folio(folio); } @@ -2245,12 +2259,21 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, goto out_unlock; spin_unlock_irq(&hugetlb_lock); - folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); + folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); if (!folio) return NULL; + hugetlb_vmemmap_optimize_folio(h, folio); + spin_lock_irq(&hugetlb_lock); /* + * nr_huge_pages needs to be adjusted within the same lock cycle + * as surplus_pages, otherwise it might confuse + * persistent_huge_pages() momentarily. + */ + __prep_account_new_huge_page(h, nid); + + /* * We could have raced with the pool size change. * Double check that and simply deallocate the new page * if we would end up overcommiting the surpluses. Abuse @@ -3148,6 +3171,56 @@ out_end_reservation: return ERR_PTR(-ENOSPC); } +static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) +{ + struct huge_bootmem_page *m; + int listnode = nid; + + if (hugetlb_early_cma(h)) + m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact); + else { + if (node_exact) + m = memblock_alloc_exact_nid_raw(huge_page_size(h), + huge_page_size(h), 0, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + else { + m = memblock_alloc_try_nid_raw(huge_page_size(h), + huge_page_size(h), 0, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + /* + * For pre-HVO to work correctly, pages need to be on + * the list for the node they were actually allocated + * from. That node may be different in the case of + * fallback by memblock_alloc_try_nid_raw. So, + * extract the actual node first. + */ + if (m) + listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); + } + + if (m) { + m->flags = 0; + m->cma = NULL; + } + } + + if (m) { + /* + * Use the beginning of the huge page to store the + * huge_bootmem_page struct (until gather_bootmem + * puts them into the mem_map). + * + * Put them into a private list first because mem_map + * is not up yet. + */ + INIT_LIST_HEAD(&m->list); + list_add(&m->list, &huge_boot_pages[listnode]); + m->hstate = h; + } + + return m; +} + int alloc_bootmem_huge_page(struct hstate *h, int nid) __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); int __alloc_bootmem_huge_page(struct hstate *h, int nid) @@ -3157,22 +3230,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) /* do node specific alloc */ if (nid != NUMA_NO_NODE) { - m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), - 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); + m = alloc_bootmem(h, node, true); if (!m) return 0; goto found; } + /* allocate from next node when distributing huge pages */ - for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { - m = memblock_alloc_try_nid_raw( - huge_page_size(h), huge_page_size(h), - 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); - /* - * Use the beginning of the huge page to store the - * huge_bootmem_page struct (until gather_bootmem - * puts them into the mem_map). 
- */ + for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) { + m = alloc_bootmem(h, node, false); if (!m) return 0; goto found; @@ -3189,10 +3255,7 @@ found: */ memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), huge_page_size(h) - PAGE_SIZE); - /* Put them into a private list first because mem_map is not up yet */ - INIT_LIST_HEAD(&m->list); - list_add(&m->list, &huge_boot_pages[node]); - m->hstate = h; + return 1; } @@ -3210,7 +3273,6 @@ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { struct page *page = pfn_to_page(pfn); - __ClearPageReserved(folio_page(folio, pfn - head_pfn)); __init_single_page(page, pfn, zone, nid); prep_compound_tail((struct page *)folio, pfn - head_pfn); ret = page_ref_freeze(page, 1); @@ -3234,6 +3296,42 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio, prep_compound_head((struct page *)folio, huge_page_order(h)); } +static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) +{ + return m->flags & HUGE_BOOTMEM_HVO; +} + +static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m) +{ + return m->flags & HUGE_BOOTMEM_CMA; +} + +/* + * memblock-allocated pageblocks might not have the migrate type set + * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE) + * here, or MIGRATE_CMA if this was a page allocated through an early CMA + * reservation. + * + * In case of vmemmap optimized folios, the tail vmemmap pages are mapped + * read-only, but that's ok - for sparse vmemmap this does not write to + * the page structure. + */ +static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, + struct hstate *h) +{ + unsigned long nr_pages = pages_per_huge_page(h), i; + + WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); + + for (i = 0; i < nr_pages; i += pageblock_nr_pages) { + if (folio_test_hugetlb_cma(folio)) + init_cma_pageblock(folio_page(folio, i)); + else + set_pageblock_migratetype(folio_page(folio, i), + MIGRATE_MOVABLE); + } +} + static void __init prep_and_add_bootmem_folios(struct hstate *h, struct list_head *folio_list) { @@ -3241,7 +3339,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, struct folio *folio, *tmp_f; /* Send list for bulk vmemmap optimization processing */ - hugetlb_vmemmap_optimize_folios(h, folio_list); + hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list); list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { if (!folio_test_hugetlb_vmemmap_optimized(folio)) { @@ -3255,6 +3353,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, HUGETLB_VMEMMAP_RESERVE_PAGES, pages_per_huge_page(h)); } + hugetlb_bootmem_init_migratetype(folio, h); /* Subdivide locks to achieve better parallel performance */ spin_lock_irqsave(&hugetlb_lock, flags); __prep_account_new_huge_page(h, folio_nid(folio)); @@ -3263,6 +3362,57 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, } } +bool __init hugetlb_bootmem_page_zones_valid(int nid, + struct huge_bootmem_page *m) +{ + unsigned long start_pfn; + bool valid; + + if (m->flags & HUGE_BOOTMEM_ZONES_VALID) { + /* + * Already validated, skip check. 
+ */ + return true; + } + + if (hugetlb_bootmem_page_earlycma(m)) { + valid = cma_validate_zones(m->cma); + goto out; + } + + start_pfn = virt_to_phys(m) >> PAGE_SHIFT; + + valid = !pfn_range_intersects_zones(nid, start_pfn, + pages_per_huge_page(m->hstate)); +out: + if (!valid) + hstate_boot_nrinvalid[hstate_index(m->hstate)]++; + + return valid; +} + +/* + * Free a bootmem page that was found to be invalid (intersecting with + * multiple zones). + * + * Since it intersects with multiple zones, we can't just do a free + * operation on all pages at once, but instead have to walk all + * pages, freeing them one by one. + */ +static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, + struct hstate *h) +{ + unsigned long npages = pages_per_huge_page(h); + unsigned long pfn; + + while (npages--) { + pfn = page_to_pfn(page); + __init_page_from_nid(pfn, nid); + free_reserved_page(page); + page++; + } +} + /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. @@ -3270,14 +3420,25 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, static void __init gather_bootmem_prealloc_node(unsigned long nid) { LIST_HEAD(folio_list); - struct huge_bootmem_page *m; + struct huge_bootmem_page *m, *tm; struct hstate *h = NULL, *prev_h = NULL; - list_for_each_entry(m, &huge_boot_pages[nid], list) { + list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { struct page *page = virt_to_page(m); struct folio *folio = (void *)page; h = m->hstate; + if (!hugetlb_bootmem_page_zones_valid(nid, m)) { + /* + * Can't use this page. Initialize the + * page structures if that hasn't already + * been done, and give them to the page + * allocator. + */ + hugetlb_bootmem_free_invalid_page(nid, page, h); + continue; + } + /* * It is possible to have multiple huge page sizes (hstates) * in this list. If so, process each size separately. @@ -3292,14 +3453,30 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid) hugetlb_folio_init_vmemmap(folio, h, HUGETLB_VMEMMAP_RESERVE_PAGES); init_new_hugetlb_folio(h, folio); + + if (hugetlb_bootmem_page_prehvo(m)) + /* + * If pre-HVO was done, just set the + * flag, the HVO code will then skip + * this folio. + */ + folio_set_hugetlb_vmemmap_optimized(folio); + + if (hugetlb_bootmem_page_earlycma(m)) + folio_set_hugetlb_cma(folio); + list_add(&folio->lru, &folio_list); /* * We need to restore the 'stolen' pages to totalram_pages * in order to fix confusing memory reports from free(1) and * other side-effects, like CommitLimit going negative. + * + * For CMA pages, this is done in init_cma_pageblock + * (via hugetlb_bootmem_init_migratetype), so skip it here. */ - adjust_managed_page_count(page, pages_per_huge_page(h)); + if (!folio_test_hugetlb_cma(folio)) + adjust_managed_page_count(page, pages_per_huge_page(h)); cond_resched(); } @@ -3439,32 +3616,44 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) .numa_aware = true }; + unsigned long jiffies_start; + unsigned long jiffies_end; + job.thread_fn = hugetlb_pages_alloc_boot_node; job.start = 0; job.size = h->max_huge_pages; /* - * job.max_threads is twice the num_node_state(N_MEMORY), + * job.max_threads is 25% of the available cpu threads by default. * - * Tests below indicate that a multiplier of 2 significantly improves - * performance, and although larger values also provide improvements, - * the gains are marginal. 
+ * On large servers with terabytes of memory, huge page allocation + * can consume a considerably amount of time. * - * Therefore, choosing 2 as the multiplier strikes a good balance between - * enhancing parallel processing capabilities and maintaining efficient - * resource management. + * Tests below show how long it takes to allocate 1 TiB of memory with 2MiB huge pages. + * 2MiB huge pages. Using more threads can significantly improve allocation time. * - * +------------+-------+-------+-------+-------+-------+ - * | multiplier | 1 | 2 | 3 | 4 | 5 | - * +------------+-------+-------+-------+-------+-------+ - * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms | - * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms | - * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms | - * +------------+-------+-------+-------+-------+-------+ + * +-----------------------+-------+-------+-------+-------+-------+ + * | threads | 8 | 16 | 32 | 64 | 128 | + * +-----------------------+-------+-------+-------+-------+-------+ + * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s | + * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s | + * +-----------------------+-------+-------+-------+-------+-------+ */ - job.max_threads = num_node_state(N_MEMORY) * 2; - job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; + if (hugepage_allocation_threads == 0) { + hugepage_allocation_threads = num_online_cpus() / 4; + hugepage_allocation_threads = max(hugepage_allocation_threads, 1); + } + + job.max_threads = hugepage_allocation_threads; + job.min_chunk = h->max_huge_pages / hugepage_allocation_threads; + + jiffies_start = jiffies; padata_do_multithreaded(&job); + jiffies_end = jiffies; + + pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n", + jiffies_to_msecs(jiffies_end - jiffies_start), + hugepage_allocation_threads); return h->nr_huge_pages; } @@ -3483,23 +3672,17 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long allocated; - static bool initialized __initdata; - /* skip gigantic hugepages allocation if hugetlb_cma enabled */ - if (hstate_is_gigantic(h) && hugetlb_cma_size) { + /* + * Skip gigantic hugepages allocation if early CMA + * reservations are not available. 
+ */ + if (hstate_is_gigantic(h) && hugetlb_cma_total_size() && + !hugetlb_early_cma(h)) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); return; } - /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */ - if (!initialized) { - int i = 0; - - for (i = 0; i < MAX_NUMNODES; i++) - INIT_LIST_HEAD(&huge_boot_pages[i]); - initialized = true; - } - /* do node specific alloc */ if (hugetlb_hstate_alloc_pages_specific_nodes(h)) return; @@ -3532,7 +3715,7 @@ static void __init hugetlb_init_hstates(void) */ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) continue; - if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) + if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER) continue; for_each_hstate(h2) { if (h2 == h) @@ -3547,13 +3730,20 @@ static void __init hugetlb_init_hstates(void) static void __init report_hugepages(void) { struct hstate *h; + unsigned long nrinvalid; for_each_hstate(h) { char buf[32]; + nrinvalid = hstate_boot_nrinvalid[hstate_index(h)]; + h->max_huge_pages -= nrinvalid; + string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", buf, h->free_huge_pages); + if (nrinvalid) + pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n", + buf, nrinvalid, nrinvalid > 1 ? "s" : ""); pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); } @@ -4427,14 +4617,6 @@ static void hugetlb_register_all_nodes(void) { } #endif -#ifdef CONFIG_CMA -static void __init hugetlb_cma_check(void); -#else -static inline __init void hugetlb_cma_check(void) -{ -} -#endif - static void __init hugetlb_sysfs_init(void) { struct hstate *h; @@ -4559,8 +4741,6 @@ void __init hugetlb_add_hstate(unsigned int order) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); INIT_LIST_HEAD(&h->hugepage_activelist); - h->next_nid_to_alloc = first_memory_node; - h->next_nid_to_free = first_memory_node; snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/SZ_1K); @@ -4585,6 +4765,44 @@ static void __init hugepages_clear_pages_in_node(void) } } +static __init int hugetlb_add_param(char *s, int (*setup)(char *)) +{ + size_t len; + char *p; + + if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS) + return -EINVAL; + + len = strlen(s) + 1; + if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf)) + return -EINVAL; + + p = &hstate_cmdline_buf[hstate_cmdline_index]; + memcpy(p, s, len); + hstate_cmdline_index += len; + + hugetlb_params[hugetlb_param_index].val = p; + hugetlb_params[hugetlb_param_index].setup = setup; + + hugetlb_param_index++; + + return 0; +} + +static __init void hugetlb_parse_params(void) +{ + int i; + struct hugetlb_cmdline *hcp; + + for (i = 0; i < hugetlb_param_index; i++) { + hcp = &hugetlb_params[i]; + + hcp->setup(hcp->val); + } + + hugetlb_cma_validate_params(); +} + /* * hugepages command line processing * hugepages normally follows a valid hugepagsz or default_hugepagsz @@ -4604,7 +4822,7 @@ static int __init hugepages_setup(char *s) if (!parsed_valid_hugepagesz) { pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); parsed_valid_hugepagesz = true; - return 1; + return -EINVAL; } /* @@ -4658,24 +4876,16 @@ static int __init hugepages_setup(char *s) } } - /* - * Global state is always initialized later in hugetlb_init. 
- * But we need to allocate gigantic hstates here early to still - * use the bootmem allocator. - */ - if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) - hugetlb_hstate_alloc_pages(parsed_hstate); - last_mhp = mhp; - return 1; + return 0; invalid: pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); hugepages_clear_pages_in_node(); - return 1; + return -EINVAL; } -__setup("hugepages=", hugepages_setup); +hugetlb_early_param("hugepages", hugepages_setup); /* * hugepagesz command line processing @@ -4694,7 +4904,7 @@ static int __init hugepagesz_setup(char *s) if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); - return 1; + return -EINVAL; } h = size_to_hstate(size); @@ -4709,7 +4919,7 @@ static int __init hugepagesz_setup(char *s) if (!parsed_default_hugepagesz || h != &default_hstate || default_hstate.max_huge_pages) { pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); - return 1; + return -EINVAL; } /* @@ -4719,14 +4929,14 @@ static int __init hugepagesz_setup(char *s) */ parsed_hstate = h; parsed_valid_hugepagesz = true; - return 1; + return 0; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; - return 1; + return 0; } -__setup("hugepagesz=", hugepagesz_setup); +hugetlb_early_param("hugepagesz", hugepagesz_setup); /* * default_hugepagesz command line input @@ -4740,14 +4950,14 @@ static int __init default_hugepagesz_setup(char *s) parsed_valid_hugepagesz = false; if (parsed_default_hugepagesz) { pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); - return 1; + return -EINVAL; } size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); - return 1; + return -EINVAL; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); @@ -4764,17 +4974,74 @@ static int __init default_hugepagesz_setup(char *s) */ if (default_hstate_max_huge_pages) { default_hstate.max_huge_pages = default_hstate_max_huge_pages; - for_each_online_node(i) - default_hstate.max_huge_pages_node[i] = - default_hugepages_in_node[i]; - if (hstate_is_gigantic(&default_hstate)) - hugetlb_hstate_alloc_pages(&default_hstate); + /* + * Since this is an early parameter, we can't check + * NUMA node state yet, so loop through MAX_NUMNODES. + */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (default_hugepages_in_node[i] != 0) + default_hstate.max_huge_pages_node[i] = + default_hugepages_in_node[i]; + } default_hstate_max_huge_pages = 0; } + return 0; +} +hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); + +static bool __hugetlb_bootmem_allocated __initdata; + +bool __init hugetlb_bootmem_allocated(void) +{ + return __hugetlb_bootmem_allocated; +} + +void __init hugetlb_bootmem_alloc(void) +{ + struct hstate *h; + int i; + + if (__hugetlb_bootmem_allocated) + return; + + for (i = 0; i < MAX_NUMNODES; i++) + INIT_LIST_HEAD(&huge_boot_pages[i]); + + hugetlb_parse_params(); + + for_each_hstate(h) { + h->next_nid_to_alloc = first_online_node; + h->next_nid_to_free = first_online_node; + + if (hstate_is_gigantic(h)) + hugetlb_hstate_alloc_pages(h); + } + + __hugetlb_bootmem_allocated = true; +} + +/* + * hugepage_alloc_threads command line parsing. + * + * When set, use this specific number of threads for the boot + * allocation of hugepages. 
+ */ +static int __init hugepage_alloc_threads_setup(char *s) +{ + unsigned long allocation_threads; + + if (kstrtoul(s, 0, &allocation_threads) != 0) + return 1; + + if (allocation_threads == 0) + return 1; + + hugepage_allocation_threads = allocation_threads; + return 1; } -__setup("default_hugepagesz=", default_hugepagesz_setup); +__setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup); static unsigned int allowed_mems_nr(struct hstate *h) { @@ -7625,163 +7892,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), ALIGN_DOWN(vma->vm_end, PUD_SIZE)); } - -#ifdef CONFIG_CMA -static bool cma_reserve_called __initdata; - -static int __init cmdline_parse_hugetlb_cma(char *p) -{ - int nid, count = 0; - unsigned long tmp; - char *s = p; - - while (*s) { - if (sscanf(s, "%lu%n", &tmp, &count) != 1) - break; - - if (s[count] == ':') { - if (tmp >= MAX_NUMNODES) - break; - nid = array_index_nospec(tmp, MAX_NUMNODES); - - s += count + 1; - tmp = memparse(s, &s); - hugetlb_cma_size_in_node[nid] = tmp; - hugetlb_cma_size += tmp; - - /* - * Skip the separator if have one, otherwise - * break the parsing. - */ - if (*s == ',') - s++; - else - break; - } else { - hugetlb_cma_size = memparse(p, &p); - break; - } - } - - return 0; -} - -early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); - -void __init hugetlb_cma_reserve(int order) -{ - unsigned long size, reserved, per_node; - bool node_specific_cma_alloc = false; - int nid; - - /* - * HugeTLB CMA reservation is required for gigantic - * huge pages which could not be allocated via the - * page allocator. Just warn if there is any change - * breaking this assumption. - */ - VM_WARN_ON(order <= MAX_PAGE_ORDER); - cma_reserve_called = true; - - if (!hugetlb_cma_size) - return; - - for (nid = 0; nid < MAX_NUMNODES; nid++) { - if (hugetlb_cma_size_in_node[nid] == 0) - continue; - - if (!node_online(nid)) { - pr_warn("hugetlb_cma: invalid node %d specified\n", nid); - hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; - hugetlb_cma_size_in_node[nid] = 0; - continue; - } - - if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { - pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", - nid, (PAGE_SIZE << order) / SZ_1M); - hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; - hugetlb_cma_size_in_node[nid] = 0; - } else { - node_specific_cma_alloc = true; - } - } - - /* Validate the CMA size again in case some invalid nodes specified. */ - if (!hugetlb_cma_size) - return; - - if (hugetlb_cma_size < (PAGE_SIZE << order)) { - pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", - (PAGE_SIZE << order) / SZ_1M); - hugetlb_cma_size = 0; - return; - } - - if (!node_specific_cma_alloc) { - /* - * If 3 GB area is requested on a machine with 4 numa nodes, - * let's allocate 1 GB on first three nodes and ignore the last one. 
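The even-split rule in the removed comment above (kept in the new mm/hugetlb_cma.c later in this patch) is easy to work through by hand; the userspace sketch below reproduces the arithmetic for a hugetlb_cma=3G request on a 4-node machine, assuming 1 GiB gigantic pages for the round_up step.

/*
 * Userspace walk-through of the even per-node split: 3 GB requested on
 * 4 nodes becomes 1 GB on each of the first three nodes, nothing on the
 * last. The 1 GiB gigantic page size is an assumption for round_up.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUND_UP(x, a)		(DIV_ROUND_UP(x, a) * (a))

int main(void)
{
	unsigned long long gigantic = 1ULL << 30;	/* assumed 1 GiB pages */
	unsigned long long total = 3ULL << 30;		/* hugetlb_cma=3G */
	unsigned long long nr_nodes = 4;
	unsigned long long per_node = DIV_ROUND_UP(total, nr_nodes);
	unsigned long long reserved = 0;

	for (unsigned long long nid = 0; nid < nr_nodes && reserved < total; nid++) {
		unsigned long long size = total - reserved;

		if (size > per_node)
			size = per_node;
		size = ROUND_UP(size, gigantic);
		reserved += size;
		printf("node %llu: reserve %llu MiB\n", nid, size >> 20);
	}
	printf("total reserved: %llu MiB\n", reserved >> 20);
	return 0;
}

The last node receives nothing because the reservation loop stops as soon as the running total covers the request.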
- */ - per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); - pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", - hugetlb_cma_size / SZ_1M, per_node / SZ_1M); - } - - reserved = 0; - for_each_online_node(nid) { - int res; - char name[CMA_MAX_NAME]; - - if (node_specific_cma_alloc) { - if (hugetlb_cma_size_in_node[nid] == 0) - continue; - - size = hugetlb_cma_size_in_node[nid]; - } else { - size = min(per_node, hugetlb_cma_size - reserved); - } - - size = round_up(size, PAGE_SIZE << order); - - snprintf(name, sizeof(name), "hugetlb%d", nid); - /* - * Note that 'order per bit' is based on smallest size that - * may be returned to CMA allocator in the case of - * huge page demotion. - */ - res = cma_declare_contiguous_nid(0, size, 0, - PAGE_SIZE << order, - HUGETLB_PAGE_ORDER, false, name, - &hugetlb_cma[nid], nid); - if (res) { - pr_warn("hugetlb_cma: reservation failed: err %d, node %d", - res, nid); - continue; - } - - reserved += size; - pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", - size / SZ_1M, nid); - - if (reserved >= hugetlb_cma_size) - break; - } - - if (!reserved) - /* - * hugetlb_cma_size is used to determine if allocations from - * cma are possible. Set to zero if no cma regions are set up. - */ - hugetlb_cma_size = 0; -} - -static void __init hugetlb_cma_check(void) -{ - if (!hugetlb_cma_size || cma_reserve_called) - return; - - pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); -} - -#endif /* CONFIG_CMA */ diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index bb9578bd99f9..58e895f3899a 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -101,10 +101,9 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup, int idx; for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) { - struct page_counter *fault_parent = NULL; - struct page_counter *rsvd_parent = NULL; + struct page_counter *fault, *fault_parent = NULL; + struct page_counter *rsvd, *rsvd_parent = NULL; unsigned long limit; - int ret; if (parent_h_cgroup) { fault_parent = hugetlb_cgroup_counter_from_cgroup( @@ -112,24 +111,22 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup, rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd( parent_h_cgroup, idx); } - page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup, - idx), - fault_parent, false); - page_counter_init( - hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), - rsvd_parent, false); + fault = hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx); + rsvd = hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx); + + page_counter_init(fault, fault_parent, false); + page_counter_init(rsvd, rsvd_parent, false); + + if (!cgroup_subsys_on_dfl(hugetlb_cgrp_subsys)) { + fault->track_failcnt = true; + rsvd->track_failcnt = true; + } limit = round_down(PAGE_COUNTER_MAX, pages_per_huge_page(&hstates[idx])); - ret = page_counter_set_max( - hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx), - limit); - VM_BUG_ON(ret); - ret = page_counter_set_max( - hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), - limit); - VM_BUG_ON(ret); + VM_BUG_ON(page_counter_set_max(fault, limit)); + VM_BUG_ON(page_counter_set_max(rsvd, limit)); } } diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c new file mode 100644 index 000000000000..e0f2d5c3a84c --- /dev/null +++ b/mm/hugetlb_cma.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/mm.h> +#include <linux/cma.h> +#include <linux/compiler.h> +#include <linux/mm_inline.h> + +#include <asm/page.h> +#include <asm/setup.h> + 
+#include <linux/hugetlb.h> +#include "internal.h" +#include "hugetlb_cma.h" + + +static struct cma *hugetlb_cma[MAX_NUMNODES]; +static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; +static bool hugetlb_cma_only; +static unsigned long hugetlb_cma_size __initdata; + +void hugetlb_cma_free_folio(struct folio *folio) +{ + int nid = folio_nid(folio); + + WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio)); +} + + +struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask, + int nid, nodemask_t *nodemask) +{ + int node; + int order = huge_page_order(h); + struct folio *folio = NULL; + + if (hugetlb_cma[nid]) + folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); + + if (!folio && !(gfp_mask & __GFP_THISNODE)) { + for_each_node_mask(node, *nodemask) { + if (node == nid || !hugetlb_cma[node]) + continue; + + folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); + if (folio) + break; + } + } + + if (folio) + folio_set_hugetlb_cma(folio); + + return folio; +} + +struct huge_bootmem_page * __init +hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact) +{ + struct cma *cma; + struct huge_bootmem_page *m; + int node = *nid; + + cma = hugetlb_cma[*nid]; + m = cma_reserve_early(cma, huge_page_size(h)); + if (!m) { + if (node_exact) + return NULL; + + for_each_online_node(node) { + cma = hugetlb_cma[node]; + if (!cma || node == *nid) + continue; + m = cma_reserve_early(cma, huge_page_size(h)); + if (m) { + *nid = node; + break; + } + } + } + + if (m) { + m->flags = HUGE_BOOTMEM_CMA; + m->cma = cma; + } + + return m; +} + + +static bool cma_reserve_called __initdata; + +static int __init cmdline_parse_hugetlb_cma(char *p) +{ + int nid, count = 0; + unsigned long tmp; + char *s = p; + + while (*s) { + if (sscanf(s, "%lu%n", &tmp, &count) != 1) + break; + + if (s[count] == ':') { + if (tmp >= MAX_NUMNODES) + break; + nid = array_index_nospec(tmp, MAX_NUMNODES); + + s += count + 1; + tmp = memparse(s, &s); + hugetlb_cma_size_in_node[nid] = tmp; + hugetlb_cma_size += tmp; + + /* + * Skip the separator if have one, otherwise + * break the parsing. + */ + if (*s == ',') + s++; + else + break; + } else { + hugetlb_cma_size = memparse(p, &p); + break; + } + } + + return 0; +} + +early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); + +static int __init cmdline_parse_hugetlb_cma_only(char *p) +{ + return kstrtobool(p, &hugetlb_cma_only); +} + +early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only); + +void __init hugetlb_cma_reserve(int order) +{ + unsigned long size, reserved, per_node; + bool node_specific_cma_alloc = false; + int nid; + + /* + * HugeTLB CMA reservation is required for gigantic + * huge pages which could not be allocated via the + * page allocator. Just warn if there is any change + * breaking this assumption. 
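For reference, cmdline_parse_hugetlb_cma() below in this file accepts either a single size ("hugetlb_cma=2G") or per-node "node:size" pairs ("hugetlb_cma=0:1G,1:2G"). The userspace sketch here mirrors that parsing loop; the suffix handling is a simplified stand-in for the kernel's memparse() and only understands K/M/G.

/*
 * Userspace sketch of the "hugetlb_cma=" syntax: a single size or a
 * comma-separated list of <node>:<size> pairs. parse_size() is a
 * simplified memparse() substitute for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 8

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 30; (*end)++; break;
	case 'M': case 'm': v <<= 20; (*end)++; break;
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	const char *arg = "0:1G,1:2G";	/* example command line value */
	unsigned long long per_node[MAX_NODES] = { 0 }, total = 0;
	const char *s = arg;
	unsigned long nid;
	int count;

	while (*s) {
		if (sscanf(s, "%lu%n", &nid, &count) != 1)
			break;
		if (s[count] == ':' && nid < MAX_NODES) {
			char *end;
			unsigned long long sz = parse_size(s + count + 1, &end);

			per_node[nid] = sz;
			total += sz;
			s = (*end == ',') ? end + 1 : end;
			if (*end != ',')
				break;
		} else {
			char *end;

			total = parse_size(s, &end);
			break;
		}
	}

	for (int i = 0; i < MAX_NODES; i++)
		if (per_node[i])
			printf("node %d: %llu MiB\n", i, per_node[i] >> 20);
	printf("total: %llu MiB\n", total >> 20);
	return 0;
}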
+ */ + VM_WARN_ON(order <= MAX_PAGE_ORDER); + cma_reserve_called = true; + + if (!hugetlb_cma_size) + return; + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + if (hugetlb_cma_size_in_node[nid] == 0) + continue; + + if (!node_online(nid)) { + pr_warn("hugetlb_cma: invalid node %d specified\n", nid); + hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; + hugetlb_cma_size_in_node[nid] = 0; + continue; + } + + if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { + pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", + nid, (PAGE_SIZE << order) / SZ_1M); + hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; + hugetlb_cma_size_in_node[nid] = 0; + } else { + node_specific_cma_alloc = true; + } + } + + /* Validate the CMA size again in case some invalid nodes specified. */ + if (!hugetlb_cma_size) + return; + + if (hugetlb_cma_size < (PAGE_SIZE << order)) { + pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", + (PAGE_SIZE << order) / SZ_1M); + hugetlb_cma_size = 0; + return; + } + + if (!node_specific_cma_alloc) { + /* + * If 3 GB area is requested on a machine with 4 numa nodes, + * let's allocate 1 GB on first three nodes and ignore the last one. + */ + per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); + pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", + hugetlb_cma_size / SZ_1M, per_node / SZ_1M); + } + + reserved = 0; + for_each_online_node(nid) { + int res; + char name[CMA_MAX_NAME]; + + if (node_specific_cma_alloc) { + if (hugetlb_cma_size_in_node[nid] == 0) + continue; + + size = hugetlb_cma_size_in_node[nid]; + } else { + size = min(per_node, hugetlb_cma_size - reserved); + } + + size = round_up(size, PAGE_SIZE << order); + + snprintf(name, sizeof(name), "hugetlb%d", nid); + /* + * Note that 'order per bit' is based on smallest size that + * may be returned to CMA allocator in the case of + * huge page demotion. + */ + res = cma_declare_contiguous_multi(size, PAGE_SIZE << order, + HUGETLB_PAGE_ORDER, name, + &hugetlb_cma[nid], nid); + if (res) { + pr_warn("hugetlb_cma: reservation failed: err %d, node %d", + res, nid); + continue; + } + + reserved += size; + pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", + size / SZ_1M, nid); + + if (reserved >= hugetlb_cma_size) + break; + } + + if (!reserved) + /* + * hugetlb_cma_size is used to determine if allocations from + * cma are possible. Set to zero if no cma regions are set up. 
+ */ + hugetlb_cma_size = 0; +} + +void __init hugetlb_cma_check(void) +{ + if (!hugetlb_cma_size || cma_reserve_called) + return; + + pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); +} + +bool hugetlb_cma_exclusive_alloc(void) +{ + return hugetlb_cma_only; +} + +unsigned long __init hugetlb_cma_total_size(void) +{ + return hugetlb_cma_size; +} + +void __init hugetlb_cma_validate_params(void) +{ + if (!hugetlb_cma_size) + hugetlb_cma_only = false; +} + +bool __init hugetlb_early_cma(struct hstate *h) +{ + if (arch_has_huge_bootmem_alloc()) + return false; + + return hstate_is_gigantic(h) && hugetlb_cma_only; +} diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h new file mode 100644 index 000000000000..f7d7fb9880a2 --- /dev/null +++ b/mm/hugetlb_cma.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HUGETLB_CMA_H +#define _LINUX_HUGETLB_CMA_H + +#ifdef CONFIG_CMA +void hugetlb_cma_free_folio(struct folio *folio); +struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask, + int nid, nodemask_t *nodemask); +struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, + bool node_exact); +void hugetlb_cma_check(void); +bool hugetlb_cma_exclusive_alloc(void); +unsigned long hugetlb_cma_total_size(void); +void hugetlb_cma_validate_params(void); +bool hugetlb_early_cma(struct hstate *h); +#else +static inline void hugetlb_cma_free_folio(struct folio *folio) +{ +} + +static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h, + gfp_t gfp_mask, int nid, nodemask_t *nodemask) +{ + return NULL; +} + +static inline +struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, + bool node_exact) +{ + return NULL; +} + +static inline void hugetlb_cma_check(void) +{ +} + +static inline bool hugetlb_cma_exclusive_alloc(void) +{ + return false; +} + +static inline unsigned long hugetlb_cma_total_size(void) +{ + return 0; +} + +static inline void hugetlb_cma_validate_params(void) +{ +} + +static inline bool hugetlb_early_cma(struct hstate *h) +{ + return false; +} +#endif +#endif diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 7735972add01..9a99dfa3c495 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -444,7 +444,11 @@ DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); -core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); +static int __init hugetlb_vmemmap_optimize_param(char *buf) +{ + return kstrtobool(buf, &vmemmap_optimize_enabled); +} +early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param); static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags) @@ -645,14 +649,39 @@ static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *fol return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse); } -void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) +static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, + struct list_head *folio_list, + bool boot) { struct folio *folio; + int nr_to_optimize; LIST_HEAD(vmemmap_pages); unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU; + nr_to_optimize = 0; list_for_each_entry(folio, folio_list, lru) { - int ret = hugetlb_vmemmap_split_folio(h, folio); + int ret; + unsigned long spfn, epfn; + + if (boot && 
folio_test_hugetlb_vmemmap_optimized(folio)) { + /* + * Already optimized by pre-HVO, just map the + * mirrored tail page structs RO. + */ + spfn = (unsigned long)&folio->page; + epfn = spfn + pages_per_huge_page(h); + vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio), + HUGETLB_VMEMMAP_RESERVE_SIZE); + register_page_bootmem_memmap(pfn_to_section_nr(spfn), + &folio->page, + HUGETLB_VMEMMAP_RESERVE_SIZE); + static_branch_inc(&hugetlb_optimize_vmemmap_key); + continue; + } + + nr_to_optimize++; + + ret = hugetlb_vmemmap_split_folio(h, folio); /* * Spliting the PMD requires allocating a page, thus lets fail @@ -664,6 +693,16 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l break; } + if (!nr_to_optimize) + /* + * All pre-HVO folios, nothing left to do. It's ok if + * there is a mix of pre-HVO and not yet HVO-ed folios + * here, as __hugetlb_vmemmap_optimize_folio() will + * skip any folios that already have the optimized flag + * set, see vmemmap_should_optimize_folio(). + */ + goto out; + flush_tlb_all(); list_for_each_entry(folio, folio_list, lru) { @@ -689,10 +728,164 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l } } +out: flush_tlb_all(); free_vmemmap_page_list(&vmemmap_pages); } +void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) +{ + __hugetlb_vmemmap_optimize_folios(h, folio_list, false); +} + +void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list) +{ + __hugetlb_vmemmap_optimize_folios(h, folio_list, true); +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT + +/* Return true of a bootmem allocated HugeTLB page should be pre-HVO-ed */ +static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m) +{ + unsigned long section_size, psize, pmd_vmemmap_size; + phys_addr_t paddr; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return false; + + if (!hugetlb_vmemmap_optimizable(m->hstate)) + return false; + + psize = huge_page_size(m->hstate); + paddr = virt_to_phys(m); + + /* + * Pre-HVO only works if the bootmem huge page + * is aligned to the section size. + */ + section_size = (1UL << PA_SECTION_SHIFT); + if (!IS_ALIGNED(paddr, section_size) || + !IS_ALIGNED(psize, section_size)) + return false; + + /* + * The pre-HVO code does not deal with splitting PMDS, + * so the bootmem page must be aligned to the number + * of base pages that can be mapped with one vmemmap PMD. + */ + pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT; + if (!IS_ALIGNED(paddr, pmd_vmemmap_size) || + !IS_ALIGNED(psize, pmd_vmemmap_size)) + return false; + + return true; +} + +/* + * Initialize memmap section for a gigantic page, HVO-style. + */ +void __init hugetlb_vmemmap_init_early(int nid) +{ + unsigned long psize, paddr, section_size; + unsigned long ns, i, pnum, pfn, nr_pages; + unsigned long start, end; + struct huge_bootmem_page *m = NULL; + void *map; + + /* + * Noting to do if bootmem pages were not allocated + * early in boot, or if HVO wasn't enabled in the + * first place. 
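The alignment rules in vmemmap_should_optimize_bootmem_page() above are easier to see with numbers. The sketch below plugs in typical x86-64 constants (4 KiB pages, 64-byte struct page, 2 MiB PMDs, 128 MiB sections); these values are assumptions for illustration, and the kernel additionally requires the page's physical address to satisfy the same alignment.

/*
 * Concrete numbers for the pre-HVO eligibility checks, using assumed
 * x86-64 constants. With them, only gigantic (1 GiB) pages qualify.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* 4 KiB base pages */
	unsigned long pmd_size = 2UL << 20;		/* 2 MiB */
	unsigned long struct_page_size = 64;
	unsigned long section_size = 1UL << 27;		/* 128 MiB sections */
	unsigned long pmd_vmemmap_size =
		(pmd_size / struct_page_size) << page_shift;
	unsigned long psize_2m = 2UL << 20;
	unsigned long psize_1g = 1UL << 30;

	printf("pmd_vmemmap_size = %lu MiB\n", pmd_vmemmap_size >> 20);
	printf("section_size     = %lu MiB\n", section_size >> 20);
	printf("2 MiB pages pre-HVO eligible: %s\n",
	       (psize_2m % section_size == 0 &&
		psize_2m % pmd_vmemmap_size == 0) ? "yes" : "no");
	printf("1 GiB pages pre-HVO eligible: %s\n",
	       (psize_1g % section_size == 0 &&
		psize_1g % pmd_vmemmap_size == 0) ? "yes" : "no");
	return 0;
}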
+ */ + if (!hugetlb_bootmem_allocated()) + return; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return; + + section_size = (1UL << PA_SECTION_SHIFT); + + list_for_each_entry(m, &huge_boot_pages[nid], list) { + if (!vmemmap_should_optimize_bootmem_page(m)) + continue; + + nr_pages = pages_per_huge_page(m->hstate); + psize = nr_pages << PAGE_SHIFT; + paddr = virt_to_phys(m); + pfn = PHYS_PFN(paddr); + map = pfn_to_page(pfn); + start = (unsigned long)map; + end = start + nr_pages * sizeof(struct page); + + if (vmemmap_populate_hvo(start, end, nid, + HUGETLB_VMEMMAP_RESERVE_SIZE) < 0) + continue; + + memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE); + + pnum = pfn_to_section_nr(pfn); + ns = psize / section_size; + + for (i = 0; i < ns; i++) { + sparse_init_early_section(nid, map, pnum, + SECTION_IS_VMEMMAP_PREINIT); + map += section_map_size(); + pnum++; + } + + m->flags |= HUGE_BOOTMEM_HVO; + } +} + +void __init hugetlb_vmemmap_init_late(int nid) +{ + struct huge_bootmem_page *m, *tm; + unsigned long phys, nr_pages, start, end; + unsigned long pfn, nr_mmap; + struct hstate *h; + void *map; + + if (!hugetlb_bootmem_allocated()) + return; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return; + + list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { + if (!(m->flags & HUGE_BOOTMEM_HVO)) + continue; + + phys = virt_to_phys(m); + h = m->hstate; + pfn = PHYS_PFN(phys); + nr_pages = pages_per_huge_page(h); + + if (!hugetlb_bootmem_page_zones_valid(nid, m)) { + /* + * Oops, the hugetlb page spans multiple zones. + * Remove it from the list, and undo HVO. + */ + list_del(&m->list); + + map = pfn_to_page(pfn); + + start = (unsigned long)map; + end = start + nr_pages * sizeof(struct page); + + vmemmap_undo_hvo(start, end, nid, + HUGETLB_VMEMMAP_RESERVE_SIZE); + nr_mmap = end - start - HUGETLB_VMEMMAP_RESERVE_SIZE; + memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE)); + + memblock_phys_free(phys, huge_page_size(h)); + continue; + } else + m->flags |= HUGE_BOOTMEM_ZONES_VALID; + } +} +#endif + static const struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 2fcae92d3359..18b490825215 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -9,6 +9,8 @@ #ifndef _LINUX_HUGETLB_VMEMMAP_H #define _LINUX_HUGETLB_VMEMMAP_H #include <linux/hugetlb.h> +#include <linux/io.h> +#include <linux/memblock.h> /* * Reserve one vmemmap page, all vmemmap addresses are mapped to it. 
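As a sense check for the pre-HVO path above, the sketch below computes the vmemmap footprint of one gigantic page, how much of it HVO hands back, and how many memory sections hugetlb_vmemmap_init_early() touches per page, again assuming typical x86-64 constants and the one-page vmemmap reserve mentioned in the header comment.

/*
 * Back-of-the-envelope HVO numbers for a 1 GiB page under assumed
 * x86-64 constants (4 KiB pages, 64-byte struct page, 128 MiB sections).
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, struct_page = 64;
	unsigned long section_size = 128UL << 20;
	unsigned long psize = 1UL << 30;		/* 1 GiB gigantic page */
	unsigned long nr_pages = psize / page_size;	/* 262144 struct pages */
	unsigned long vmemmap = nr_pages * struct_page;	/* 16 MiB of vmemmap */
	unsigned long reserve = page_size;		/* one vmemmap page kept */

	printf("vmemmap per 1 GiB page : %lu KiB\n", vmemmap >> 10);
	printf("freed by (pre-)HVO     : %lu KiB\n", (vmemmap - reserve) >> 10);
	printf("sections initialized   : %lu\n", psize / section_size);
	return 0;
}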
See @@ -24,6 +26,12 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *non_hvo_folios); void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio); void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list); +void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list); +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +void hugetlb_vmemmap_init_early(int nid); +void hugetlb_vmemmap_init_late(int nid); +#endif + static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h) { @@ -48,7 +56,7 @@ static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct f return 0; } -static long hugetlb_vmemmap_restore_folios(const struct hstate *h, +static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *folio_list, struct list_head *non_hvo_folios) { @@ -64,6 +72,19 @@ static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list { } +static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, + struct list_head *folio_list) +{ +} + +static inline void hugetlb_vmemmap_init_early(int nid) +{ +} + +static inline void hugetlb_vmemmap_init_late(int nid) +{ +} + static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h) { return 0; diff --git a/mm/init-mm.c b/mm/init-mm.c index 6af3ad675930..4600e7605cab 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -40,6 +40,7 @@ struct mm_struct init_mm = { .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), #ifdef CONFIG_PER_VMA_LOCK + .vma_writer_wait = __RCUWAIT_INITIALIZER(init_mm.vma_writer_wait), .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq), #endif .user_ns = &init_user_ns, diff --git a/mm/internal.h b/mm/internal.h index 8d1bada7323a..50c2f590b2d0 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -25,6 +25,47 @@ struct folio_batch; /* + * Maintains state across a page table move. The operation assumes both source + * and destination VMAs already exist and are specified by the user. + * + * Partial moves are permitted, but the old and new ranges must both reside + * within a VMA. + * + * mmap lock must be held in write and VMA write locks must be held on any VMA + * that is visible. + * + * Use the PAGETABLE_MOVE() macro to initialise this struct. + * + * The old_addr and new_addr fields are updated as the page table move is + * executed. + * + * NOTE: The page table move is affected by reading from [old_addr, old_end), + * and old_addr may be updated for better page table alignment, so len_in + * represents the length of the range being copied as specified by the user. + */ +struct pagetable_move_control { + struct vm_area_struct *old; /* Source VMA. */ + struct vm_area_struct *new; /* Destination VMA. */ + unsigned long old_addr; /* Address from which the move begins. */ + unsigned long old_end; /* Exclusive address at which old range ends. */ + unsigned long new_addr; /* Address to move page tables to. */ + unsigned long len_in; /* Bytes to remap specified by user. */ + + bool need_rmap_locks; /* Do rmap locks need to be taken? */ + bool for_stack; /* Is this an early temp stack being moved? 
*/ +}; + +#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \ + struct pagetable_move_control name = { \ + .old = old_, \ + .new = new_, \ + .old_addr = old_addr_, \ + .old_end = (old_addr_) + (len_), \ + .new_addr = new_addr_, \ + .len_in = len_, \ + } + +/* * The set of flags that only affect watermark checking and reclaim * behaviour. This is used by the MM to obey the caller constraints * about IO, FS and watermark checking while ignoring placement @@ -84,6 +125,8 @@ void page_writeback_init(void); */ static inline int folio_nr_pages_mapped(const struct folio *folio) { + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) + return -1; return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; } @@ -493,6 +536,7 @@ extern char * const zone_names[MAX_NR_ZONES]; DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); extern int min_free_kbytes; +extern int defrag_mode; void setup_per_zone_wmarks(void); void calculate_min_free_kbytes(void); @@ -658,6 +702,8 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, } void set_zone_contiguous(struct zone *zone); +bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, + unsigned long nr_pages); static inline void clear_zone_contiguous(struct zone *zone) { @@ -682,8 +728,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) return; folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 1U << order; #endif } @@ -719,9 +765,17 @@ static inline void prep_compound_head(struct page *page, unsigned int order) folio_set_order(folio, order); atomic_set(&folio->_large_mapcount, -1); - atomic_set(&folio->_entire_mapcount, -1); - atomic_set(&folio->_nr_pages_mapped, 0); - atomic_set(&folio->_pincount, 0); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, 0); + if (IS_ENABLED(CONFIG_MM_ID)) { + folio->_mm_ids = 0; + folio->_mm_id_mapcount[0] = -1; + folio->_mm_id_mapcount[1] = -1; + } + if (IS_ENABLED(CONFIG_64BIT) || order > 1) { + atomic_set(&folio->_pincount, 0); + atomic_set(&folio->_entire_mapcount, -1); + } if (order > 1) INIT_LIST_HEAD(&folio->_deferred_list); } @@ -735,8 +789,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx) set_page_private(p, 0); } -extern void prep_compound_page(struct page *page, unsigned int order); - void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern bool free_pages_prepare(struct page *page, unsigned int order); @@ -846,8 +898,24 @@ void init_cma_reserved_pageblock(struct page *page); #endif /* CONFIG_COMPACTION || CONFIG_CMA */ +struct cma; + +#ifdef CONFIG_CMA +void *cma_reserve_early(struct cma *cma, unsigned long size); +void init_cma_pageblock(struct page *page); +#else +static inline void *cma_reserve_early(struct cma *cma, unsigned long size) +{ + return NULL; +} +static inline void init_cma_pageblock(struct page *page) +{ +} +#endif + + int find_suitable_fallback(struct free_area *area, unsigned int order, - int migratetype, bool only_stealable, bool *can_steal); + int migratetype, bool claim_only, bool *claim_block); static inline bool free_area_empty(struct free_area *area, int migratetype) { @@ -1419,7 +1487,8 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, } extern bool mirrored_kernelcore; -extern bool memblock_has_mirror(void); +bool memblock_has_mirror(void); +void memblock_free_all(void); static __always_inline 
void vma_set_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, @@ -1460,6 +1529,7 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid); +void __meminit __init_page_from_nid(unsigned long pfn, int nid); /* shrinker related functions */ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, @@ -1521,10 +1591,7 @@ extern struct list_lru shadow_nodes; } while (0) /* mremap.c */ -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack); +unsigned long move_page_tables(struct pagetable_move_control *pmc); #ifdef CONFIG_UNACCEPTED_MEMORY void accept_page(struct page *page); diff --git a/mm/ioremap.c b/mm/ioremap.c index 3e049dfb28bd..c36dd9f62fd5 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -50,9 +50,9 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, #ifndef ioremap_prot void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); #endif diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5f0be134141e..cc945c6ab3bd 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -607,7 +607,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio); /* See hpage_collapse_scan_pmd(). */ - if (folio_likely_mapped_shared(folio)) { + if (folio_maybe_mapped_shared(folio)) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { @@ -1359,11 +1359,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, /* * We treat a single page as shared if any part of the THP - * is shared. "False negatives" from - * folio_likely_mapped_shared() are not expected to matter - * much in practice. + * is shared. */ - if (folio_likely_mapped_shared(folio)) { + if (folio_maybe_mapped_shared(folio)) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c6ed68604136..c12cef3eeb32 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -352,6 +352,15 @@ static bool unreferenced_object(struct kmemleak_object *object) jiffies_last_scan); } +static const char *__object_type_str(struct kmemleak_object *object) +{ + if (object->flags & OBJECT_PHYS) + return " (phys)"; + if (object->flags & OBJECT_PERCPU) + return " (percpu)"; + return ""; +} + /* * Printing of the unreferenced objects information to the seq file. The * print_unreferenced function must be called with the object->lock held. 
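The kmemleak changes in the surrounding hunks make the debugfs "dump=" command report physical and per-CPU objects as well, with the object type appended to the output. The small program below simply exercises that path from userspace; it assumes CONFIG_DEBUG_KMEMLEAK is enabled, debugfs is mounted at /sys/kernel/debug, root privileges, and the address is a placeholder to be replaced with one from a real kmemleak report.

/*
 * Ask kmemleak to dump whatever object (regular, phys or percpu) lives
 * at a given address; the result is printed to the kernel log.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kmemleak", "w");

	if (!f) {
		perror("kmemleak");
		return 1;
	}
	/* Placeholder address: substitute one reported by kmemleak. */
	fprintf(f, "dump=0xffff888100000000\n");
	fclose(f);
	return 0;
}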
@@ -364,8 +373,9 @@ static void print_unreferenced(struct seq_file *seq, unsigned int nr_entries; nr_entries = stack_depot_fetch(object->trace_handle, &entries); - warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", - object->pointer, object->size); + warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n", + __object_type_str(object), + object->pointer, object->size); warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n", object->comm, object->pid, object->jiffies); hex_dump_object(seq, object); @@ -384,10 +394,10 @@ static void print_unreferenced(struct seq_file *seq, */ static void dump_object_info(struct kmemleak_object *object) { - pr_notice("Object 0x%08lx (size %zu):\n", - object->pointer, object->size); + pr_notice("Object%s 0x%08lx (size %zu):\n", + __object_type_str(object), object->pointer, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", - object->comm, object->pid, object->jiffies); + object->comm, object->pid, object->jiffies); pr_notice(" min_count = %d\n", object->min_count); pr_notice(" count = %d\n", object->count); pr_notice(" flags = 0x%x\n", object->flags); @@ -1998,25 +2008,41 @@ static int kmemleak_open(struct inode *inode, struct file *file) return seq_open(file, &kmemleak_seq_ops); } -static int dump_str_object_info(const char *str) +static bool __dump_str_object_info(unsigned long addr, unsigned int objflags) { unsigned long flags; struct kmemleak_object *object; + + object = __find_and_get_object(addr, 1, objflags); + if (!object) + return false; + + raw_spin_lock_irqsave(&object->lock, flags); + dump_object_info(object); + raw_spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); + + return true; +} + +static int dump_str_object_info(const char *str) +{ unsigned long addr; + bool found = false; if (kstrtoul(str, 0, &addr)) return -EINVAL; - object = find_and_get_object(addr, 0); - if (!object) { + + found |= __dump_str_object_info(addr, 0); + found |= __dump_str_object_info(addr, OBJECT_PHYS); + found |= __dump_str_object_info(addr, OBJECT_PERCPU); + + if (!found) { pr_info("Unknown object at 0x%08lx\n", addr); return -EINVAL; } - raw_spin_lock_irqsave(&object->lock, flags); - dump_object_info(object); - raw_spin_unlock_irqrestore(&object->lock, flags); - - put_object(object); return 0; } @@ -1270,8 +1270,15 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; - anon_exclusive = PageAnonExclusive(&folio->page); entry = ptep_get(pvmw.pte); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that actually + * map pages: give up just like the next folio_walk would. 
+ */ + if (unlikely(!pte_present(entry))) + goto out_unlock; + + anon_exclusive = PageAnonExclusive(&folio->page); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { swapped = folio_test_swapcache(folio); diff --git a/mm/list_lru.c b/mm/list_lru.c index 7d69434c70e0..490473af3122 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp) { unsigned long flags; - struct list_lru_memcg *mlru; + struct list_lru_memcg *mlru = NULL; struct mem_cgroup *pos, *parent; XA_STATE(xas, &lru->xa, 0); @@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, parent = parent_mem_cgroup(pos); } - mlru = memcg_init_list_lru_one(lru, gfp); - if (!mlru) - return -ENOMEM; + if (!mlru) { + mlru = memcg_init_list_lru_one(lru, gfp); + if (!mlru) + return -ENOMEM; + } xas_set(&xas, pos->kmemcg_id); do { xas_lock_irqsave(&xas, flags); @@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, } xas_unlock_irqrestore(&xas, flags); } while (xas_nomem(&xas, gfp)); - if (mlru) - kfree(mlru); } while (pos != memcg && !css_is_dying(&pos->css)); + if (unlikely(mlru)) + kfree(mlru); + return xas_error(&xas); } #else diff --git a/mm/madvise.c b/mm/madvise.c index 08b207f8e61e..b17f684322ad 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -387,7 +387,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pmd_folio(orig_pmd); /* Do not interfere with other mappings of this folio */ - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -486,7 +486,7 @@ restart: if (nr < folio_nr_pages(folio)) { int err; - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; @@ -721,7 +721,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (nr < folio_nr_pages(folio)) { int err; - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) continue; if (!folio_trylock(folio)) continue; @@ -1051,13 +1051,7 @@ static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked) if (!allow_locked) disallowed |= VM_LOCKED; - if (!vma_is_anonymous(vma)) - return false; - - if ((vma->vm_flags & (VM_MAYWRITE | disallowed)) != VM_MAYWRITE) - return false; - - return true; + return !(vma->vm_flags & disallowed); } static bool is_guard_pte_marker(pte_t ptent) @@ -1398,7 +1392,32 @@ static int madvise_inject_error(int behavior, return 0; } -#endif + +static bool is_memory_failure(int behavior) +{ + switch (behavior) { + case MADV_HWPOISON: + case MADV_SOFT_OFFLINE: + return true; + default: + return false; + } +} + +#else + +static int madvise_inject_error(int behavior, + unsigned long start, unsigned long end) +{ + return 0; +} + +static bool is_memory_failure(int behavior) +{ + return false; +} + +#endif /* CONFIG_MEMORY_FAILURE */ static bool madvise_behavior_valid(int behavior) @@ -1574,6 +1593,111 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, madvise_vma_anon_name); } #endif /* CONFIG_ANON_VMA_NAME */ + +static int madvise_lock(struct mm_struct *mm, int behavior) +{ + if (is_memory_failure(behavior)) + return 0; + + if (madvise_need_mmap_write(behavior)) { + if (mmap_write_lock_killable(mm)) + return -EINTR; + } else { + mmap_read_lock(mm); + } + return 0; 
+} + +static void madvise_unlock(struct mm_struct *mm, int behavior) +{ + if (is_memory_failure(behavior)) + return; + + if (madvise_need_mmap_write(behavior)) + mmap_write_unlock(mm); + else + mmap_read_unlock(mm); +} + +static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) +{ + size_t len; + + if (!madvise_behavior_valid(behavior)) + return false; + + if (!PAGE_ALIGNED(start)) + return false; + len = PAGE_ALIGN(len_in); + + /* Check to see whether len was rounded up from small -ve to zero */ + if (len_in && !len) + return false; + + if (start + len < start) + return false; + + return true; +} + +/* + * madvise_should_skip() - Return if the request is invalid or nothing. + * @start: Start address of madvise-requested address range. + * @len_in: Length of madvise-requested address range. + * @behavior: Requested madvise behavor. + * @err: Pointer to store an error code from the check. + * + * If the specified behaviour is invalid or nothing would occur, we skip the + * operation. This function returns true in the cases, otherwise false. In + * the former case we store an error on @err. + */ +static bool madvise_should_skip(unsigned long start, size_t len_in, + int behavior, int *err) +{ + if (!is_valid_madvise(start, len_in, behavior)) { + *err = -EINVAL; + return true; + } + if (start + PAGE_ALIGN(len_in) == start) { + *err = 0; + return true; + } + return false; +} + +static bool is_madvise_populate(int behavior) +{ + switch (behavior) { + case MADV_POPULATE_READ: + case MADV_POPULATE_WRITE: + return true; + default: + return false; + } +} + +static int madvise_do_behavior(struct mm_struct *mm, + unsigned long start, size_t len_in, int behavior) +{ + struct blk_plug plug; + unsigned long end; + int error; + + if (is_memory_failure(behavior)) + return madvise_inject_error(behavior, start, start + len_in); + start = untagged_addr_remote(mm, start); + end = start + PAGE_ALIGN(len_in); + + blk_start_plug(&plug); + if (is_madvise_populate(behavior)) + error = madvise_populate(mm, start, end, behavior); + else + error = madvise_walk_vmas(mm, start, end, behavior, + madvise_vma_behavior); + blk_finish_plug(&plug); + return error; +} + /* * The madvise(2) system call. 
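The checks that do_madvise() used to open-code are now concentrated in is_valid_madvise() and madvise_should_skip() above. The userspace sketch below models just that screening (page-aligned start, a tiny negative length that must not silently round up to zero, no address wrap-around); it is not the kernel code and leaves behavior validation out.

/*
 * Userspace model of the madvise argument screening: returns -1 for an
 * invalid request, 0 for an empty range (nothing to do), 1 to proceed.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int check(unsigned long start, size_t len_in)
{
	size_t len;

	if (start & (PAGE_SIZE - 1))
		return -1;			/* unaligned start */
	len = PAGE_ALIGN(len_in);
	if (len_in && !len)
		return -1;			/* small negative len wrapped to 0 */
	if (start + len < start)
		return -1;			/* address range wraps */
	if (start + len == start)
		return 0;			/* empty range: nothing to do */
	return 1;				/* proceed */
}

int main(void)
{
	printf("%d\n", check(0x1000, 4096));		/* 1: valid */
	printf("%d\n", check(0x1001, 4096));		/* -1: unaligned */
	printf("%d\n", check(0x1000, (size_t)-1));	/* -1: len rounds to 0 */
	printf("%d\n", check(0x1000, 0));		/* 0: empty */
	return 0;
}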
* @@ -1648,63 +1772,15 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, */ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) { - unsigned long end; int error; - int write; - size_t len; - struct blk_plug plug; - - if (!madvise_behavior_valid(behavior)) - return -EINVAL; - - if (!PAGE_ALIGNED(start)) - return -EINVAL; - len = PAGE_ALIGN(len_in); - /* Check to see whether len was rounded up from small -ve to zero */ - if (len_in && !len) - return -EINVAL; - - end = start + len; - if (end < start) - return -EINVAL; - - if (end == start) - return 0; - -#ifdef CONFIG_MEMORY_FAILURE - if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) - return madvise_inject_error(behavior, start, start + len_in); -#endif - - write = madvise_need_mmap_write(behavior); - if (write) { - if (mmap_write_lock_killable(mm)) - return -EINTR; - } else { - mmap_read_lock(mm); - } - - start = untagged_addr_remote(mm, start); - end = start + len; - - blk_start_plug(&plug); - switch (behavior) { - case MADV_POPULATE_READ: - case MADV_POPULATE_WRITE: - error = madvise_populate(mm, start, end, behavior); - break; - default: - error = madvise_walk_vmas(mm, start, end, behavior, - madvise_vma_behavior); - break; - } - blk_finish_plug(&plug); - - if (write) - mmap_write_unlock(mm); - else - mmap_read_unlock(mm); + if (madvise_should_skip(start, len_in, behavior, &error)) + return error; + error = madvise_lock(mm, behavior); + if (error) + return error; + error = madvise_do_behavior(mm, start, len_in, behavior); + madvise_unlock(mm, behavior); return error; } @@ -1723,16 +1799,26 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, total_len = iov_iter_count(iter); + ret = madvise_lock(mm, behavior); + if (ret) + return ret; + while (iov_iter_count(iter)) { - ret = do_madvise(mm, (unsigned long)iter_iov_addr(iter), - iter_iov_len(iter), behavior); + unsigned long start = (unsigned long)iter_iov_addr(iter); + size_t len_in = iter_iov_len(iter); + int error; + + if (madvise_should_skip(start, len_in, behavior, &error)) + ret = error; + else + ret = madvise_do_behavior(mm, start, len_in, behavior); /* * An madvise operation is attempting to restart the syscall, * but we cannot proceed as it would not be correct to repeat * the operation in aggregate, and would be surprising to the * user. * - * As we have already dropped locks, it is safe to just loop and + * We drop and reacquire locks so it is safe to just loop and * try again. We check for fatal signals in case we need exit * early anyway. */ @@ -1741,12 +1827,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, ret = -EINTR; break; } + + /* Drop and reacquire lock to unwind race. */ + madvise_unlock(mm, behavior); + madvise_lock(mm, behavior); continue; } if (ret < 0) break; iov_iter_advance(iter, iter_iov_len(iter)); } + madvise_unlock(mm, behavior); ret = (total_len - iov_iter_count(iter)) ? 
: ret; diff --git a/mm/memblock.c b/mm/memblock.c index 8cd95f60015d..284154445409 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2165,8 +2165,7 @@ static unsigned long __init __free_memory_core(phys_addr_t start, phys_addr_t end) { unsigned long start_pfn = PFN_UP(start); - unsigned long end_pfn = min_t(unsigned long, - PFN_DOWN(end), max_low_pfn); + unsigned long end_pfn = PFN_DOWN(end); if (start_pfn >= end_pfn) return 0; diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 2e9fa431bbf5..8660908850dc 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -490,6 +490,19 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg) } /* Cgroup1: threshold notifications & softlimit tree updates */ + +/* + * Per memcg event counter is incremented at every pagein/pageout. With THP, + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + */ +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_NTARGETS, +}; + struct memcg1_events_percpu { unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; @@ -568,8 +581,59 @@ void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg) local_irq_restore(flags); } -void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) +/** + * memcg1_swapout - transfer a memsw charge to swap + * @folio: folio whose memsw charge to transfer + * @entry: swap entry to move the charge to + * + * Transfer the memsw charge of @folio to @entry. + */ +void memcg1_swapout(struct folio *folio, swp_entry_t entry) { + struct mem_cgroup *memcg, *swap_memcg; + unsigned int nr_entries; + + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); + + if (mem_cgroup_disabled()) + return; + + if (!do_memsw_account()) + return; + + memcg = folio_memcg(folio); + + VM_WARN_ON_ONCE_FOLIO(!memcg, folio); + if (!memcg) + return; + + /* + * In case the memcg owning these pages has been offlined and doesn't + * have an ID allocated to it anymore, charge the closest online + * ancestor for the swap instead and transfer the memory+swap charge. + */ + swap_memcg = mem_cgroup_id_get_online(memcg); + nr_entries = folio_nr_pages(folio); + /* Get references for the tail pages, too */ + if (nr_entries > 1) + mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); + mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); + + swap_cgroup_record(folio, mem_cgroup_id(memcg), entry); + + folio_unqueue_deferred_split(folio); + folio->memcg_data = 0; + + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memory, nr_entries); + + if (memcg != swap_memcg) { + if (!mem_cgroup_is_root(swap_memcg)) + page_counter_charge(&swap_memcg->memsw, nr_entries); + page_counter_uncharge(&memcg->memsw, nr_entries); + } + /* * Interrupts should be disabled here because the caller holds the * i_pages lock which is taken with interrupts-off. It is @@ -581,6 +645,42 @@ void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) memcg1_charge_statistics(memcg, -folio_nr_pages(folio)); preempt_enable_nested(); memcg1_check_events(memcg, folio_nid(folio)); + + css_put(&memcg->css); +} + +/* + * memcg1_swapin - uncharge swap slot + * @entry: the first swap entry for which the pages are charged + * @nr_pages: number of pages which will be uncharged + * + * Call this function after successfully adding the charged page to swapcache. 
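The comments in memcg1_swapout() above and memcg1_swapin() below describe the cgroup v1 charge lifetime: memory is uncharged at swap-out while the combined memory+swap charge follows the swap entry until that entry is uncharged again. The toy model below uses plain counters to walk through that lifetime; it illustrates the bookkeeping only and uses no kernel API.

/*
 * Toy model of the v1 memsw lifetime: the memory charge is dropped at
 * swap-out, the memsw charge only when the swap slot is uncharged.
 */
#include <stdio.h>

struct memsw_cgroup {
	long memory;	/* charged pages resident in memory */
	long memsw;	/* charged pages in memory or swap */
};

static void charge_page(struct memsw_cgroup *cg)   { cg->memory++; cg->memsw++; }
static void swapout_page(struct memsw_cgroup *cg)  { cg->memory--; /* memsw kept */ }
static void uncharge_swap(struct memsw_cgroup *cg) { cg->memsw--; }

static void show(const char *when, const struct memsw_cgroup *cg)
{
	printf("%-22s memory=%ld memsw=%ld\n", when, cg->memory, cg->memsw);
}

int main(void)
{
	struct memsw_cgroup cg = { 0, 0 };

	charge_page(&cg);	show("after fault", &cg);
	swapout_page(&cg);	show("after swapout", &cg);
	uncharge_swap(&cg);	show("after swap slot freed", &cg);
	return 0;
}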
+ * + * Note: This function assumes the page for which swap slot is being uncharged + * is order 0 page. + */ +void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages) +{ + /* + * Cgroup1's unified memory+swap counter has been charged with the + * new swapcache page, finish the transfer by uncharging the swap + * slot. The swap slot would also get uncharged when it dies, but + * it can stick around indefinitely and we'd count the page twice + * the entire time. + * + * Cgroup2 has separate resource counters for memory and swap, + * so this is a non-issue here. Memory and swap charge lifetimes + * correspond 1:1 to page and swap slot lifetimes: we charge the + * page to memory here, and uncharge swap when the slot is freed. + */ + if (do_memsw_account()) { + /* + * The swap entry might not get freed for a long time, + * let's not wait for it. The page already received a + * memory+swap charge, drop the swap entry duplicate. + */ + mem_cgroup_uncharge_swap(entry, nr_pages); + } } void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index 144d71b65907..6358464bb416 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -7,21 +7,6 @@ /* Cgroup v1 and v2 common declarations */ -int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages); - -static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages) -{ - if (mem_cgroup_is_root(memcg)) - return 0; - - return try_charge_memcg(memcg, gfp_mask, nr_pages); -} - -void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n); -void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n); - /* * Iteration constructs for visiting all cgroups (under a tree). If * loops are exited prematurely (break), mem_cgroup_iter_break() must @@ -37,38 +22,29 @@ void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n); iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -/* Whether legacy memory+swap accounting is active */ -static inline bool do_memsw_account(void) -{ - return !cgroup_subsys_on_dfl(memory_cgrp_subsys); -} - -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremented by the number of pages. This counter is used - * to trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event. 
- */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_NTARGETS, -}; - unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); void drain_all_stock(struct mem_cgroup *root_memcg); unsigned long memcg_events(struct mem_cgroup *memcg, int event); -unsigned long memcg_events_local(struct mem_cgroup *memcg, int event); -unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx); unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item); -unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item); int memory_stat_show(struct seq_file *m, void *v); +void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n); +struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg); + /* Cgroup v1-specific declarations */ #ifdef CONFIG_MEMCG_V1 +/* Whether legacy memory+swap accounting is active */ +static inline bool do_memsw_account(void) +{ + return !cgroup_subsys_on_dfl(memory_cgrp_subsys); +} + +unsigned long memcg_events_local(struct mem_cgroup *memcg, int event); +unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx); +unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item); bool memcg1_alloc_events(struct mem_cgroup *memcg); void memcg1_free_events(struct mem_cgroup *memcg); @@ -96,7 +72,6 @@ void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked); void memcg1_oom_recover(struct mem_cgroup *memcg); void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg); -void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg); void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, unsigned long nr_memory, int nid); @@ -119,6 +94,7 @@ extern struct cftype mem_cgroup_legacy_files[]; #else /* CONFIG_MEMCG_V1 */ +static inline bool do_memsw_account(void) { return false; } static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; } static inline void memcg1_free_events(struct mem_cgroup *memcg) {} @@ -134,8 +110,6 @@ static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {} static inline void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg) {} -static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {} - static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, unsigned long nr_memory, int nid) {} diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 83c2df73e4b6..421740f1bcdc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -315,6 +315,7 @@ static const unsigned int memcg_node_stat_items[] = { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, + PGDEMOTE_PROACTIVE, #ifdef CONFIG_HUGETLB_PAGE NR_HUGETLB, #endif @@ -431,9 +432,11 @@ static const unsigned int memcg_vm_event_stat[] = { PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED, + PGSCAN_PROACTIVE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED, + PGSTEAL_PROACTIVE, PGFAULT, PGMAJFAULT, PGREFILL, @@ -706,6 +709,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, trace_mod_memcg_state(memcg, idx, val); } +#ifdef CONFIG_MEMCG_V1 /* idx can be of type enum memcg_stat_item or node_stat_item. 
*/ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) { @@ -722,6 +726,7 @@ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) #endif return x; } +#endif static void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, @@ -869,6 +874,7 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event) return READ_ONCE(memcg->vmstats->events[i]); } +#ifdef CONFIG_MEMCG_V1 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) { int i = memcg_events_index(event); @@ -878,6 +884,7 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) return READ_ONCE(memcg->vmstats->events_local[i]); } +#endif struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) { @@ -1390,6 +1397,7 @@ static const struct memory_stat memory_stats[] = { { "pgdemote_kswapd", PGDEMOTE_KSWAPD }, { "pgdemote_direct", PGDEMOTE_DIRECT }, { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED }, + { "pgdemote_proactive", PGDEMOTE_PROACTIVE }, #ifdef CONFIG_NUMA_BALANCING { "pgpromote_success", PGPROMOTE_SUCCESS }, #endif @@ -1432,6 +1440,7 @@ static int memcg_page_state_output_unit(int item) case PGDEMOTE_KSWAPD: case PGDEMOTE_DIRECT: case PGDEMOTE_KHUGEPAGED: + case PGDEMOTE_PROACTIVE: #ifdef CONFIG_NUMA_BALANCING case PGPROMOTE_SUCCESS: #endif @@ -1447,11 +1456,13 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) memcg_page_state_output_unit(item); } +#ifdef CONFIG_MEMCG_V1 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) { return memcg_page_state_local(memcg, item) * memcg_page_state_output_unit(item); } +#endif #ifdef CONFIG_HUGETLB_PAGE static bool memcg_accounts_hugetlb(void) @@ -1503,10 +1514,12 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) seq_buf_printf(s, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + memcg_events(memcg, PGSCAN_DIRECT) + + memcg_events(memcg, PGSCAN_PROACTIVE) + memcg_events(memcg, PGSCAN_KHUGEPAGED)); seq_buf_printf(s, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + memcg_events(memcg, PGSTEAL_DIRECT) + + memcg_events(memcg, PGSTEAL_PROACTIVE) + memcg_events(memcg, PGSTEAL_KHUGEPAGED)); for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { @@ -1566,16 +1579,23 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) /* Use static buffer, for the caller is holding oom_lock. 
*/ static char buf[SEQ_BUF_SIZE]; struct seq_buf s; + unsigned long memory_failcnt; lockdep_assert_held(&oom_lock); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]); + else + memory_failcnt = memcg->memory.failcnt; + pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", K((u64)page_counter_read(&memcg->memory)), - K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); + K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt); if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", K((u64)page_counter_read(&memcg->swap)), - K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); + K((u64)READ_ONCE(memcg->swap.max)), + atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); #ifdef CONFIG_MEMCG_V1 else { pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", @@ -2224,8 +2244,8 @@ out: css_put(&memcg->css); } -int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages) +static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, + unsigned int nr_pages) { unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); int nr_retries = MAX_RECLAIM_RETRIES; @@ -2418,6 +2438,15 @@ done_restock: return 0; } +static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, + unsigned int nr_pages) +{ + if (mem_cgroup_is_root(memcg)) + return 0; + + return try_charge_memcg(memcg, gfp_mask, nr_pages); +} + static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) { VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); @@ -2642,7 +2671,8 @@ static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); memcg1_account_kmem(memcg, -nr_pages); - refill_stock(memcg, nr_pages); + if (!mem_cgroup_is_root(memcg)) + refill_stock(memcg, nr_pages); css_put(&memcg->css); } @@ -2675,6 +2705,23 @@ out: return ret; } +static struct obj_cgroup *page_objcg(const struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + if (mem_cgroup_disabled() || !memcg_data) + return NULL; + + VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM, + page); + return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); +} + +static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg) +{ + page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; +} + /** * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup * @page: page to charge @@ -2693,8 +2740,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); if (!ret) { obj_cgroup_get(objcg); - page->memcg_data = (unsigned long)objcg | - MEMCG_DATA_KMEM; + page_set_objcg(page, objcg); return 0; } } @@ -2708,19 +2754,31 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) */ void __memcg_kmem_uncharge_page(struct page *page, int order) { - struct folio *folio = page_folio(page); - struct obj_cgroup *objcg; + struct obj_cgroup *objcg = page_objcg(page); unsigned int nr_pages = 1 << order; - if (!folio_memcg_kmem(folio)) + if (!objcg) return; - objcg = __folio_objcg(folio); obj_cgroup_uncharge_pages(objcg, nr_pages); - folio->memcg_data = 0; + page->memcg_data = 0; obj_cgroup_put(objcg); } +/* Replace the stock objcg with objcg, return the old objcg */ +static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock, + struct obj_cgroup *objcg) +{ + struct obj_cgroup *old = NULL; + 
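	/*
	 * Drain whatever objcg was cached on this CPU, pin the new objcg,
	 * and seed the stock with any bytes the objcg had pre-charged;
	 * mod_objcg_state() and refill_obj_stock() below previously
	 * open-coded this sequence.
	 */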
+ old = drain_obj_stock(stock); + obj_cgroup_get(objcg); + stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) + ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + WRITE_ONCE(stock->cached_objcg, objcg); + return old; +} + static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) { @@ -2738,11 +2796,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, * changes. */ if (READ_ONCE(stock->cached_objcg) != objcg) { - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; - WRITE_ONCE(stock->cached_objcg, objcg); + old = replace_stock_objcg(stock, objcg); stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ @@ -2896,11 +2950,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, stock = this_cpu_ptr(&memcg_stock); if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - WRITE_ONCE(stock->cached_objcg, objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + old = replace_stock_objcg(stock, objcg); allow_uncharge = true; /* Allow uncharge when objcg changes */ } stock->nr_bytes += nr_bytes; @@ -3058,25 +3108,33 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, } /* - * Because folio_memcg(head) is not set on tails, set it now. + * The objcg is only set on the first page, so transfer it to all the + * other pages. */ -void split_page_memcg(struct page *head, int old_order, int new_order) +void split_page_memcg(struct page *page, unsigned order) { - struct folio *folio = page_folio(head); - int i; - unsigned int old_nr = 1 << old_order; - unsigned int new_nr = 1 << new_order; + struct obj_cgroup *objcg = page_objcg(page); + unsigned int i, nr = 1 << order; - if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) + if (!objcg) return; - for (i = new_nr; i < old_nr; i += new_nr) - folio_page(folio, i)->memcg_data = folio->memcg_data; + for (i = 1; i < nr; i++) + page_set_objcg(&page[i], objcg); - if (folio_memcg_kmem(folio)) - obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); - else - css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1); + obj_cgroup_get_many(objcg, nr - 1); +} + +void folio_split_memcg_refs(struct folio *folio, unsigned old_order, + unsigned new_order) +{ + unsigned new_refs; + + if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) + return; + + new_refs = (1 << (old_order - new_order)) - 1; + css_get_many(&__folio_memcg(folio)->css, new_refs); } unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) @@ -3404,7 +3462,7 @@ void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, refcount_add(n, &memcg->id.ref); } -void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) +static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) { if (refcount_sub_and_test(n, &memcg->id.ref)) { mem_cgroup_id_remove(memcg); @@ -3419,6 +3477,24 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) mem_cgroup_id_put_many(memcg, 1); } +struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) +{ + while (!refcount_inc_not_zero(&memcg->id.ref)) { + /* + * The root cgroup cannot be destroyed, so it's refcount must + * always be >= 1. 
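		 *
		 * If the memcg that originally charged the pages has been
		 * offlined and has dropped its id reference, walk up and pin
		 * the closest ancestor that is still online instead.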
+ */ + if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { + VM_BUG_ON(1); + break; + } + memcg = parent_mem_cgroup(memcg); + if (!memcg) + memcg = root_mem_cgroup; + } + return memcg; +} + /** * mem_cgroup_from_id - look up a memcg from a memcg id * @id: the memcg id to look up @@ -3454,6 +3530,16 @@ struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) } #endif +static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn) +{ + if (!pn) + return; + + free_percpu(pn->lruvec_stats_percpu); + kfree(pn->lruvec_stats); + kfree(pn); +} + static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) { struct mem_cgroup_per_node *pn; @@ -3478,23 +3564,10 @@ static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) memcg->nodeinfo[node] = pn; return true; fail: - kfree(pn->lruvec_stats); - kfree(pn); + free_mem_cgroup_per_node_info(pn); return false; } -static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) -{ - struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; - - if (!pn) - return; - - free_percpu(pn->lruvec_stats_percpu); - kfree(pn->lruvec_stats); - kfree(pn); -} - static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; @@ -3502,7 +3575,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) obj_cgroup_put(memcg->orig_objcg); for_each_node(node) - free_mem_cgroup_per_node_info(memcg, node); + free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); memcg1_free_events(memcg); kfree(memcg->vmstats); free_percpu(memcg->vmstats_percpu); @@ -3595,6 +3668,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); struct mem_cgroup *memcg, *old_memcg; + bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys); old_memcg = set_active_memcg(parent); memcg = mem_cgroup_alloc(parent); @@ -3612,9 +3686,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); - page_counter_init(&memcg->memory, &parent->memory, true); + page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); page_counter_init(&memcg->swap, &parent->swap, false); #ifdef CONFIG_MEMCG_V1 + memcg->memory.track_failcnt = !memcg_on_dfl; WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); page_counter_init(&memcg->kmem, &parent->kmem, false); page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); @@ -3632,7 +3707,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) return &memcg->css; } - if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) + if (memcg_on_dfl && !cgroup_memory_nosocket) static_branch_inc(&memcg_sockets_enabled_key); if (!cgroup_memory_nobpf) @@ -4034,7 +4109,7 @@ static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, WRITE_ONCE(peer_ctx->value, usage); /* initial write, register watcher */ - if (ofp->value == -1) + if (ofp->value == OFP_PEAK_UNSET) list_add(&ofp->list, watchers); WRITE_ONCE(ofp->value, usage); @@ -4607,40 +4682,6 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, return ret; } -/* - * mem_cgroup_swapin_uncharge_swap - uncharge swap slot - * @entry: the first swap entry for which the pages are charged - * @nr_pages: number of pages which will be uncharged - * - * Call this function after successfully adding the charged page to swapcache. 
- * - * Note: This function assumes the page for which swap slot is being uncharged - * is order 0 page. - */ -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) -{ - /* - * Cgroup1's unified memory+swap counter has been charged with the - * new swapcache page, finish the transfer by uncharging the swap - * slot. The swap slot would also get uncharged when it dies, but - * it can stick around indefinitely and we'd count the page twice - * the entire time. - * - * Cgroup2 has separate resource counters for memory and swap, - * so this is a non-issue here. Memory and swap charge lifetimes - * correspond 1:1 to page and swap slot lifetimes: we charge the - * page to memory here, and uncharge swap when the slot is freed. - */ - if (do_memsw_account()) { - /* - * The swap entry might not get freed for a long time, - * let's not wait for it. The page already received a - * memory+swap charge, drop the swap entry duplicate. - */ - mem_cgroup_uncharge_swap(entry, nr_pages); - } -} - struct uncharge_gather { struct mem_cgroup *memcg; unsigned long nr_memory; @@ -4891,7 +4932,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); - if (try_charge(memcg, gfp_mask, nr_pages) == 0) { + if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) { mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); return true; } @@ -4966,81 +5007,6 @@ static int __init mem_cgroup_init(void) subsys_initcall(mem_cgroup_init); #ifdef CONFIG_SWAP -static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) -{ - while (!refcount_inc_not_zero(&memcg->id.ref)) { - /* - * The root cgroup cannot be destroyed, so it's refcount must - * always be >= 1. - */ - if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { - VM_BUG_ON(1); - break; - } - memcg = parent_mem_cgroup(memcg); - if (!memcg) - memcg = root_mem_cgroup; - } - return memcg; -} - -/** - * mem_cgroup_swapout - transfer a memsw charge to swap - * @folio: folio whose memsw charge to transfer - * @entry: swap entry to move the charge to - * - * Transfer the memsw charge of @folio to @entry. - */ -void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) -{ - struct mem_cgroup *memcg, *swap_memcg; - unsigned int nr_entries; - - VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); - - if (mem_cgroup_disabled()) - return; - - if (!do_memsw_account()) - return; - - memcg = folio_memcg(folio); - - VM_WARN_ON_ONCE_FOLIO(!memcg, folio); - if (!memcg) - return; - - /* - * In case the memcg owning these pages has been offlined and doesn't - * have an ID allocated to it anymore, charge the closest online - * ancestor for the swap instead and transfer the memory+swap charge. 
- */ - swap_memcg = mem_cgroup_id_get_online(memcg); - nr_entries = folio_nr_pages(folio); - /* Get references for the tail pages, too */ - if (nr_entries > 1) - mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); - mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); - - swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry); - - folio_unqueue_deferred_split(folio); - folio->memcg_data = 0; - - if (!mem_cgroup_is_root(memcg)) - page_counter_uncharge(&memcg->memory, nr_entries); - - if (memcg != swap_memcg) { - if (!mem_cgroup_is_root(swap_memcg)) - page_counter_charge(&swap_memcg->memsw, nr_entries); - page_counter_uncharge(&memcg->memsw, nr_entries); - } - - memcg1_swapout(folio, memcg); - css_put(&memcg->css); -} - /** * __mem_cgroup_try_charge_swap - try charging swap space for a folio * @folio: folio being added to swap diff --git a/mm/memfd.c b/mm/memfd.c index 37f7be57c2f5..c64df1343059 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -259,7 +259,7 @@ static int memfd_add_seals(struct file *file, unsigned int seals) } /* - * SEAL_EXEC implys SEAL_WRITE, making W^X from the start. + * SEAL_EXEC implies SEAL_WRITE, making W^X from the start. */ if (seals & F_SEAL_EXEC && inode->i_mode & 0111) seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE; @@ -337,7 +337,7 @@ static int check_write_seal(unsigned long *vm_flags_ptr) unsigned long vm_flags = *vm_flags_ptr; unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE); - /* If a private matting then writability is irrelevant. */ + /* If a private mapping then writability is irrelevant. */ if (!(mask & VM_SHARED)) return 0; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 327e02fdc029..b91a33fb6c69 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -419,18 +419,18 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; - if (pud_devmap(*pud)) + if (pud_trans_huge(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; - if (pmd_devmap(*pmd)) + if (pmd_trans_huge(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte) return 0; ptent = ptep_get(pte); - if (pte_present(ptent) && pte_devmap(ptent)) + if (pte_present(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; @@ -881,12 +881,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, mmap_read_lock(p->mm); ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, (void *)&priv); + /* + * ret = 1 when CMCI wins, regardless of whether try_to_unmap() + * succeeds or fails, then kill the process with SIGBUS. + * ret = 0 when poison page is a clean page and it's dropped, no + * SIGBUS is needed. + */ if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); - else - ret = 0; mmap_read_unlock(p->mm); - return ret > 0 ? -EHWPOISON : -EFAULT; + + return ret > 0 ? -EHWPOISON : 0; } /* @@ -2210,9 +2215,13 @@ static void kill_procs_now(struct page *p, unsigned long pfn, int flags, * Must run in process context (e.g. a work queue) with interrupts * enabled and no spinlocks held. * - * Return: 0 for successfully handled the memory error, - * -EOPNOTSUPP for hwpoison_filter() filtered the error event, - * < 0(except -EOPNOTSUPP) on failure. + * Return: + * 0 - success, + * -ENXIO - memory not managed by the kernel + * -EOPNOTSUPP - hwpoison_filter() filtered the error event, + * -EHWPOISON - the page was already poisoned, potentially + * kill process, + * other negative values - failure. 
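 *
 * For illustration only (a hypothetical caller, not defined by this patch),
 * the distinction might be consumed as:
 *
 *	ret = memory_failure(pfn, MF_ACTION_REQUIRED);
 *	if (!ret || ret == -EHWPOISON)
 *		return;			// handled, or already poisoned
 *	if (ret == -EOPNOTSUPP || ret == -ENXIO)
 *		return;			// filtered, or pfn not kernel-managed
 *	pr_err("pfn %#lx: recovery failed: %d\n", pfn, ret);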
*/ int memory_failure(unsigned long pfn, int flags) { diff --git a/mm/memory.c b/mm/memory.c index 369905596243..6ea3551eb2df 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -94,14 +94,6 @@ #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif -#ifndef CONFIG_NUMA -unsigned long max_mapnr; -EXPORT_SYMBOL(max_mapnr); - -struct page *mem_map; -EXPORT_SYMBOL(mem_map); -#endif - static vm_fault_t do_fault(struct vm_fault *vmf); static vm_fault_t do_anonymous_page(struct vm_fault *vmf); static bool vmf_pte_changed(struct vm_fault *vmf); @@ -121,14 +113,6 @@ static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) } /* - * A number of key systems in x86 including ioremap() rely on the assumption - * that high_memory defines the upper bound on direct map memory, then end - * of ZONE_NORMAL. - */ -void *high_memory; -EXPORT_SYMBOL(high_memory); - -/* * Randomize the address space (stacks, mmaps, brk, etc.). * * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, @@ -715,42 +699,53 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, } #endif +/** + * restore_exclusive_pte - Restore a device-exclusive entry + * @vma: VMA covering @address + * @folio: the mapped folio + * @page: the mapped folio page + * @address: the virtual address + * @ptep: pte pointer into the locked page table mapping the folio page + * @orig_pte: pte value at @ptep + * + * Restore a device-exclusive non-swap entry to an ordinary present pte. + * + * The folio and the page table must be locked, and MMU notifiers must have + * been called to invalidate any (exclusive) device mappings. + * + * Locking the folio makes sure that anybody who just converted the pte to + * a device-exclusive entry can map it into the device to make forward + * progress without others converting it back until the folio was unlocked. + * + * If the folio lock ever becomes an issue, we can stop relying on the folio + * lock; it might make some scenarios with heavy thrashing less likely to + * make forward progress, but these scenarios might not be valid use cases. + * + * Note that the folio lock does not protect against all cases of concurrent + * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers + * must use MMU notifiers to sync against any concurrent changes. + */ static void restore_exclusive_pte(struct vm_area_struct *vma, - struct page *page, unsigned long address, - pte_t *ptep) + struct folio *folio, struct page *page, unsigned long address, + pte_t *ptep, pte_t orig_pte) { - struct folio *folio = page_folio(page); - pte_t orig_pte; pte_t pte; - swp_entry_t entry; - orig_pte = ptep_get(ptep); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(orig_pte); if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); - else if (is_writable_device_exclusive_entry(entry)) - pte = maybe_mkwrite(pte_mkdirty(pte), vma); - - VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && - PageAnonExclusive(page)), folio); - - /* - * No need to take a page reference as one was already - * created when the swap entry was made. - */ - if (folio_test_anon(folio)) - folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); - else - /* - * Currently device exclusive access only supports anonymous - * memory so the entry shouldn't point to a filebacked page. 
- */ - WARN_ON_ONCE(1); + if ((vma->vm_flags & VM_WRITE) && + can_change_pte_writable(vma, address, pte)) { + if (folio_test_dirty(folio)) + pte = pte_mkdirty(pte); + pte = pte_mkwrite(pte, vma); + } set_pte_at(vma->vm_mm, address, ptep, pte); /* @@ -764,16 +759,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, * Tries to restore an exclusive pte if the page lock can be acquired without * sleeping. */ -static int -try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, - unsigned long addr) +static int try_restore_exclusive_pte(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t orig_pte) { - swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); - struct page *page = pfn_swap_entry_to_page(entry); + struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte)); + struct folio *folio = page_folio(page); - if (trylock_page(page)) { - restore_exclusive_pte(vma, page, addr, src_pte); - unlock_page(page); + if (folio_trylock(folio)) { + restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte); + folio_unlock(folio); return 0; } @@ -853,7 +847,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, folio_get(folio); rss[mm_counter(folio)]++; /* Cannot fail as these pages cannot get pinned. */ - folio_try_dup_anon_rmap_pte(folio, page, src_vma); + folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma); /* * We do not preserve soft-dirty information, because so @@ -879,7 +873,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, * (ie. COW) mappings. */ VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); - if (try_restore_exclusive_pte(src_pte, src_vma, addr)) + if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte)) return -EBUSY; return -ENOENT; } else if (is_pte_marker_entry(entry)) { @@ -1007,14 +1001,14 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, - nr, src_vma))) { + nr, dst_vma, src_vma))) { folio_ref_sub(folio, nr); return -EAGAIN; } rss[MM_ANONPAGES] += nr; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); } else { - folio_dup_file_rmap_ptes(folio, page, nr); + folio_dup_file_rmap_ptes(folio, page, nr, dst_vma); rss[mm_counter_file(folio)] += nr; } if (any_writable) @@ -1032,7 +1026,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma * guarantee the pinned page won't be randomly replaced in the * future. */ - if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) { /* Page may be pinned, we have to copy. 
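		 * The parent may hold a GUP pin that must keep pointing at
		 * this exact page, so instead of COW-sharing it the child
		 * receives its own copy.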
*/ folio_put(folio); err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte, @@ -1042,7 +1036,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma rss[MM_ANONPAGES]++; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); } else { - folio_dup_file_rmap_pte(folio, page); + folio_dup_file_rmap_pte(folio, page, dst_vma); rss[mm_counter_file(folio)]++; } @@ -1619,8 +1613,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, */ WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(folio)]--; - if (is_device_private_entry(entry)) - folio_remove_rmap_pte(folio, page, vma); + folio_remove_rmap_pte(folio, page, vma); folio_put(folio); } else if (!non_swap_entry(entry)) { /* Genuine swap entries, hence a private anon pages */ @@ -2132,19 +2125,39 @@ static int validate_page_before_insert(struct vm_area_struct *vma, } static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, - unsigned long addr, struct page *page, pgprot_t prot) + unsigned long addr, struct page *page, + pgprot_t prot, bool mkwrite) { struct folio *folio = page_folio(page); - pte_t pteval; + pte_t pteval = ptep_get(pte); + + if (!pte_none(pteval)) { + if (!mkwrite) + return -EBUSY; + + /* see insert_pfn(). */ + if (pte_pfn(pteval) != page_to_pfn(page)) { + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval))); + return -EFAULT; + } + pteval = maybe_mkwrite(pteval, vma); + pteval = pte_mkyoung(pteval); + if (ptep_set_access_flags(vma, addr, pte, pteval, 1)) + update_mmu_cache(vma, addr, pte); + return 0; + } - if (!pte_none(ptep_get(pte))) - return -EBUSY; /* Ok, finally just insert the thing.. */ pteval = mk_pte(page, prot); if (unlikely(is_zero_folio(folio))) { pteval = pte_mkspecial(pteval); } else { folio_get(folio); + pteval = mk_pte(page, prot); + if (mkwrite) { + pteval = pte_mkyoung(pteval); + pteval = maybe_mkwrite(pte_mkdirty(pteval), vma); + } inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); folio_add_file_rmap_pte(folio, page, vma); } @@ -2153,7 +2166,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, } static int insert_page(struct vm_area_struct *vma, unsigned long addr, - struct page *page, pgprot_t prot) + struct page *page, pgprot_t prot, bool mkwrite) { int retval; pte_t *pte; @@ -2166,7 +2179,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, pte = get_locked_pte(vma->vm_mm, addr, &ptl); if (!pte) goto out; - retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); + retval = insert_page_into_pte_locked(vma, pte, addr, page, prot, + mkwrite); pte_unmap_unlock(pte, ptl); out: return retval; @@ -2180,7 +2194,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, err = validate_page_before_insert(vma, page); if (err) return err; - return insert_page_into_pte_locked(vma, pte, addr, page, prot); + return insert_page_into_pte_locked(vma, pte, addr, page, prot, false); } /* insert_pages() amortizes the cost of spinlock operations @@ -2316,7 +2330,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } - return insert_page(vma, addr, page, vma->vm_page_prot); + return insert_page(vma, addr, page, vma->vm_page_prot, false); } EXPORT_SYMBOL(vm_insert_page); @@ -2596,7 +2610,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, * result in pfn_t_has_page() == false. 
*/ page = pfn_to_page(pfn_t_to_pfn(pfn)); - err = insert_page(vma, addr, page, pgprot); + err = insert_page(vma, addr, page, pgprot, mkwrite); } else { return insert_pfn(vma, addr, pfn, pgprot, mkwrite); } @@ -2609,6 +2623,26 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, + bool write) +{ + pgprot_t pgprot = vmf->vma->vm_page_prot; + unsigned long addr = vmf->address; + int err; + + if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + + err = insert_page(vmf->vma, addr, page, pgprot, write); + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite); + vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { @@ -3673,19 +3707,86 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) return ret; } -static bool wp_can_reuse_anon_folio(struct folio *folio, - struct vm_area_struct *vma) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static bool __wp_can_reuse_large_anon_folio(struct folio *folio, + struct vm_area_struct *vma) { + bool exclusive = false; + + /* Let's just free up a large folio if only a single page is mapped. */ + if (folio_large_mapcount(folio) <= 1) + return false; + /* - * We could currently only reuse a subpage of a large folio if no - * other subpages of the large folios are still mapped. However, - * let's just consistently not reuse subpages even if we could - * reuse in that scenario, and give back a large folio a bit - * sooner. + * The assumption for anonymous folios is that each page can only get + * mapped once into each MM. The only exception are KSM folios, which + * are always small. + * + * Each taken mapcount must be paired with exactly one taken reference, + * whereby the refcount must be incremented before the mapcount when + * mapping a page, and the refcount must be decremented after the + * mapcount when unmapping a page. + * + * If all folio references are from mappings, and all mappings are in + * the page tables of this MM, then this folio is exclusive to this MM. */ - if (folio_test_large(folio)) + if (folio_test_large_maybe_mapped_shared(folio)) + return false; + + VM_WARN_ON_ONCE(folio_test_ksm(folio)); + VM_WARN_ON_ONCE(folio_mapcount(folio) > folio_nr_pages(folio)); + VM_WARN_ON_ONCE(folio_entire_mapcount(folio)); + + if (unlikely(folio_test_swapcache(folio))) { + /* + * Note: freeing up the swapcache will fail if some PTEs are + * still swap entries. + */ + if (!folio_trylock(folio)) + return false; + folio_free_swap(folio); + folio_unlock(folio); + } + + if (folio_large_mapcount(folio) != folio_ref_count(folio)) return false; + /* Stabilize the mapcount vs. refcount and recheck. */ + folio_lock_large_mapcount(folio); + VM_WARN_ON_ONCE(folio_large_mapcount(folio) < folio_ref_count(folio)); + + if (folio_test_large_maybe_mapped_shared(folio)) + goto unlock; + if (folio_large_mapcount(folio) != folio_ref_count(folio)) + goto unlock; + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id && + folio_mm_id(folio, 1) != vma->vm_mm->mm_id); + + /* + * Do we need the folio lock? Likely not. If there would have been + * references from page migration/swapout, we would have detected + * an additional folio reference and never ended up here. 
+ */ + exclusive = true; +unlock: + folio_unlock_large_mapcount(folio); + return exclusive; +} +#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ +static bool __wp_can_reuse_large_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +static bool wp_can_reuse_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio)) + return __wp_can_reuse_large_anon_folio(folio, vma); + /* * We have to verify under folio lock: these early checks are * just an optimization to avoid locking the folio and freeing @@ -3794,13 +3895,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a - * VM_PFNMAP VMA. + * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called. * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. */ - if (!vmf->page) + if (!vmf->page || is_fsdax_page(vmf->page)) { + vmf->page = NULL; return wp_pfn_shared(vmf); + } return wp_page_shared(vmf, folio); } @@ -3990,7 +4093,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) folio_put(folio); return ret; } - mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, + mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); mmu_notifier_invalidate_range_start(&range); @@ -3998,7 +4101,8 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) - restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); + restore_exclusive_pte(vma, folio, vmf->page, vmf->address, + vmf->pte, vmf->orig_pte); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -4347,9 +4451,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * freed. */ if (trylock_page(vmf->page)) { + struct dev_pagemap *pgmap; + get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); + pgmap = page_pgmap(vmf->page); + ret = pgmap->ops->migrate_to_ram(vmf); unlock_page(vmf->page); put_page(vmf->page); } else { @@ -4408,7 +4515,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } need_clear_cache = true; - mem_cgroup_swapin_uncharge_swap(entry, nr_pages); + memcg1_swapin(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) @@ -5577,7 +5684,7 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, * Flag if the folio is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ - if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) + if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) *flags |= TNF_SHARED; /* * For memory tiering mode, cpupid of slow memory page is used @@ -6348,6 +6455,88 @@ fail: #endif #ifdef CONFIG_PER_VMA_LOCK +static inline bool __vma_enter_locked(struct vm_area_struct *vma, bool detaching) +{ + unsigned int tgt_refcnt = VMA_LOCK_OFFSET; + + /* Additional refcnt if the vma is attached. */ + if (!detaching) + tgt_refcnt++; + + /* + * If vma is detached then only vma_mark_attached() can raise the + * vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached(). 
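	 *
	 * For intuition (an illustrative summary, not text from the source):
	 * vm_refcnt roughly encodes
	 *
	 *	0                       detached, no users
	 *	1                       attached, no readers
	 *	1 + R                   attached, R lockless readers
	 *	VMA_LOCK_OFFSET + x     as above, plus a writer present
	 *
	 * so waiting for tgt_refcnt (VMA_LOCK_OFFSET, +1 if attached) waits
	 * for every reader to drop its reference.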
+ */ + if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt)) + return false; + + rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_); + rcuwait_wait_event(&vma->vm_mm->vma_writer_wait, + refcount_read(&vma->vm_refcnt) == tgt_refcnt, + TASK_UNINTERRUPTIBLE); + lock_acquired(&vma->vmlock_dep_map, _RET_IP_); + + return true; +} + +static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached) +{ + *detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt); + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); +} + +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) +{ + bool locked; + + /* + * __vma_enter_locked() returns false immediately if the vma is not + * attached, otherwise it waits until refcnt is indicating that vma + * is attached with no readers. + */ + locked = __vma_enter_locked(vma, false); + + /* + * We should use WRITE_ONCE() here because we can have concurrent reads + * from the early lockless pessimistic check in vma_start_read(). + * We don't really care about the correctness of that early check, but + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. + */ + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); + + if (locked) { + bool detached; + + __vma_exit_locked(vma, &detached); + WARN_ON_ONCE(detached); /* vma should remain attached */ + } +} +EXPORT_SYMBOL_GPL(__vma_start_write); + +void vma_mark_detached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_attached(vma); + + /* + * We are the only writer, so no need to use vma_refcount_put(). + * The condition below is unlikely because the vma has been already + * write-locked and readers can increment vm_refcnt only temporarily + * before they check vm_lock_seq, realize the vma is locked and drop + * back the vm_refcnt. That is a narrow window for observing a raised + * vm_refcnt. + */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* Wait until vma is detached with no readers. */ + if (__vma_enter_locked(vma, true)) { + bool detached; + + __vma_exit_locked(vma, &detached); + WARN_ON_ONCE(!detached); + } + } +} + /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the @@ -6365,15 +6554,17 @@ retry: if (!vma) goto inval; - if (!vma_start_read(vma)) - goto inval; + vma = vma_start_read(mm, vma); + if (IS_ERR_OR_NULL(vma)) { + /* Check if the VMA got isolated after we found it */ + if (PTR_ERR(vma) == -EAGAIN) { + count_vm_vma_lock_event(VMA_LOCK_MISS); + /* The area was replaced with another one */ + goto retry; + } - /* Check if the VMA got isolated after we found it */ - if (vma->detached) { - vma_end_read(vma); - count_vm_vma_lock_event(VMA_LOCK_MISS); - /* The area was replaced with another one */ - goto retry; + /* Failed to lock the VMA */ + goto inval; } /* * At this point, we have a stable reference to a VMA: The VMA is @@ -6382,8 +6573,9 @@ retry: * fields are accessible for RCU readers. */ - /* Check since vm_start/vm_end might change before we lock the VMA */ - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + /* Check if the vma we locked is the right one. 
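	 * Under RCU the VMA object can be freed and reused for a different
	 * mm or address range between lookup and locking, so re-validate
	 * both the owning mm and the range here.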
*/ + if (unlikely(vma->vm_mm != mm || + address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; rcu_read_unlock(); @@ -6637,7 +6829,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; - unsigned long prot = 0; + pgprot_t prot = __pgprot(0); void __iomem *maddr; int offset = offset_in_page(addr); int ret = -EINVAL; @@ -6647,7 +6839,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, retry: if (follow_pfnmap_start(&args)) return -EINVAL; - prot = pgprot_val(args.pgprot); + prot = args.pgprot; phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; writable = args.writable; follow_pfnmap_end(&args); @@ -6662,7 +6854,7 @@ retry: if (follow_pfnmap_start(&args)) goto out_unmap; - if ((prot != pgprot_val(args.pgprot)) || + if ((pgprot_val(prot) != pgprot_val(args.pgprot)) || (phys_addr != (args.pfn << PAGE_SHIFT)) || (writable != args.writable)) { follow_pfnmap_end(&args); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 16cf9e17077e..75401866fb76 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1828,8 +1828,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (unlikely(page_folio(page) != folio)) goto put_folio; - if (folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { + if (folio_contain_hwpoisoned_page(folio)) { if (WARN_ON(folio_test_lru(folio))) folio_isolate_lru(folio); if (folio_mapped(folio)) { diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a9eea051b0d6..b28a1e6ae096 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -673,11 +673,11 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * See folio_likely_mapped_shared() on possible imprecision when we + * See folio_maybe_mapped_shared() on possible imprecision when we * cannot easily detect if a folio is shared. */ if ((flags & MPOL_MF_MOVE_ALL) || - (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) + (!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) if (!folio_isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: @@ -1064,10 +1064,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * See folio_likely_mapped_shared() on possible imprecision when we + * See folio_maybe_mapped_shared() on possible imprecision when we * cannot easily detect if a folio is shared. 
*/ - if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) { + if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, diff --git a/mm/memremap.c b/mm/memremap.c index 40d4547ce514..2aebc1b192da 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -458,8 +458,9 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap); void free_zone_device_folio(struct folio *folio) { - if (WARN_ON_ONCE(!folio->page.pgmap->ops || - !folio->page.pgmap->ops->page_free)) + struct dev_pagemap *pgmap = folio->pgmap; + + if (WARN_ON_ONCE(!pgmap)) return; mem_cgroup_uncharge(folio); @@ -484,19 +485,42 @@ void free_zone_device_folio(struct folio *folio) * For other types of ZONE_DEVICE pages, migration is either * handled differently or not done at all, so there is no need * to clear folio->mapping. + * + * FS DAX pages clear the mapping when the folio->share count hits + * zero which indicating the page has been removed from the file + * system mapping. */ - folio->mapping = NULL; - folio->page.pgmap->ops->page_free(folio_page(folio, 0)); + if (pgmap->type != MEMORY_DEVICE_FS_DAX && + pgmap->type != MEMORY_DEVICE_GENERIC) + folio->mapping = NULL; + + switch (pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; + pgmap->ops->page_free(folio_page(folio, 0)); + put_dev_pagemap(pgmap); + break; - if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE && - folio->page.pgmap->type != MEMORY_DEVICE_COHERENT) + case MEMORY_DEVICE_GENERIC: /* * Reset the refcount to 1 to prepare for handing out the page * again. */ folio_set_count(folio, 1); - else - put_dev_pagemap(folio->page.pgmap); + break; + + case MEMORY_DEVICE_FS_DAX: + wake_up_var(&folio->page); + break; + + case MEMORY_DEVICE_PCI_P2PDMA: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; + pgmap->ops->page_free(folio_page(folio, 0)); + break; + } } void zone_device_page_init(struct page *page) @@ -505,26 +529,8 @@ void zone_device_page_init(struct page *page) * Drivers shouldn't be allocating pages after calling * memunmap_pages(). */ - WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref)); + WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref)); set_page_count(page, 1); lock_page(page); } EXPORT_SYMBOL_GPL(zone_device_page_init); - -#ifdef CONFIG_FS_DAX -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX) - return false; - - /* - * fsdax page refcounts are 1-based, rather than 0-based: if - * refcount is 1, then the page is free and the refcount is - * stable because nobody holds a reference on the page. 
- */ - if (folio_ref_sub_return(folio, refs) == 1) - wake_up_var(&folio->_refcount); - return true; -} -EXPORT_SYMBOL(__put_devmap_managed_folio_refs); -#endif /* CONFIG_FS_DAX */ diff --git a/mm/migrate.c b/mm/migrate.c index 97f0edf0c032..f3ee6d8d5e2e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -202,7 +202,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, return false; VM_BUG_ON_PAGE(!PageAnon(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page); + VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page); if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || mm_forbids_zeropage(pvmw->vma->vm_mm)) @@ -328,7 +328,7 @@ static bool remove_migration_pte(struct folio *folio, folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } - if (vma->vm_flags & VM_LOCKED) + if (READ_ONCE(vma->vm_flags) & VM_LOCKED) mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), @@ -2226,7 +2226,7 @@ static int __add_folio_for_migration(struct folio *folio, int node, if (folio_nid(folio) == node) return 0; - if (folio_likely_mapped_shared(folio) && !migrate_all) + if (folio_maybe_mapped_shared(folio) && !migrate_all) return -EACCES; if (folio_test_hugetlb(folio)) { @@ -2651,11 +2651,10 @@ int migrate_misplaced_folio_prepare(struct folio *folio, * processes with execute permissions as they are probably * shared libraries. * - * See folio_likely_mapped_shared() on possible imprecision + * See folio_maybe_mapped_shared() on possible imprecision * when we cannot easily detect if a folio is shared. */ - if ((vma->vm_flags & VM_EXEC) && - folio_likely_mapped_shared(folio)) + if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio)) return -EACCES; /* diff --git a/mm/migrate_device.c b/mm/migrate_device.c index a351497ced4a..3158afe7eb23 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -113,6 +113,7 @@ again: arch_enter_lazy_mmu_mode(); for (; addr < end; addr += PAGE_SIZE, ptep++) { + struct dev_pagemap *pgmap; unsigned long mpfn = 0, pfn; struct folio *folio; struct page *page; @@ -140,9 +141,10 @@ again: goto next; page = pfn_swap_entry_to_page(entry); + pgmap = page_pgmap(page); if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || - page->pgmap->owner != migrate->pgmap_owner) + pgmap->owner != migrate->pgmap_owner) goto next; mpfn = migrate_pfn(page_to_pfn(page)) | @@ -159,12 +161,16 @@ again: } page = vm_normal_page(migrate->vma, addr, pte); if (page && !is_zone_device_page(page) && - !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) - goto next; - else if (page && is_device_coherent_page(page) && - (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) || - page->pgmap->owner != migrate->pgmap_owner)) + !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) { goto next; + } else if (page && is_device_coherent_page(page)) { + pgmap = page_pgmap(page); + + if (!(migrate->flags & + MIGRATE_VMA_SELECT_DEVICE_COHERENT) || + pgmap->owner != migrate->pgmap_owner) + goto next; + } mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; } diff --git a/mm/mincore.c b/mm/mincore.c index d6bd19e520fc..832f29f46767 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -239,7 +239,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, start = untagged_addr(start); /* Check the start address: needs to be page-aligned.. 
*/ - if (start & ~PAGE_MASK) + if (unlikely(start & ~PAGE_MASK)) return -EINVAL; /* ..and we need to be passed a valid user-space range */ diff --git a/mm/mlock.c b/mm/mlock.c index cde076fa7d5e..3cb72b579ffd 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -368,6 +368,8 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, if (is_huge_zero_pmd(*pmd)) goto out; folio = pmd_folio(*pmd); + if (folio_is_zone_device(folio)) + goto out; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else diff --git a/mm/mm_init.c b/mm/mm_init.c index 2630cc30147e..a38a1909b407 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -30,12 +30,28 @@ #include <linux/crash_dump.h> #include <linux/execmem.h> #include <linux/vmstat.h> +#include <linux/hugetlb.h> #include "internal.h" #include "slab.h" #include "shuffle.h" #include <asm/setup.h> +#ifndef CONFIG_NUMA +unsigned long max_mapnr; +EXPORT_SYMBOL(max_mapnr); + +struct page *mem_map; +EXPORT_SYMBOL(mem_map); +#endif + +/* + * high_memory defines the upper bound on direct map memory, then end + * of ZONE_NORMAL. + */ +void *high_memory; +EXPORT_SYMBOL(high_memory); + #ifdef CONFIG_DEBUG_MEMORY_INIT int __meminitdata mminit_loglevel; @@ -438,7 +454,7 @@ static void __init find_zone_movable_pfns_for_nodes(void) * was requested by the user */ required_movablecore = - roundup(required_movablecore, MAX_ORDER_NR_PAGES); + round_up(required_movablecore, MAX_ORDER_NR_PAGES); required_movablecore = min(totalpages, required_movablecore); corepages = totalpages - required_movablecore; @@ -545,11 +561,11 @@ restart: out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ - for (nid = 0; nid < MAX_NUMNODES; nid++) { + for_each_node_state(nid, N_MEMORY) { unsigned long start_pfn, end_pfn; zone_movable_pfn[nid] = - roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); + round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (zone_movable_pfn[nid] >= end_pfn) @@ -649,6 +665,28 @@ static inline void fixup_hashdist(void) static inline void fixup_hashdist(void) {} #endif /* CONFIG_NUMA */ +/* + * Initialize a reserved page unconditionally, finding its zone first. 
+ */ +void __meminit __init_page_from_nid(unsigned long pfn, int nid) +{ + pg_data_t *pgdat; + int zid; + + pgdat = NODE_DATA(nid); + + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + struct zone *zone = &pgdat->node_zones[zid]; + + if (zone_spans_pfn(zone, pfn)) + break; + } + __init_single_page(pfn_to_page(pfn), pfn, zid, nid); + + if (pageblock_aligned(pfn)) + set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE); +} + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static inline void pgdat_set_deferred_range(pg_data_t *pgdat) { @@ -705,26 +743,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static void __meminit init_reserved_page(unsigned long pfn, int nid) +static void __meminit init_deferred_page(unsigned long pfn, int nid) { - pg_data_t *pgdat; - int zid; - if (early_page_initialised(pfn, nid)) return; - pgdat = NODE_DATA(nid); - - for (zid = 0; zid < MAX_NR_ZONES; zid++) { - struct zone *zone = &pgdat->node_zones[zid]; - - if (zone_spans_pfn(zone, pfn)) - break; - } - __init_single_page(pfn_to_page(pfn), pfn, zid, nid); - - if (pageblock_aligned(pfn)) - set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE); + __init_page_from_nid(pfn, nid); } #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} @@ -739,7 +763,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static inline void init_reserved_page(unsigned long pfn, int nid) +static inline void init_deferred_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -760,7 +784,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn, nid); + init_deferred_page(start_pfn, nid); /* * no need for atomic set_bit because the struct @@ -998,7 +1022,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, * and zone_device_data. It is a bug if a ZONE_DEVICE page is * ever freed or placed on a driver-private list. */ - page->pgmap = pgmap; + page_folio(page)->pgmap = pgmap; page->zone_device_data = NULL; /* @@ -1017,12 +1041,25 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, } /* - * ZONE_DEVICE pages are released directly to the driver page allocator - * which will set the page count to 1 when allocating the page. + * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC are released + * directly to the driver page allocator which will set the page count + * to 1 when allocating the page. + * + * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have + * their refcount reset to one whenever they are freed (ie. after + * their refcount drops to 0). */ - if (pgmap->type == MEMORY_DEVICE_PRIVATE || - pgmap->type == MEMORY_DEVICE_COHERENT) + switch (pgmap->type) { + case MEMORY_DEVICE_FS_DAX: + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: + case MEMORY_DEVICE_PCI_P2PDMA: set_page_count(page, 0); + break; + + case MEMORY_DEVICE_GENERIC: + break; + } } /* @@ -1431,7 +1468,7 @@ void __meminit init_currently_empty_zone(struct zone *zone, #ifndef CONFIG_SPARSEMEM /* - * Calculate the size of the zone->blockflags rounded to an unsigned long + * Calculate the size of the zone->pageblock_flags rounded to an unsigned long * Start by making sure zonesize is a multiple of pageblock_order by rounding * up. 
Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally * round what is now in bits to nearest long in bits, then return it in @@ -1442,10 +1479,10 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l unsigned long usemapsize; zonesize += zone_start_pfn & (pageblock_nr_pages-1); - usemapsize = roundup(zonesize, pageblock_nr_pages); + usemapsize = round_up(zonesize, pageblock_nr_pages); usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; - usemapsize = roundup(usemapsize, BITS_PER_LONG); + usemapsize = round_up(usemapsize, BITS_PER_LONG); return usemapsize / BITS_PER_BYTE; } @@ -1617,7 +1654,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); offset = pgdat->node_start_pfn - start; /* - * The zone's endpoints aren't required to be MAX_PAGE_ORDER + * The zone's endpoints aren't required to be MAX_PAGE_ORDER * aligned but the node_mem_map endpoints must be in order * for the buddy allocator to function correctly. */ @@ -1633,14 +1670,15 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", __func__, pgdat->node_id, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map); -#ifndef CONFIG_NUMA + /* the global mem_map is just set as node 0's */ - if (pgdat == NODE_DATA(0)) { - mem_map = NODE_DATA(0)->node_mem_map; - if (page_to_pfn(mem_map) != pgdat->node_start_pfn) - mem_map -= offset; - } -#endif + WARN_ON(pgdat != NODE_DATA(0)); + + mem_map = pgdat->node_mem_map; + if (page_to_pfn(mem_map) != pgdat->node_start_pfn) + mem_map -= offset; + + max_mapnr = end - start; } #else static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } @@ -1747,6 +1785,27 @@ static bool arch_has_descending_max_zone_pfns(void) return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); } +static void set_high_memory(void) +{ + phys_addr_t highmem = memblock_end_of_DRAM(); + + /* + * Some architectures (e.g. ARM) set high_memory very early and + * use it in arch setup code. + * If an architecture already set high_memory don't overwrite it + */ + if (high_memory) + return; + +#ifdef CONFIG_HIGHMEM + if (arch_has_descending_max_zone_pfns() || + highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])) + highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]); +#endif + + high_memory = phys_to_virt(highmem - 1) + 1; +} + /** * free_area_init - Initialise all pg_data_t and zone data * @max_zone_pfn: an array of max PFNs for each zone @@ -1861,11 +1920,16 @@ void __init free_area_init(unsigned long *max_zone_pfn) } } + for_each_node_state(nid, N_MEMORY) + sparse_vmemmap_init_nid_late(nid); + calc_nr_kernel_pages(); memmap_init(); /* disable hash distribution for systems with a single node */ fixup_hashdist(); + + set_high_memory(); } /** @@ -2251,6 +2315,15 @@ void __init init_cma_reserved_pageblock(struct page *page) adjust_managed_page_count(page, pageblock_nr_pages); page_zone(page)->cma_pages += pageblock_nr_pages; } +/* + * Similar to above, but only set the migrate type and stats. 
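 * Unlike init_cma_reserved_pageblock() above, the pages themselves are not
 * released to the page allocator here; only the pageblock migratetype and
 * the managed/CMA page counts are updated.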
+ */ +void __init init_cma_pageblock(struct page *page) +{ + set_pageblock_migratetype(page, MIGRATE_CMA); + adjust_managed_page_count(page, pageblock_nr_pages); + page_zone(page)->cma_pages += pageblock_nr_pages; +} #endif void set_zone_contiguous(struct zone *zone) @@ -2275,6 +2348,31 @@ void set_zone_contiguous(struct zone *zone) zone->contiguous = true; } +/* + * Check if a PFN range intersects multiple zones on one or more + * NUMA nodes. Specify the @nid argument if it is known that this + * PFN range is on one node, NUMA_NO_NODE otherwise. + */ +bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, + unsigned long nr_pages) +{ + struct zone *zone, *izone = NULL; + + for_each_zone(zone) { + if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid) + continue; + + if (zone_intersects(zone, start_pfn, nr_pages)) { + if (izone != NULL) + return true; + izone = zone; + } + + } + + return false; +} + static void __init mem_init_print_info(void); void __init page_alloc_init_late(void) { @@ -2636,11 +2734,22 @@ static void __init mem_init_print_info(void) ); } +void __init __weak arch_mm_preinit(void) +{ +} + +void __init __weak mem_init(void) +{ +} + /* * Set up kernel memory allocators */ void __init mm_core_init(void) { + arch_mm_preinit(); + hugetlb_bootmem_alloc(); + /* Initializations relying on SMP setup */ BUILD_BUG_ON(MAX_ZONELISTS > 2); build_all_zonelists(NULL); @@ -2656,6 +2765,7 @@ void __init mm_core_init(void) report_meminit(); kmsan_init_shadow(); stack_depot_early_init(); + memblock_free_all(); mem_init(); kmem_cache_init(); /* diff --git a/mm/mmap.c b/mm/mmap.c index d6bbe435bd99..bd210aaf7ebd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1305,7 +1305,8 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - remove_vma(vma, /* unreachable = */ true); + vma_mark_detached(vma); + remove_vma(vma); count++; cond_resched(); vma = vma_next(&vmi); @@ -1747,6 +1748,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); struct vm_area_struct *next; struct mmu_gather tlb; + PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length); BUG_ON(new_start > new_end); @@ -1761,7 +1763,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) /* * cover the whole range: [new_start, old_end) */ - vmg.vma = vma; + vmg.middle = vma; if (vma_expand(&vmg)) return -ENOMEM; @@ -1769,8 +1771,8 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made. */ - if (length != move_page_tables(vma, old_start, - vma, new_start, length, false, true)) + pmc.for_stack = true; + if (length != move_page_tables(&pmc)) return -ENOMEM; tlb_gather_mmu(&tlb, mm); diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 7aa6f18c500b..db7ba4a725d6 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -246,8 +246,16 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch) * IRQs delays the completion of the TLB flush we can never observe an already * freed page. * - * Architectures that do not have this (PPC) need to delay the freeing by some - * other means, this is that means. + * Not all systems IPI every CPU for this purpose: + * + * - Some architectures have HW support for cross-CPU synchronisation of TLB + * flushes, so there's no IPI at all. 
+ * + * - Paravirt guests can do this TLB flushing in the hypervisor, or coordinate + * with the hypervisor to defer flushing on preempted vCPUs. + * + * Such systems need to delay the freeing by some other means, this is that + * means. * * What we do is batch the freed directory pages (tables) and RCU free them. * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling diff --git a/mm/mprotect.c b/mm/mprotect.c index 516b1d847e2c..62c1f7945741 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -133,7 +133,7 @@ static long change_pte_range(struct mmu_gather *tlb, /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && (folio_maybe_dma_pinned(folio) || - folio_likely_mapped_shared(folio))) + folio_maybe_mapped_shared(folio))) continue; /* @@ -225,14 +225,6 @@ static long change_pte_range(struct mmu_gather *tlb, newpte = swp_entry_to_pte(entry); if (pte_swp_uffd_wp(oldpte)) newpte = pte_swp_mkuffd_wp(newpte); - } else if (is_writable_device_exclusive_entry(entry)) { - entry = make_readable_device_exclusive_entry( - swp_offset(entry)); - newpte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(oldpte)) - newpte = pte_swp_mksoft_dirty(newpte); - if (pte_swp_uffd_wp(oldpte)) - newpte = pte_swp_mkuffd_wp(newpte); } else if (is_pte_marker_entry(entry)) { /* * Ignore error swap entries unconditionally, @@ -607,7 +599,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, unsigned long start, unsigned long end, unsigned long newflags) { struct mm_struct *mm = vma->vm_mm; - unsigned long oldflags = vma->vm_flags; + unsigned long oldflags = READ_ONCE(vma->vm_flags); long nrpages = (end - start) >> PAGE_SHIFT; unsigned int mm_cp_flags = 0; unsigned long charged = 0; @@ -627,7 +619,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, * uncommon case, so doesn't need to be very optimized. */ if (arch_has_pfn_modify_check() && - (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && + (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) && (newflags & VM_ACCESS_FLAGS) == 0) { pgprot_t new_pgprot = vm_get_page_prot(newflags); @@ -676,7 +668,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, * held in write mode. */ vma_start_write(vma); - vm_flags_reset(vma, newflags); + vm_flags_reset_once(vma, newflags); if (vma_wants_manual_pte_write_upgrade(vma)) mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; vma_set_page_prot(vma); diff --git a/mm/mremap.c b/mm/mremap.c index cff7f552f909..0865387531ed 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -32,6 +32,45 @@ #include "internal.h" +/* Classify the kind of remap operation being performed. */ +enum mremap_type { + MREMAP_INVALID, /* Initial state. */ + MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */ + MREMAP_SHRINK, /* old_len > new_len. */ + MREMAP_EXPAND, /* old_len < new_len. */ +}; + +/* + * Describes a VMA mremap() operation and is threaded throughout it. + * + * Any of the fields may be mutated by the operation, however these values will + * always accurately reflect the remap (for instance, we may adjust lengths and + * delta to account for hugetlb alignment). + */ +struct vma_remap_struct { + /* User-provided state. */ + unsigned long addr; /* User-specified address from which we remap. */ + unsigned long old_len; /* Length of range being remapped. */ + unsigned long new_len; /* Desired new length of mapping. */ + unsigned long flags; /* user-specified MREMAP_* flags. */ + unsigned long new_addr; /* Optionally, desired new address. */ + + /* uffd state. 
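+	 * Carried through so do_mremap() can complete the userfaultfd unmap and
+	 * remap notifications once the operation finishes.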
*/ + struct vm_userfaultfd_ctx *uf; + struct list_head *uf_unmap_early; + struct list_head *uf_unmap; + + /* VMA state, determined in do_mremap(). */ + struct vm_area_struct *vma; + + /* Internal state, determined in do_mremap(). */ + unsigned long delta; /* Absolute delta of old_len,new_len. */ + bool mlocked; /* Was the VMA mlock()'d? */ + enum mremap_type remap_type; /* expand, shrink, etc. */ + bool mmap_locked; /* Is mm currently write-locked? */ + unsigned long charged; /* If VM_ACCOUNT, # pages to account. */ +}; + static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -69,8 +108,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) return pmd; } -static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; @@ -83,13 +121,12 @@ static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, return pud_alloc(mm, p4d, addr); } -static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) { pud_t *pud; pmd_t *pmd; - pud = alloc_new_pud(mm, vma, addr); + pud = alloc_new_pud(mm, addr); if (!pud) return NULL; @@ -133,17 +170,19 @@ static pte_t move_soft_dirty_pte(pte_t pte) return pte; } -static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, - unsigned long old_addr, unsigned long old_end, - struct vm_area_struct *new_vma, pmd_t *new_pmd, - unsigned long new_addr, bool need_rmap_locks) +static int move_ptes(struct pagetable_move_control *pmc, + unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd) { + struct vm_area_struct *vma = pmc->old; bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; pmd_t dummy_pmdval; spinlock_t *old_ptl, *new_ptl; bool force_flush = false; + unsigned long old_addr = pmc->old_addr; + unsigned long new_addr = pmc->new_addr; + unsigned long old_end = old_addr + extent; unsigned long len = old_end - old_addr; int err = 0; @@ -165,7 +204,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, * serialize access to individual ptes, but only rmap traversal * order guarantees that we won't miss both the old and new ptes). */ - if (need_rmap_locks) + if (pmc->need_rmap_locks) take_rmap_locks(vma); /* @@ -239,7 +278,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); out: - if (need_rmap_locks) + if (pmc->need_rmap_locks) drop_rmap_locks(vma); return err; } @@ -254,10 +293,11 @@ static inline bool arch_supports_page_table_move(void) #endif #ifdef CONFIG_HAVE_MOVE_PMD -static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) +static bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; bool res = false; pmd_t pmd; @@ -303,7 +343,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
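	 * The spin_lock_nested(SINGLE_DEPTH_NESTING) below only tells lockdep
	 * that taking the second ptlock is intentional; it does not itself
	 * impose an ordering.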
*/ - old_ptl = pmd_lock(vma->vm_mm, old_pmd); + old_ptl = pmd_lock(mm, old_pmd); new_ptl = pmd_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -320,7 +360,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pmd_none(*new_pmd)); pmd_populate(mm, new_pmd, pmd_pgtable(pmd)); - flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE); out_unlock: if (new_ptl != old_ptl) spin_unlock(new_ptl); @@ -329,19 +369,19 @@ out_unlock: return res; } #else -static inline bool move_normal_pmd(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, - pmd_t *new_pmd) +static inline bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { return false; } #endif #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD) -static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -367,7 +407,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. */ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -379,7 +419,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pud_none(*new_pud)); pud_populate(mm, new_pud, pud_pgtable(pud)); - flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -387,19 +427,19 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static inline bool move_normal_pud(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, - pud_t *new_pud) +static inline bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { return false; } #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -414,7 +454,7 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -427,8 +467,8 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, /* Set the new pud */ /* mark soft_ditry when we add pud level soft dirty support */ - set_pud_at(mm, new_addr, new_pud, pud); - flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); + set_pud_at(mm, pmc->new_addr, new_pud, pud); + flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -436,8 +476,9 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) + { WARN_ON_ONCE(1); return false; @@ -458,10 +499,12 @@ enum pgt_entry { * destination pgt_entry. */ static __always_inline unsigned long get_extent(enum pgt_entry entry, - unsigned long old_addr, unsigned long old_end, - unsigned long new_addr) + struct pagetable_move_control *pmc) { unsigned long next, extent, mask, size; + unsigned long old_addr = pmc->old_addr; + unsigned long old_end = pmc->old_end; + unsigned long new_addr = pmc->new_addr; switch (entry) { case HPAGE_PMD: @@ -491,37 +534,50 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry, } /* + * Should move_pgt_entry() acquire the rmap locks? This is either expressed in + * the PMC, or overridden in the case of normal, larger page tables. + */ +static bool should_take_rmap_locks(struct pagetable_move_control *pmc, + enum pgt_entry entry) +{ + switch (entry) { + case NORMAL_PMD: + case NORMAL_PUD: + return true; + default: + return pmc->need_rmap_locks; + } +} + +/* * Attempts to speedup the move by moving entry at the level corresponding to * pgt_entry. Returns true if the move was successful, else false. 
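 * Whether the rmap locks are taken is decided by should_take_rmap_locks()
 * above rather than being passed in by each caller.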
*/ -static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, - void *old_entry, void *new_entry, bool need_rmap_locks) +static bool move_pgt_entry(struct pagetable_move_control *pmc, + enum pgt_entry entry, void *old_entry, void *new_entry) { bool moved = false; + bool need_rmap_locks = should_take_rmap_locks(pmc, entry); /* See comment in move_ptes() */ if (need_rmap_locks) - take_rmap_locks(vma); + take_rmap_locks(pmc->old); switch (entry) { case NORMAL_PMD: - moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pmd(pmc, old_entry, new_entry); break; case NORMAL_PUD: - moved = move_normal_pud(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pud(pmc, old_entry, new_entry); break; case HPAGE_PMD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pmd(vma, old_addr, new_addr, old_entry, + move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry, new_entry); break; case HPAGE_PUD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pud(vma, old_addr, new_addr, old_entry, - new_entry); + move_huge_pud(pmc, old_entry, new_entry); break; default: @@ -530,7 +586,7 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, } if (need_rmap_locks) - drop_rmap_locks(vma); + drop_rmap_locks(pmc->old); return moved; } @@ -541,8 +597,9 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, * the VMA that is created to span the source and destination of the move, * so we make an exception for it. */ -static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, - unsigned long mask, bool for_stack) +static bool can_align_down(struct pagetable_move_control *pmc, + struct vm_area_struct *vma, unsigned long addr_to_align, + unsigned long mask) { unsigned long addr_masked = addr_to_align & mask; @@ -551,11 +608,11 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali * of the corresponding VMA, we can't align down or we will destroy part * of the current mapping. */ - if (!for_stack && vma->vm_start != addr_to_align) + if (!pmc->for_stack && vma->vm_start != addr_to_align) return false; /* In the stack case we explicitly permit in-VMA alignment. */ - if (for_stack && addr_masked >= vma->vm_start) + if (pmc->for_stack && addr_masked >= vma->vm_start) return true; /* @@ -565,163 +622,390 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; } -/* Opportunistically realign to specified boundary for faster copy. */ -static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma, - unsigned long *new_addr, struct vm_area_struct *new_vma, - unsigned long mask, bool for_stack) +/* + * Determine if are in fact able to realign for efficiency to a higher page + * table boundary. + */ +static bool can_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) { + unsigned long align_mask = ~pagetable_mask; + unsigned long old_align = pmc->old_addr & align_mask; + unsigned long new_align = pmc->new_addr & align_mask; + unsigned long pagetable_size = align_mask + 1; + unsigned long old_align_next = pagetable_size - old_align; + + /* + * We don't want to have to go hunting for VMAs from the end of the old + * VMA to the next page table boundary, also we want to make sure the + * operation is wortwhile. 
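+ * (With 2 MiB PMD-level page tables, for instance, a source address sitting
+ * 1 MiB past a PMD boundary only qualifies if at least 1 MiB is being moved.)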
+ * + * So ensure that we only perform this realignment if the end of the + * range being copied reaches or crosses the page table boundary. + * + * boundary boundary + * .<- old_align -> . + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . <----------------.-----------> + * . len_in + * <-------------------------------> + * . pagetable_size . + * . <----------------> + * . old_align_next . + */ + if (pmc->len_in < old_align_next) + return false; + /* Skip if the addresses are already aligned. */ - if ((*old_addr & ~mask) == 0) - return; + if (old_align == 0) + return false; /* Only realign if the new and old addresses are mutually aligned. */ - if ((*old_addr & ~mask) != (*new_addr & ~mask)) - return; + if (old_align != new_align) + return false; /* Ensure realignment doesn't cause overlap with existing mappings. */ - if (!can_align_down(old_vma, *old_addr, mask, for_stack) || - !can_align_down(new_vma, *new_addr, mask, for_stack)) + if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) || + !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask)) + return false; + + return true; +} + +/* + * Opportunistically realign to specified boundary for faster copy. + * + * Consider an mremap() of a VMA with page table boundaries as below, and no + * preceding VMAs from the lower page table boundary to the start of the VMA, + * with the end of the range reaching or crossing the page table boundary. + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . pmc->old_addr . pmc->old_end + * . <----------------------------> + * . move these page tables + * + * If we proceed with moving page tables in this scenario, we will have a lot of + * work to do traversing old page tables and establishing new ones in the + * destination across multiple lower level page tables. + * + * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the + * page table boundary, so we can simply copy a single page table entry for the + * aligned portion of the VMA instead: + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * pmc->old_addr . pmc->old_end + * <-------------------------------------------> + * . move these page tables + */ +static void try_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) +{ + + if (!can_realign_addr(pmc, pagetable_mask)) return; - *old_addr = *old_addr & mask; - *new_addr = *new_addr & mask; + /* + * Simply align to page table boundaries. Note that we do NOT update the + * pmc->old_end value, and since the move_page_tables() operation spans + * from [old_addr, old_end) (offsetting new_addr as it is performed), + * this simply changes the start of the copy, not the end. + */ + pmc->old_addr &= pagetable_mask; + pmc->new_addr &= pagetable_mask; +} + +/* Is the page table move operation done? */ +static bool pmc_done(struct pagetable_move_control *pmc) +{ + return pmc->old_addr >= pmc->old_end; +} + +/* Advance to the next page table, offset by extent bytes. 
*/ +static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent) +{ + pmc->old_addr += extent; + pmc->new_addr += extent; } -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack) +/* + * Determine how many bytes in the specified input range have had their page + * tables moved so far. + */ +static unsigned long pmc_progress(struct pagetable_move_control *pmc) { - unsigned long extent, old_end; + unsigned long orig_old_addr = pmc->old_end - pmc->len_in; + unsigned long old_addr = pmc->old_addr; + + /* + * Prevent negative return values when {old,new}_addr was realigned but + * we broke out of the loop in move_page_tables() for the first PMD + * itself. + */ + return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr; +} + +unsigned long move_page_tables(struct pagetable_move_control *pmc) +{ + unsigned long extent; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; + struct mm_struct *mm = pmc->old->vm_mm; - if (!len) + if (!pmc->len_in) return 0; - old_end = old_addr + len; - - if (is_vm_hugetlb_page(vma)) - return move_hugetlb_page_tables(vma, new_vma, old_addr, - new_addr, len); + if (is_vm_hugetlb_page(pmc->old)) + return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, + pmc->new_addr, pmc->len_in); /* * If possible, realign addresses to PMD boundary for faster copy. * Only realign if the mremap copying hits a PMD boundary. */ - if (len >= PMD_SIZE - (old_addr & ~PMD_MASK)) - try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK, - for_stack); + try_realign_addr(pmc, PMD_MASK); - flush_cache_range(vma, old_addr, old_end); - mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, - old_addr, old_end); + flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end); + mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, + pmc->old_addr, pmc->old_end); mmu_notifier_invalidate_range_start(&range); - for (; old_addr < old_end; old_addr += extent, new_addr += extent) { + for (; !pmc_done(pmc); pmc_next(pmc, extent)) { cond_resched(); /* * If extent is PUD-sized try to speed up the move by moving at the * PUD level if possible. */ - extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); + extent = get_extent(NORMAL_PUD, pmc); - old_pud = get_old_pud(vma->vm_mm, old_addr); + old_pud = get_old_pud(mm, pmc->old_addr); if (!old_pud) continue; - new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); + new_pud = alloc_new_pud(mm, pmc->new_addr); if (!new_pud) break; if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { - move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, - old_pud, new_pud, need_rmap_locks); + move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud); /* We ignore and continue on error? 
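			 * Unlike the PMD path there is no split fallback for a
			 * huge PUD, so a failed move simply leaves the old
			 * entry in place.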
*/ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - - if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, - old_pud, new_pud, true)) + if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud)) continue; } - extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr); - old_pmd = get_old_pmd(vma->vm_mm, old_addr); + extent = get_extent(NORMAL_PMD, pmc); + old_pmd = get_old_pmd(mm, pmc->old_addr); if (!old_pmd) continue; - new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); + new_pmd = alloc_new_pmd(mm, pmc->new_addr); if (!new_pmd) break; again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && - move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, need_rmap_locks)) + move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd)) continue; - split_huge_pmd(vma, old_pmd, old_addr); + split_huge_pmd(pmc->old, old_pmd, pmc->old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* * If the extent is PMD-sized, try to speed the move by * moving at the PMD level if possible. */ - if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, true)) + if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd)) continue; } if (pmd_none(*old_pmd)) continue; - if (pte_alloc(new_vma->vm_mm, new_pmd)) + if (pte_alloc(pmc->new->vm_mm, new_pmd)) break; - if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, - new_vma, new_pmd, new_addr, need_rmap_locks) < 0) + if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0) goto again; } mmu_notifier_invalidate_range_end(&range); + return pmc_progress(pmc); +} + +/* Set vrm->delta to the difference in VMA size specified by user. */ +static void vrm_set_delta(struct vma_remap_struct *vrm) +{ + vrm->delta = abs_diff(vrm->old_len, vrm->new_len); +} + +/* Determine what kind of remap this is - shrink, expand or no resize at all. */ +static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm) +{ + if (vrm->delta == 0) + return MREMAP_NO_RESIZE; + + if (vrm->old_len > vrm->new_len) + return MREMAP_SHRINK; + + return MREMAP_EXPAND; +} + +/* + * When moving a VMA to vrm->new_adr, does this result in the new and old VMAs + * overlapping? + */ +static bool vrm_overlaps(struct vma_remap_struct *vrm) +{ + unsigned long start_old = vrm->addr; + unsigned long start_new = vrm->new_addr; + unsigned long end_old = vrm->addr + vrm->old_len; + unsigned long end_new = vrm->new_addr + vrm->new_len; + /* - * Prevent negative return values when {old,new}_addr was realigned - * but we broke out of the above loop for the first PMD itself. + * start_old end_old + * |-----------| + * | | + * |-----------| + * |-------------| + * | | + * |-------------| + * start_new end_new */ - if (old_addr < old_end - len) - return 0; + if (end_old > start_new && end_new > start_old) + return true; - return len + old_addr - old_end; /* how much done */ + return false; } -static unsigned long move_vma(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long old_len, - unsigned long new_len, unsigned long new_addr, - bool *locked, unsigned long flags, - struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) +/* Do the mremap() flags require that the new_addr parameter be specified? 
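+ * True for MREMAP_FIXED, which targets an exact address, and for
+ * MREMAP_DONTUNMAP, which always implies a move.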
*/ +static bool vrm_implies_new_addr(struct vma_remap_struct *vrm) { - long to_account = new_len - old_len; - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *new_vma; - unsigned long vm_flags = vma->vm_flags; - unsigned long new_pgoff; - unsigned long moved_len; - unsigned long account_start = 0; - unsigned long account_end = 0; - unsigned long hiwater_vm; - int err = 0; - bool need_rmap_locks; - struct vma_iterator vmi; + return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); +} + +/* + * Find an unmapped area for the requested vrm->new_addr. + * + * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only + * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to + * mmap(), otherwise this is equivalent to mmap() specifying a NULL address. + * + * Returns 0 on success (with vrm->new_addr updated), or an error code upon + * failure. + */ +static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm) +{ + struct vm_area_struct *vma = vrm->vma; + unsigned long map_flags = 0; + /* Page Offset _into_ the VMA. */ + pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT; + pgoff_t pgoff = vma->vm_pgoff + internal_pgoff; + unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0; + unsigned long res; + + if (vrm->flags & MREMAP_FIXED) + map_flags |= MAP_FIXED; + if (vma->vm_flags & VM_MAYSHARE) + map_flags |= MAP_SHARED; + + res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff, + map_flags); + if (IS_ERR_VALUE(res)) + return res; + + vrm->new_addr = res; + return 0; +} + +/* + * Keep track of pages which have been added to the memory mapping. If the VMA + * is accounted, also check to see if there is sufficient memory. + * + * Returns true on success, false if insufficient memory to charge. + */ +static bool vrm_charge(struct vma_remap_struct *vrm) +{ + unsigned long charged; + + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return true; + + /* + * If we don't unmap the old mapping, then we account the entirety of + * the length of the new one. Otherwise it's just the delta in size. + */ + if (vrm->flags & MREMAP_DONTUNMAP) + charged = vrm->new_len >> PAGE_SHIFT; + else + charged = vrm->delta >> PAGE_SHIFT; + + + /* This accounts 'charged' pages of memory. */ + if (security_vm_enough_memory_mm(current->mm, charged)) + return false; + + vrm->charged = charged; + return true; +} + +/* + * an error has occurred so we will not be using vrm->charged memory. Unaccount + * this memory if the VMA is accounted. + */ +static void vrm_uncharge(struct vma_remap_struct *vrm) +{ + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return; + + vm_unacct_memory(vrm->charged); + vrm->charged = 0; +} + +/* + * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to + * account for 'bytes' memory used, and if locked, indicate this in the VRM so + * we can handle this correctly later. + */ +static void vrm_stat_account(struct vma_remap_struct *vrm, + unsigned long bytes) +{ + unsigned long pages = bytes >> PAGE_SHIFT; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + vrm->mlocked = true; + } +} + +/* + * Perform checks before attempting to write a VMA prior to it being + * moved. 
+ */ +static unsigned long prep_move_vma(struct vma_remap_struct *vrm) +{ + unsigned long err = 0; + struct vm_area_struct *vma = vrm->vma; + unsigned long old_addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long dummy = vma->vm_flags; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ - if (mm->map_count >= sysctl_max_map_count - 3) + if (current->mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; - if (unlikely(flags & MREMAP_DONTUNMAP)) - to_account = new_len; - if (vma->vm_ops && vma->vm_ops->may_split) { if (vma->vm_start != old_addr) err = vma->vm_ops->may_split(vma, old_addr); @@ -739,61 +1023,239 @@ static unsigned long move_vma(struct vm_area_struct *vma, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, - MADV_UNMERGEABLE, &vm_flags); + MADV_UNMERGEABLE, &dummy); if (err) return err; - if (vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT)) - return -ENOMEM; + return 0; +} + +/* + * Unmap source VMA for VMA move, turning it from a copy to a move, being + * careful to ensure we do not underflow memory account while doing so if an + * accountable move. + * + * This is best effort, if we fail to unmap then we simply try to correct + * accounting and exit. + */ +static void unmap_source_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + unsigned long addr = vrm->addr; + unsigned long len = vrm->old_len; + struct vm_area_struct *vma = vrm->vma; + VMA_ITERATOR(vmi, mm, addr); + int err; + unsigned long vm_start; + unsigned long vm_end; + /* + * It might seem odd that we check for MREMAP_DONTUNMAP here, given this + * function implies that we unmap the original VMA, which seems + * contradictory. + * + * However, this occurs when this operation was attempted and an error + * arose, in which case we _do_ wish to unmap the _new_ VMA, which means + * we actually _do_ want it be unaccounted. + */ + bool accountable_move = (vma->vm_flags & VM_ACCOUNT) && + !(vrm->flags & MREMAP_DONTUNMAP); + + /* + * So we perform a trick here to prevent incorrect accounting. Any merge + * or new VMA allocation performed in copy_vma() does not adjust + * accounting, it is expected that callers handle this. + * + * And indeed we already have, accounting appropriately in the case of + * both in vrm_charge(). + * + * However, when we unmap the existing VMA (to effect the move), this + * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount + * removed pages. + * + * To avoid this we temporarily clear this flag, reinstating on any + * portions of the original VMA that remain. + */ + if (accountable_move) { + vm_flags_clear(vma, VM_ACCOUNT); + /* We are about to split vma, so store the start/end. */ + vm_start = vma->vm_start; + vm_end = vma->vm_end; + } + + err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false); + vrm->vma = NULL; /* Invalidated. 
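+	 * The munmap may have freed or split the old VMA, so the cached
+	 * pointer must not be used again.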
*/ + if (err) { + /* OOM: unable to split vma, just get accounts right */ + vm_acct_memory(len >> PAGE_SHIFT); + return; + } + + /* + * If we mremap() from a VMA like this: + * + * addr end + * | | + * v v + * |-------------| + * | | + * |-------------| + * + * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above + * we'll end up with: + * + * addr end + * | | + * v v + * |---| |---| + * | A | | B | + * |---| |---| + * + * The VMI is still pointing at addr, so vma_prev() will give us A, and + * a subsequent or lone vma_next() will give as B. + * + * do_vmi_munmap() will have restored the VMI back to addr. + */ + if (accountable_move) { + unsigned long end = addr + len; + + if (vm_start < addr) { + struct vm_area_struct *prev = vma_prev(&vmi); + + vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */ + } + + if (vm_end > end) { + struct vm_area_struct *next = vma_next(&vmi); + + vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */ + } } +} + +/* + * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the + * process. Additionally handle an error occurring on moving of page tables, + * where we reset vrm state to cause unmapping of the new VMA. + * + * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an + * error code. + */ +static int copy_vma_and_data(struct vma_remap_struct *vrm, + struct vm_area_struct **new_vma_ptr) +{ + unsigned long internal_offset = vrm->addr - vrm->vma->vm_start; + unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT; + unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; + unsigned long moved_len; + struct vm_area_struct *vma = vrm->vma; + struct vm_area_struct *new_vma; + int err = 0; + PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len); - vma_start_write(vma); - new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, - &need_rmap_locks); + new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff, + &pmc.need_rmap_locks); if (!new_vma) { - if (vm_flags & VM_ACCOUNT) - vm_unacct_memory(to_account >> PAGE_SHIFT); + vrm_uncharge(vrm); + *new_vma_ptr = NULL; return -ENOMEM; } + vrm->vma = vma; + pmc.old = vma; + pmc.new = new_vma; - moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, - need_rmap_locks, false); - if (moved_len < old_len) { + moved_len = move_page_tables(&pmc); + if (moved_len < vrm->old_len) err = -ENOMEM; - } else if (vma->vm_ops && vma->vm_ops->mremap) { + else if (vma->vm_ops && vma->vm_ops->mremap) err = vma->vm_ops->mremap(new_vma); - } if (unlikely(err)) { + PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr, + vrm->addr, moved_len); + /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. 
*/ - move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, - true, false); - vma = new_vma; - old_len = new_len; - old_addr = new_addr; - new_addr = err; + pmc_revert.need_rmap_locks = true; + move_page_tables(&pmc_revert); + + vrm->vma = new_vma; + vrm->old_len = vrm->new_len; + vrm->addr = vrm->new_addr; } else { - mremap_userfaultfd_prep(new_vma, uf); + mremap_userfaultfd_prep(new_vma, vrm->uf); } - if (is_vm_hugetlb_page(vma)) { + if (is_vm_hugetlb_page(vma)) clear_vma_resv_huge_pages(vma); - } - /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { - vm_flags_clear(vma, VM_ACCOUNT); - if (vma->vm_start < old_addr) - account_start = vma->vm_start; - if (vma->vm_end > old_addr + old_len) - account_end = vma->vm_end; - } + /* Tell pfnmap has moved from this vma */ + if (unlikely(vma->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(vma); + + *new_vma_ptr = new_vma; + return err; +} + +/* + * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and + * account flags on remaining VMA by convention (it cannot be mlock()'d any + * longer, as pages in range are no longer mapped), and removing anon_vma_chain + * links from it (if the entire VMA was copied over). + */ +static void dontunmap_complete(struct vma_remap_struct *vrm, + struct vm_area_struct *new_vma) +{ + unsigned long start = vrm->addr; + unsigned long end = vrm->addr + vrm->old_len; + unsigned long old_start = vrm->vma->vm_start; + unsigned long old_end = vrm->vma->vm_end; + + /* + * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old + * vma. + */ + vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT); + + /* + * anon_vma links of the old vma is no longer needed after its page + * table has been moved. + */ + if (new_vma != vrm->vma && start == old_start && end == old_end) + unlink_anon_vmas(vrm->vma); + + /* Because we won't unmap we don't need to touch locked_vm. */ +} + +static unsigned long move_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *new_vma; + unsigned long hiwater_vm; + int err; + + err = prep_move_vma(vrm); + if (err) + return err; + + /* If accounted, charge the number of bytes the operation will use. */ + if (!vrm_charge(vrm)) + return -ENOMEM; + + /* We don't want racing faults. */ + vma_start_write(vrm->vma); + + /* Perform copy step. */ + err = copy_vma_and_data(vrm, &new_vma); + /* + * If we established the copied-to VMA, we attempt to recover from the + * error by setting the destination VMA to the source VMA and unmapping + * it below. + */ + if (err && !new_vma) + return err; /* * If we failed to move page tables we still do total_vm increment @@ -805,73 +1267,31 @@ static unsigned long move_vma(struct vm_area_struct *vma, * If this were a serious issue, we'd add a flag to do_munmap(). */ hiwater_vm = mm->hiwater_vm; - vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); - - /* Tell pfnmap has moved from this vma */ - if (unlikely(vma->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(vma); - - if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { - /* We always clear VM_LOCKED[ONFAULT] on the old vma */ - vm_flags_clear(vma, VM_LOCKED_MASK); - - /* - * anon_vma links of the old vma is no longer needed after its page - * table has been moved. 
- */ - if (new_vma != vma && vma->vm_start == old_addr && - vma->vm_end == (old_addr + old_len)) - unlink_anon_vmas(vma); - /* Because we won't unmap we don't need to touch locked_vm */ - return new_addr; - } - - vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { - /* OOM: unable to split vma, just get accounts right */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) - vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = 0; - } - - if (vm_flags & VM_LOCKED) { - mm->locked_vm += new_len >> PAGE_SHIFT; - *locked = true; - } + vrm_stat_account(vrm, vrm->new_len); + if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) + dontunmap_complete(vrm, new_vma); + else + unmap_source_vma(vrm); mm->hiwater_vm = hiwater_vm; - /* Restore VM_ACCOUNT if one or two pieces of vma left */ - if (account_start) { - vma = vma_prev(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - if (account_end) { - vma = vma_next(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - return new_addr; + return err ? (unsigned long)err : vrm->new_addr; } /* * resize_is_valid() - Ensure the vma can be resized to the new length at the give * address. * - * @vma: The vma to resize - * @addr: The old address - * @old_len: The current size - * @new_len: The desired size - * @flags: The vma flags - * * Return 0 on success, error otherwise. */ -static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, - unsigned long old_len, unsigned long new_len, unsigned long flags) +static int resize_is_valid(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + unsigned long addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long new_len = vrm->new_len; unsigned long pgoff; /* @@ -883,11 +1303,12 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, * behavior. As a result, fail such attempts. */ if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { - pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid); + pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", + current->comm, current->pid); return -EINVAL; } - if ((flags & MREMAP_DONTUNMAP) && + if ((vrm->flags & MREMAP_DONTUNMAP) && (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) return -EINVAL; @@ -907,118 +1328,120 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return -EFAULT; - if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta)) return -EAGAIN; - if (!may_expand_vm(mm, vma->vm_flags, - (new_len - old_len) >> PAGE_SHIFT)) + if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT)) return -ENOMEM; return 0; } /* - * mremap_to() - remap a vma to a new location - * @addr: The old address - * @old_len: The old size - * @new_addr: The target address - * @new_len: The new size - * @locked: If the returned vma is locked (VM_LOCKED) - * @flags: the mremap flags - * @uf: The mremap userfaultfd context - * @uf_unmap_early: The userfaultfd unmap early context - * @uf_unmap: The userfaultfd unmap context + * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so + * execute this, optionally dropping the mmap lock when we do so. * - * Returns: The new address of the vma or an error. 
+ * In both cases this invalidates the VMA, however if we don't drop the lock, + * then load the correct VMA into vrm->vma afterwards. */ -static unsigned long mremap_to(unsigned long addr, unsigned long old_len, - unsigned long new_addr, unsigned long new_len, bool *locked, - unsigned long flags, struct vm_userfaultfd_ctx *uf, - struct list_head *uf_unmap_early, - struct list_head *uf_unmap) +static unsigned long shrink_vma(struct vma_remap_struct *vrm, + bool drop_lock) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long ret; - unsigned long map_flags = 0; + unsigned long unmap_start = vrm->addr + vrm->new_len; + unsigned long unmap_bytes = vrm->delta; + unsigned long res; + VMA_ITERATOR(vmi, mm, unmap_start); - if (offset_in_page(new_addr)) - return -EINVAL; + VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK); - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) - return -EINVAL; - - /* Ensure the old/new locations do not overlap */ - if (addr + old_len > new_addr && new_addr + new_len > addr) - return -EINVAL; + res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes, + vrm->uf_unmap, drop_lock); + vrm->vma = NULL; /* Invalidated. */ + if (res) + return res; /* - * move_vma() need us to stay 4 maps below the threshold, otherwise - * it will bail out at the very beginning. - * That is a problem if we have already unmaped the regions here - * (new_addr, and old_addr), because userspace will not know the - * state of the vma's after it gets -ENOMEM. - * So, to avoid such scenario we can pre-compute if the whole - * operation has high chances to success map-wise. - * Worst-scenario case is when both vma's (new_addr and old_addr) get - * split in 3 before unmapping it. - * That means 2 more maps (1 for each) to the ones we already hold. - * Check whether current map count plus 2 still leads us to 4 maps below - * the threshold, otherwise return -ENOMEM here to be more safe. + * If we've not dropped the lock, then we should reload the VMA to + * replace the invalidated VMA with the one that may have now been + * split. */ - if ((mm->map_count + 2) >= sysctl_max_map_count - 3) - return -ENOMEM; + if (drop_lock) { + vrm->mmap_locked = false; + } else { + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; + } + + return 0; +} + +/* + * mremap_to() - remap a vma to a new location. + * Returns: The new address of the vma or an error. + */ +static unsigned long mremap_to(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + unsigned long err; + + /* Is the new length or address silly? */ + if (vrm->new_len > TASK_SIZE || + vrm->new_addr > TASK_SIZE - vrm->new_len) + return -EINVAL; - if (flags & MREMAP_FIXED) { + if (vrm_overlaps(vrm)) + return -EINVAL; + + if (vrm->flags & MREMAP_FIXED) { /* * In mremap_to(). * VMA is moved to dst address, and munmap dst first. * do_munmap will check if dst is sealed. */ - ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); - if (ret) - return ret; - } + err = do_munmap(mm, vrm->new_addr, vrm->new_len, + vrm->uf_unmap_early); + vrm->vma = NULL; /* Invalidated. */ + if (err) + return err; - if (old_len > new_len) { - ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); - if (ret) - return ret; - old_len = new_len; + /* + * If we remap a portion of a VMA elsewhere in the same VMA, + * this can invalidate the old VMA. Reset. 
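+		 * (For example, moving two pages of a large mapping to a fixed
+		 * destination a few pages further into the same mapping:
+		 * unmapping the destination splits the VMA the source belongs
+		 * to.)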
+ */ + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; } - vma = vma_lookup(mm, addr); - if (!vma) - return -EFAULT; - - ret = resize_is_valid(vma, addr, old_len, new_len, flags); - if (ret) - return ret; + if (vrm->remap_type == MREMAP_SHRINK) { + err = shrink_vma(vrm, /* drop_lock= */false); + if (err) + return err; - /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ - if (flags & MREMAP_DONTUNMAP && - !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { - return -ENOMEM; + /* Set up for the move now shrink has been executed. */ + vrm->old_len = vrm->new_len; } - if (flags & MREMAP_FIXED) - map_flags |= MAP_FIXED; + err = resize_is_valid(vrm); + if (err) + return err; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; + /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ + if (vrm->flags & MREMAP_DONTUNMAP) { + vm_flags_t vm_flags = vrm->vma->vm_flags; + unsigned long pages = vrm->old_len >> PAGE_SHIFT; - ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(ret)) - return ret; + if (!may_expand_vm(mm, vm_flags, pages)) + return -ENOMEM; + } - /* We got a new mapping */ - if (!(flags & MREMAP_FIXED)) - new_addr = ret; + err = vrm_set_new_addr(vrm); + if (err) + return err; - return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, - uf, uf_unmap); + return move_vma(vrm); } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) @@ -1035,215 +1458,328 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) return 1; } +/* Determine whether we are actually able to execute an in-place expansion. */ +static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm) +{ + /* Number of bytes from vrm->addr to end of VMA. */ + unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr; + + /* If end of range aligns to end of VMA, we can just expand in-place. */ + if (suffix_bytes != vrm->old_len) + return false; + + /* Check whether this is feasible. */ + if (!vma_expandable(vrm->vma, vrm->delta)) + return false; + + return true; +} + /* - * Expand (or shrink) an existing mapping, potentially moving it at the - * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) - * - * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise - * This option implies MREMAP_MAYMOVE. + * Are the parameters passed to mremap() valid? If so return 0, otherwise return + * error. */ -SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, - unsigned long, new_len, unsigned long, flags, - unsigned long, new_addr) +static unsigned long check_mremap_params(struct vma_remap_struct *vrm) + { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long ret = -EINVAL; - bool locked = false; - struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; - LIST_HEAD(uf_unmap_early); - LIST_HEAD(uf_unmap); + unsigned long addr = vrm->addr; + unsigned long flags = vrm->flags; + + /* Ensure no unexpected flag values. */ + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) + return -EINVAL; + + /* Start address must be page-aligned. */ + if (offset_in_page(addr)) + return -EINVAL; /* - * There is a deliberate asymmetry here: we strip the pointer tag - * from the old address but leave the new address alone. 
This is - * for consistency with mmap(), where we prevent the creation of - * aliasing mappings in userspace by leaving the tag bits of the - * mapping address intact. A non-zero tag will cause the subsequent - * range checks to reject the address as invalid. - * - * See Documentation/arch/arm64/tagged-address-abi.rst for more - * information. + * We allow a zero old-len as a special case + * for DOS-emu "duplicate shm area" thing. But + * a zero new-len is nonsensical. */ - addr = untagged_addr(addr); + if (!PAGE_ALIGN(vrm->new_len)) + return -EINVAL; - if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) - return ret; + /* Remainder of checks are for cases with specific new_addr. */ + if (!vrm_implies_new_addr(vrm)) + return 0; - if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) - return ret; + /* The new address must be page-aligned. */ + if (offset_in_page(vrm->new_addr)) + return -EINVAL; + + /* A fixed address implies a move. */ + if (!(flags & MREMAP_MAYMOVE)) + return -EINVAL; + + /* MREMAP_DONTUNMAP does not allow resizing in the process. */ + if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len) + return -EINVAL; /* - * MREMAP_DONTUNMAP is always a move and it does not allow resizing - * in the process. + * move_vma() need us to stay 4 maps below the threshold, otherwise + * it will bail out at the very beginning. + * That is a problem if we have already unmaped the regions here + * (new_addr, and old_addr), because userspace will not know the + * state of the vma's after it gets -ENOMEM. + * So, to avoid such scenario we can pre-compute if the whole + * operation has high chances to success map-wise. + * Worst-scenario case is when both vma's (new_addr and old_addr) get + * split in 3 before unmapping it. + * That means 2 more maps (1 for each) to the ones we already hold. + * Check whether current map count plus 2 still leads us to 4 maps below + * the threshold, otherwise return -ENOMEM here to be more safe. */ - if (flags & MREMAP_DONTUNMAP && - (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) - return ret; + if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3) + return -ENOMEM; + return 0; +} - if (offset_in_page(addr)) - return ret; +/* + * We know we can expand the VMA in-place by delta pages, so do so. + * + * If we discover the VMA is locked, update mm_struct statistics accordingly and + * indicate so to the caller. + */ +static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + VMA_ITERATOR(vmi, mm, vma->vm_end); - old_len = PAGE_ALIGN(old_len); - new_len = PAGE_ALIGN(new_len); + if (!vrm_charge(vrm)) + return -ENOMEM; /* - * We allow a zero old-len as a special case - * for DOS-emu "duplicate shm area" thing. But - * a zero new-len is nonsensical. + * Function vma_merge_extend() is called on the + * extension we are adding to the already existing vma, + * vma_merge_extend() will merge this extension with the + * already existing vma (expand operation itself) and + * possibly also with the next vma if it becomes + * adjacent to the expanded vma and otherwise + * compatible. 
*/ - if (!new_len) - return ret; - - if (mmap_write_lock_killable(current->mm)) - return -EINTR; - vma = vma_lookup(mm, addr); + vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta); if (!vma) { - ret = -EFAULT; - goto out; + vrm_uncharge(vrm); + return -ENOMEM; } - /* Don't allow remapping vmas when they have already been sealed */ - if (!can_modify_vma(vma)) { - ret = -EPERM; - goto out; - } + vrm_stat_account(vrm, vrm->delta); + + return 0; +} + +static bool align_hugetlb(struct vma_remap_struct *vrm) +{ + struct hstate *h __maybe_unused = hstate_vma(vrm->vma); + + vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h)); + vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h)); + + /* addrs must be huge page aligned */ + if (vrm->addr & ~huge_page_mask(h)) + return false; + if (vrm->new_addr & ~huge_page_mask(h)) + return false; + + /* + * Don't allow remap expansion, because the underlying hugetlb + * reservation is not yet capable to handle split reservation. + */ + if (vrm->new_len > vrm->old_len) + return false; + + vrm_set_delta(vrm); + + return true; +} - if (is_vm_hugetlb_page(vma)) { - struct hstate *h __maybe_unused = hstate_vma(vma); +/* + * We are mremap()'ing without specifying a fixed address to move to, but are + * requesting that the VMA's size be increased. + * + * Try to do so in-place, if this fails, then move the VMA to a new location to + * action the change. + */ +static unsigned long expand_vma(struct vma_remap_struct *vrm) +{ + unsigned long err; + unsigned long addr = vrm->addr; - old_len = ALIGN(old_len, huge_page_size(h)); - new_len = ALIGN(new_len, huge_page_size(h)); + err = resize_is_valid(vrm); + if (err) + return err; - /* addrs must be huge page aligned */ - if (addr & ~huge_page_mask(h)) - goto out; - if (new_addr & ~huge_page_mask(h)) - goto out; + /* + * [addr, old_len) spans precisely to the end of the VMA, so try to + * expand it in-place. + */ + if (vrm_can_expand_in_place(vrm)) { + err = expand_vma_in_place(vrm); + if (err) + return err; /* - * Don't allow remap expansion, because the underlying hugetlb - * reservation is not yet capable to handle split reservation. + * We want to populate the newly expanded portion of the VMA to + * satisfy the expectation that mlock()'ing a VMA maintains all + * of its pages in memory. */ - if (new_len > old_len) - goto out; - } + if (vrm->mlocked) + vrm->new_addr = addr; - if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) { - ret = mremap_to(addr, old_len, new_addr, new_len, - &locked, flags, &uf, &uf_unmap_early, - &uf_unmap); - goto out; + /* OK we're done! */ + return addr; } /* - * Always allow a shrinking remap: that just unmaps - * the unnecessary pages.. - * do_vmi_munmap does all the needed commit accounting, and - * unlocks the mmap_lock if so directed. + * We weren't able to just expand or shrink the area, + * we need to create a new one and move it. */ - if (old_len >= new_len) { - VMA_ITERATOR(vmi, mm, addr + new_len); - if (old_len == new_len) { - ret = addr; - goto out; - } + /* We're not allowed to move the VMA, so error out. */ + if (!(vrm->flags & MREMAP_MAYMOVE)) + return -ENOMEM; + + /* Find a new location to move the VMA to. */ + err = vrm_set_new_addr(vrm); + if (err) + return err; + + return move_vma(vrm); +} + +/* + * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the + * first available address to perform the operation. 
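+ * Dispatches on vrm->remap_type: NO_RESIZE is a no-op, SHRINK unmaps the
+ * tail in place, and EXPAND grows in place or falls back to a move.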
+ */ +static unsigned long mremap_at(struct vma_remap_struct *vrm) +{ + unsigned long res; - ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len, - &uf_unmap, true); - if (ret) - goto out; + switch (vrm->remap_type) { + case MREMAP_INVALID: + break; + case MREMAP_NO_RESIZE: + /* NO-OP CASE - resizing to the same size. */ + return vrm->addr; + case MREMAP_SHRINK: + /* + * SHRINK CASE. Can always be done in-place. + * + * Simply unmap the shrunken portion of the VMA. This does all + * the needed commit accounting, and we indicate that the mmap + * lock should be dropped. + */ + res = shrink_vma(vrm, /* drop_lock= */true); + if (res) + return res; - ret = addr; - goto out_unlocked; + return vrm->addr; + case MREMAP_EXPAND: + return expand_vma(vrm); } - /* - * Ok, we need to grow.. - */ - ret = resize_is_valid(vma, addr, old_len, new_len, flags); + BUG(); +} + +static unsigned long do_mremap(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long ret; + + ret = check_mremap_params(vrm); if (ret) - goto out; + return ret; - /* old_len exactly to the end of the area.. - */ - if (old_len == vma->vm_end - addr) { - unsigned long delta = new_len - old_len; - - /* can we just expand the current mapping? */ - if (vma_expandable(vma, delta)) { - long pages = delta >> PAGE_SHIFT; - VMA_ITERATOR(vmi, mm, vma->vm_end); - long charged = 0; - - if (vma->vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, pages)) { - ret = -ENOMEM; - goto out; - } - charged = pages; - } + vrm->old_len = PAGE_ALIGN(vrm->old_len); + vrm->new_len = PAGE_ALIGN(vrm->new_len); + vrm_set_delta(vrm); - /* - * Function vma_merge_extend() is called on the - * extension we are adding to the already existing vma, - * vma_merge_extend() will merge this extension with the - * already existing vma (expand operation itself) and - * possibly also with the next vma if it becomes - * adjacent to the expanded vma and otherwise - * compatible. - */ - vma = vma_merge_extend(&vmi, vma, delta); - if (!vma) { - vm_unacct_memory(charged); - ret = -ENOMEM; - goto out; - } + if (mmap_write_lock_killable(mm)) + return -EINTR; + vrm->mmap_locked = true; - vm_stat_account(mm, vma->vm_flags, pages); - if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; - locked = true; - new_addr = addr; - } - ret = addr; - goto out; - } + vma = vrm->vma = vma_lookup(mm, vrm->addr); + if (!vma) { + ret = -EFAULT; + goto out; } - /* - * We weren't able to just expand or shrink the area, - * we need to create a new one and move it.. - */ - ret = -ENOMEM; - if (flags & MREMAP_MAYMOVE) { - unsigned long map_flags = 0; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; - - new_addr = get_unmapped_area(vma->vm_file, 0, new_len, - vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(new_addr)) { - ret = new_addr; - goto out; - } + /* If mseal()'d, mremap() is prohibited. */ + if (!can_modify_vma(vma)) { + ret = -EPERM; + goto out; + } - ret = move_vma(vma, addr, old_len, new_len, new_addr, - &locked, flags, &uf, &uf_unmap); + /* Align to hugetlb page size, if required. */ + if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) { + ret = -EINVAL; + goto out; } + + vrm->remap_type = vrm_remap_type(vrm); + + /* Actually execute mremap. */ + ret = vrm_implies_new_addr(vrm) ? 
mremap_to(vrm) : mremap_at(vrm); + out: - if (offset_in_page(ret)) - locked = false; - mmap_write_unlock(current->mm); - if (locked && new_len > old_len) - mm_populate(new_addr + old_len, new_len - old_len); -out_unlocked: - userfaultfd_unmap_complete(mm, &uf_unmap_early); - mremap_userfaultfd_complete(&uf, addr, ret, old_len); - userfaultfd_unmap_complete(mm, &uf_unmap); + if (vrm->mmap_locked) { + mmap_write_unlock(mm); + vrm->mmap_locked = false; + + if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len) + mm_populate(vrm->new_addr + vrm->old_len, vrm->delta); + } + + userfaultfd_unmap_complete(mm, vrm->uf_unmap_early); + mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len); + userfaultfd_unmap_complete(mm, vrm->uf_unmap); + return ret; } + +/* + * Expand (or shrink) an existing mapping, potentially moving it at the + * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) + * + * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise + * This option implies MREMAP_MAYMOVE. + */ +SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) +{ + struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; + LIST_HEAD(uf_unmap_early); + LIST_HEAD(uf_unmap); + /* + * There is a deliberate asymmetry here: we strip the pointer tag + * from the old address but leave the new address alone. This is + * for consistency with mmap(), where we prevent the creation of + * aliasing mappings in userspace by leaving the tag bits of the + * mapping address intact. A non-zero tag will cause the subsequent + * range checks to reject the address as invalid. + * + * See Documentation/arch/arm64/tagged-address-abi.rst for more + * information. + */ + struct vma_remap_struct vrm = { + .addr = untagged_addr(addr), + .old_len = old_len, + .new_len = new_len, + .flags = flags, + .new_addr = new_addr, + + .uf = &uf, + .uf_unmap_early = &uf_unmap_early, + .uf_unmap = &uf_unmap, + + .remap_type = MREMAP_INVALID, /* We set later. */ + }; + + return do_mremap(&vrm); +} diff --git a/mm/nommu.c b/mm/nommu.c index 753384666bae..617e7ba8022f 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -42,17 +42,11 @@ #include <asm/mmu_context.h> #include "internal.h" -void *high_memory; -EXPORT_SYMBOL(high_memory); -struct page *mem_map; -unsigned long max_mapnr; -EXPORT_SYMBOL(max_mapnr); unsigned long highest_memmap_pfn; int heap_stack_gap = 0; atomic_long_t mmap_pages_allocated; -EXPORT_SYMBOL(mem_map); /* list of mapped, potentially shareable regions */ static struct kmem_cache *vm_region_jar; @@ -1204,7 +1198,7 @@ share: setup_vma_to_mm(vma, current->mm); current->mm->map_count++; /* add the VMA to the tree */ - vma_iter_store(&vmi, vma); + vma_iter_store_new(&vmi, vma); /* we flush the region from the icache only when the first executable * mapping of it is made */ @@ -1369,7 +1363,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, setup_vma_to_mm(vma, mm); setup_vma_to_mm(new, mm); - vma_iter_store(vmi, new); + vma_iter_store_new(vmi, new); mm->map_count++; return 0; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1cf121ad7085..25923cfec9c6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -563,7 +563,7 @@ static bool __oom_reap_task_mm(struct mm_struct *mm) } /* - * Reaps the address space of the give task. + * Reaps the address space of the given task. 
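[Editor's note] The mremap() rework above routes all request state through struct vma_remap_struct and has mremap_at() dispatch on a precomputed remap type. The classifier itself (vrm_remap_type()) is outside the hunks shown, so the sketch below is an assumed reconstruction inferred from the old_len/new_len handling; the enumerator names are taken from the switch in mremap_at(), and the function name is illustrative.

```c
/*
 * Assumed reconstruction of the remap-type classification consumed by
 * mremap_at() above; the real enum and helper presumably live earlier in
 * mm/mremap.c, outside the hunks shown.
 */
enum mremap_type {
	MREMAP_INVALID,		/* Not yet classified. */
	MREMAP_NO_RESIZE,	/* old_len == new_len: no size change. */
	MREMAP_SHRINK,		/* old_len >  new_len: unmap the tail. */
	MREMAP_EXPAND,		/* old_len <  new_len: grow in place or move. */
};

static enum mremap_type classify_remap(unsigned long old_len,
				       unsigned long new_len)
{
	if (new_len == old_len)
		return MREMAP_NO_RESIZE;
	return new_len < old_len ? MREMAP_SHRINK : MREMAP_EXPAND;
}
```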
* * Returns true on success and false if none or part of the address space * has been reclaimed and the caller should retry later. diff --git a/mm/page-writeback.c b/mm/page-writeback.c index eb55ece39c56..18456ddd463b 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -120,29 +120,6 @@ EXPORT_SYMBOL(laptop_mode); struct wb_domain global_wb_domain; -/* consolidated parameters for balance_dirty_pages() and its subroutines */ -struct dirty_throttle_control { -#ifdef CONFIG_CGROUP_WRITEBACK - struct wb_domain *dom; - struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ -#endif - struct bdi_writeback *wb; - struct fprop_local_percpu *wb_completions; - - unsigned long avail; /* dirtyable */ - unsigned long dirty; /* file_dirty + write + nfs */ - unsigned long thresh; /* dirty threshold */ - unsigned long bg_thresh; /* dirty background threshold */ - - unsigned long wb_dirty; /* per-wb counterparts */ - unsigned long wb_thresh; - unsigned long wb_bg_thresh; - - unsigned long pos_ratio; - bool freerun; - bool dirty_exceeded; -}; - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will @@ -1095,7 +1072,7 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc) struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); - unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); + unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long wb_thresh = dtc->wb_thresh; unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ @@ -1962,11 +1939,7 @@ free_running: */ if (pause < min_pause) { trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, @@ -1991,11 +1964,7 @@ free_running: pause: trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, @@ -3109,6 +3078,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write) int access_ret; VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (mapping && mapping_use_writeback_tags(mapping)) { XA_STATE(xas, &mapping->i_pages, folio_index(folio)); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3ea5bf5c459..f51aa6051a99 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -276,6 +276,7 @@ int min_free_kbytes = 1024; int user_min_free_kbytes = -1; static int watermark_boost_factor __read_mostly = 15000; static int watermark_scale_factor = 10; +int defrag_mode; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -511,9 +512,9 @@ out: static inline unsigned int order_to_pindex(int migratetype, int order) { - bool __maybe_unused movable; #ifdef CONFIG_TRANSPARENT_HUGEPAGE + bool movable; if (order > PAGE_ALLOC_COSTLY_ORDER) { VM_BUG_ON(order != HPAGE_PMD_ORDER); @@ -617,6 +618,10 @@ compaction_capture(struct capture_control *capc, struct page *page, capc->cc->migratetype != MIGRATE_MOVABLE) return false; + if (migratetype != capc->cc->migratetype) + trace_mm_page_alloc_extfrag(page, capc->cc->order, order, + capc->cc->migratetype, migratetype); + capc->page = page; return true; } @@ -658,16 +663,20 @@ 
static inline void __add_to_free_list(struct page *page, struct zone *zone, bool tail) { struct free_area *area = &zone->free_area[order]; + int nr_pages = 1 << order; VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), migratetype, 1 << order); + get_pageblock_migratetype(page), migratetype, nr_pages); if (tail) list_add_tail(&page->buddy_list, &area->free_list[migratetype]); else list_add(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; + + if (order >= pageblock_order && !is_migrate_isolate(migratetype)) + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); } /* @@ -679,24 +688,34 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int old_mt, int new_mt) { struct free_area *area = &zone->free_area[order]; + int nr_pages = 1 << order; /* Free page moving can fail, so it happens before the type update */ VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), old_mt, 1 << order); + get_pageblock_migratetype(page), old_mt, nr_pages); list_move_tail(&page->buddy_list, &area->free_list[new_mt]); - account_freepages(zone, -(1 << order), old_mt); - account_freepages(zone, 1 << order, new_mt); + account_freepages(zone, -nr_pages, old_mt); + account_freepages(zone, nr_pages, new_mt); + + if (order >= pageblock_order && + is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { + if (!is_migrate_isolate(old_mt)) + nr_pages = -nr_pages; + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); + } } static inline void __del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype) { + int nr_pages = 1 << order; + VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), migratetype, 1 << order); + get_pageblock_migratetype(page), migratetype, nr_pages); /* clear reported state and update reported page count */ if (page_reported(page)) @@ -706,6 +725,9 @@ static inline void __del_page_from_free_list(struct page *page, struct zone *zon __ClearPageBuddy(page); set_page_private(page, 0); zone->free_area[order].nr_free--; + + if (order >= pageblock_order && !is_migrate_isolate(migratetype)) + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); } static inline void del_page_from_free_list(struct page *page, struct zone *zone, @@ -950,21 +972,34 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) switch (page - head_page) { case 1: /* the first tail page: these may be in place of ->mapping */ - if (unlikely(folio_entire_mapcount(folio))) { - bad_page(page, "nonzero entire_mapcount"); - goto out; - } if (unlikely(folio_large_mapcount(folio))) { bad_page(page, "nonzero large_mapcount"); goto out; } - if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && + unlikely(atomic_read(&folio->_nr_pages_mapped))) { bad_page(page, "nonzero nr_pages_mapped"); goto out; } - if (unlikely(atomic_read(&folio->_pincount))) { - bad_page(page, "nonzero pincount"); - goto out; + if (IS_ENABLED(CONFIG_MM_ID)) { + if (unlikely(folio->_mm_id_mapcount[0] != -1)) { + bad_page(page, "nonzero mm mapcount 0"); + goto out; + } + if (unlikely(folio->_mm_id_mapcount[1] != -1)) { + bad_page(page, "nonzero mm mapcount 1"); + goto out; + } + } + if 
(IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { + bad_page(page, "nonzero entire_mapcount"); + goto out; + } + if (unlikely(atomic_read(&folio->_pincount))) { + bad_page(page, "nonzero pincount"); + goto out; + } } break; case 2: @@ -973,7 +1008,22 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) bad_page(page, "on deferred list"); goto out; } + if (!IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { + bad_page(page, "nonzero entire_mapcount"); + goto out; + } + if (unlikely(atomic_read(&folio->_pincount))) { + bad_page(page, "nonzero pincount"); + goto out; + } + } break; + case 3: + /* the third tail page: hugetlb specifics overlap ->mappings */ + if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) + break; + fallthrough; default: if (page->mapping != TAIL_MAPPING) { bad_page(page, "corrupted mapping in tail page"); @@ -1044,6 +1094,84 @@ static void kernel_init_pages(struct page *page, int numpages) kasan_enable_current(); } +#ifdef CONFIG_MEM_ALLOC_PROFILING + +/* Should be called only if mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + set_codetag_empty(&ref); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_add(page, task, nr); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub(&ref, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_sub(page, nr); +} + +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) +{ + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = __pgalloc_tag_get(page); + if (tag) + this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) {} +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + __always_inline bool free_pages_prepare(struct page *page, unsigned int order) { @@ -1099,8 +1227,12 @@ __always_inline bool free_pages_prepare(struct page *page, if (unlikely(order)) { int i; - if (compound) + if (compound) { page[1].flags &= ~PAGE_FLAGS_SECOND; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + } for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_page_prepare(page, page + i); @@ -1859,47 +1991,6 @@ 
static void change_pageblock_range(struct page *pageblock_page, } } -/* - * When we are falling back to another migratetype during allocation, try to - * steal extra free pages from the same pageblocks to satisfy further - * allocations, instead of polluting multiple pageblocks. - * - * If we are stealing a relatively large buddy page, it is likely there will - * be more free pages in the pageblock, so try to steal them all. For - * reclaimable and unmovable allocations, we steal regardless of page size, - * as fragmentation caused by those allocations polluting movable pageblocks - * is worse than movable allocations stealing from unmovable and reclaimable - * pageblocks. - */ -static bool can_steal_fallback(unsigned int order, int start_mt) -{ - /* - * Leaving this order check is intended, although there is - * relaxed order check in next check. The reason is that - * we can actually steal whole pageblock if this condition met, - * but, below check doesn't guarantee it and that is just heuristic - * so could be changed anytime. - */ - if (order >= pageblock_order) - return true; - - /* - * Movable pages won't cause permanent fragmentation, so when you alloc - * small pages, you just need to temporarily steal unmovable or - * reclaimable pages that are closest to the request size. After a - * while, memory compaction may occur to form large contiguous pages, - * and the next movable allocation may not need to steal. Unmovable and - * reclaimable allocations need to actually steal pages. - */ - if (order >= pageblock_order / 2 || - start_mt == MIGRATE_RECLAIMABLE || - start_mt == MIGRATE_UNMOVABLE || - page_group_by_mobility_disabled) - return true; - - return false; -} - static inline bool boost_watermark(struct zone *zone) { unsigned long max_boost; @@ -1938,30 +2029,99 @@ static inline bool boost_watermark(struct zone *zone) } /* - * This function implements actual steal behaviour. If order is large enough, we - * can claim the whole pageblock for the requested migratetype. If not, we check - * the pageblock for constituent pages; if at least half of the pages are free - * or compatible, we can still claim the whole block, so pages freed in the - * future will be put on the correct free list. Otherwise, we isolate exactly - * the order we need from the fallback block and leave its migratetype alone. + * When we are falling back to another migratetype during allocation, should we + * try to claim an entire block to satisfy further allocations, instead of + * polluting multiple pageblocks? */ -static struct page * -steal_suitable_fallback(struct zone *zone, struct page *page, - int current_order, int order, int start_type, - unsigned int alloc_flags, bool whole_block) +static bool should_try_claim_block(unsigned int order, int start_mt) { - int free_pages, movable_pages, alike_pages; - unsigned long start_pfn; - int block_type; + /* + * Leaving this order check is intended, although there is + * relaxed order check in next check. The reason is that + * we can actually claim the whole pageblock if this condition met, + * but, below check doesn't guarantee it and that is just heuristic + * so could be changed anytime. + */ + if (order >= pageblock_order) + return true; - block_type = get_pageblock_migratetype(page); + /* + * Above a certain threshold, always try to claim, as it's likely there + * will be more free pages in the pageblock. + */ + if (order >= pageblock_order / 2) + return true; /* - * This can happen due to races and we want to prevent broken - * highatomic accounting. 
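[Editor's note] The free-list hunks above (__add_to_free_list(), move_to_free_list(), __del_page_from_free_list()) start maintaining NR_FREE_PAGES_BLOCKS. The rule they apply is compact: only buddies of at least pageblock_order are counted, isolated migratetypes are excluded, and a move only adjusts the counter when the isolation status changes. The helper below is illustrative and not part of the patch; it just restates the add/delete rule in one place.

```c
/*
 * Illustrative helper (not in the patch): the NR_FREE_PAGES_BLOCKS rule
 * applied on add/delete above. 'sign' is +1 when a buddy enters a free
 * list and -1 when it leaves.
 */
static inline void account_free_block_pages(struct zone *zone,
					    unsigned int order,
					    int migratetype, int sign)
{
	if (order >= pageblock_order && !is_migrate_isolate(migratetype))
		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS,
				      sign * (1 << order));
}
```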
+ * Unmovable/reclaimable allocations would cause permanent + * fragmentations if they fell back to allocating from a movable block + * (polluting it), so we try to claim the whole block regardless of the + * allocation size. Later movable allocations can always steal from this + * block, which is less problematic. */ - if (is_migrate_highatomic(block_type)) - goto single_page; + if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) + return true; + + if (page_group_by_mobility_disabled) + return true; + + /* + * Movable pages won't cause permanent fragmentation, so when you alloc + * small pages, we just need to temporarily steal unmovable or + * reclaimable pages that are closest to the request size. After a + * while, memory compaction may occur to form large contiguous pages, + * and the next movable allocation may not need to steal. + */ + return false; +} + +/* + * Check whether there is a suitable fallback freepage with requested order. + * Sets *claim_block to instruct the caller whether it should convert a whole + * pageblock to the returned migratetype. + * If only_claim is true, this function returns fallback_mt only if + * we would do this whole-block claiming. This would help to reduce + * fragmentation due to mixed migratetype pages in one pageblock. + */ +int find_suitable_fallback(struct free_area *area, unsigned int order, + int migratetype, bool only_claim, bool *claim_block) +{ + int i; + int fallback_mt; + + if (area->nr_free == 0) + return -1; + + *claim_block = false; + for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { + fallback_mt = fallbacks[migratetype][i]; + if (free_area_empty(area, fallback_mt)) + continue; + + if (should_try_claim_block(order, migratetype)) + *claim_block = true; + + if (*claim_block || !only_claim) + return fallback_mt; + } + + return -1; +} + +/* + * This function implements actual block claiming behaviour. If order is large + * enough, we can claim the whole pageblock for the requested migratetype. If + * not, we check the pageblock for constituent pages; if at least half of the + * pages are free or compatible, we can still claim the whole block, so pages + * freed in the future will be put on the correct free list. + */ +static struct page * +try_to_claim_block(struct zone *zone, struct page *page, + int current_order, int order, int start_type, + int block_type, unsigned int alloc_flags) +{ + int free_pages, movable_pages, alike_pages; + unsigned long start_pfn; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { @@ -1982,14 +2142,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page, if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); - /* We are not allowed to try stealing from the whole block */ - if (!whole_block) - goto single_page; - /* moving whole block can fail due to zone boundary conditions */ if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, &movable_pages)) - goto single_page; + return NULL; /* * Determine how many pages are compatible with our allocation. @@ -2022,198 +2178,24 @@ steal_suitable_fallback(struct zone *zone, struct page *page, return __rmqueue_smallest(zone, order, start_type); } -single_page: - page_del_and_expand(zone, page, order, current_order, block_type); - return page; -} - -/* - * Check whether there is a suitable fallback freepage with requested order. 
- * If only_stealable is true, this function returns fallback_mt only if - * we can steal other freepages all together. This would help to reduce - * fragmentation due to mixed migratetype pages in one pageblock. - */ -int find_suitable_fallback(struct free_area *area, unsigned int order, - int migratetype, bool only_stealable, bool *can_steal) -{ - int i; - int fallback_mt; - - if (area->nr_free == 0) - return -1; - - *can_steal = false; - for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { - fallback_mt = fallbacks[migratetype][i]; - if (free_area_empty(area, fallback_mt)) - continue; - - if (can_steal_fallback(order, migratetype)) - *can_steal = true; - - if (!only_stealable) - return fallback_mt; - - if (*can_steal) - return fallback_mt; - } - - return -1; -} - -/* - * Reserve the pageblock(s) surrounding an allocation request for - * exclusive use of high-order atomic allocations if there are no - * empty page blocks that contain a page with a suitable order - */ -static void reserve_highatomic_pageblock(struct page *page, int order, - struct zone *zone) -{ - int mt; - unsigned long max_managed, flags; - - /* - * The number reserved as: minimum is 1 pageblock, maximum is - * roughly 1% of a zone. But if 1% of a zone falls below a - * pageblock size, then don't reserve any pageblocks. - * Check is race-prone but harmless. - */ - if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) - return; - max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); - if (zone->nr_reserved_highatomic >= max_managed) - return; - - spin_lock_irqsave(&zone->lock, flags); - - /* Recheck the nr_reserved_highatomic limit under the lock */ - if (zone->nr_reserved_highatomic >= max_managed) - goto out_unlock; - - /* Yoink! */ - mt = get_pageblock_migratetype(page); - /* Only reserve normal pageblocks (i.e., they can merge with others) */ - if (!migratetype_is_mergeable(mt)) - goto out_unlock; - - if (order < pageblock_order) { - if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) - goto out_unlock; - zone->nr_reserved_highatomic += pageblock_nr_pages; - } else { - change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); - zone->nr_reserved_highatomic += 1 << order; - } - -out_unlock: - spin_unlock_irqrestore(&zone->lock, flags); + return NULL; } /* - * Used when an allocation is about to fail under memory pressure. This - * potentially hurts the reliability of high-order allocations when under - * intense memory pressure but failed atomic allocations should be easier - * to recover from than an OOM. + * Try finding a free buddy page on the fallback list. * - * If @force is true, try to unreserve pageblocks even though highatomic - * pageblock is exhausted. - */ -static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, - bool force) -{ - struct zonelist *zonelist = ac->zonelist; - unsigned long flags; - struct zoneref *z; - struct zone *zone; - struct page *page; - int order; - int ret; - - for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, - ac->nodemask) { - /* - * Preserve at least one pageblock unless memory pressure - * is really high. 
- */ - if (!force && zone->nr_reserved_highatomic <= - pageblock_nr_pages) - continue; - - spin_lock_irqsave(&zone->lock, flags); - for (order = 0; order < NR_PAGE_ORDERS; order++) { - struct free_area *area = &(zone->free_area[order]); - int mt; - - page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); - if (!page) - continue; - - mt = get_pageblock_migratetype(page); - /* - * In page freeing path, migratetype change is racy so - * we can counter several free pages in a pageblock - * in this loop although we changed the pageblock type - * from highatomic to ac->migratetype. So we should - * adjust the count once. - */ - if (is_migrate_highatomic(mt)) { - unsigned long size; - /* - * It should never happen but changes to - * locking could inadvertently allow a per-cpu - * drain to add pages to MIGRATE_HIGHATOMIC - * while unreserving so be safe and watch for - * underflows. - */ - size = max(pageblock_nr_pages, 1UL << order); - size = min(size, zone->nr_reserved_highatomic); - zone->nr_reserved_highatomic -= size; - } - - /* - * Convert to ac->migratetype and avoid the normal - * pageblock stealing heuristics. Minimally, the caller - * is doing the work and needs the pages. More - * importantly, if the block was always converted to - * MIGRATE_UNMOVABLE or another type then the number - * of pageblocks that cannot be completely freed - * may increase. - */ - if (order < pageblock_order) - ret = move_freepages_block(zone, page, mt, - ac->migratetype); - else { - move_to_free_list(page, zone, order, mt, - ac->migratetype); - change_pageblock_range(page, order, - ac->migratetype); - ret = 1; - } - /* - * Reserving the block(s) already succeeded, - * so this should not fail on zone boundaries. - */ - WARN_ON_ONCE(ret == -1); - if (ret > 0) { - spin_unlock_irqrestore(&zone->lock, flags); - return ret; - } - } - spin_unlock_irqrestore(&zone->lock, flags); - } - - return false; -} - -/* - * Try finding a free buddy page on the fallback list and put it on the free - * list of requested migratetype, possibly along with other pages from the same - * block, depending on fragmentation avoidance heuristics. Returns true if - * fallback was found so that __rmqueue_smallest() can grab it. + * This will attempt to claim a whole pageblock for the requested type + * to ensure grouping of such requests in the future. + * + * If a whole block cannot be claimed, steal an individual page, regressing to + * __rmqueue_smallest() logic to at least break up as little contiguity as + * possible. * * The use of signed ints for order and current_order is a deliberate * deviation from the rest of this file, to make the for loop * condition simpler. + * + * Return the stolen page, or NULL if none can be found. */ static __always_inline struct page * __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, @@ -2224,7 +2206,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, int min_order = order; struct page *page; int fallback_mt; - bool can_steal; + bool claim_block; /* * Do not steal pages from freelists belonging to other pageblocks @@ -2243,49 +2225,40 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); + start_migratetype, false, &claim_block); if (fallback_mt == -1) continue; - /* - * We cannot steal all free pages from the pageblock and the - * requested migratetype is movable. 
In that case it's better to - * steal and split the smallest available page instead of the - * largest available page, because even if the next movable - * allocation falls back into a different pageblock than this - * one, it won't cause permanent fragmentation. - */ - if (!can_steal && start_migratetype == MIGRATE_MOVABLE - && current_order > order) - goto find_smallest; + if (!claim_block) + break; - goto do_steal; + page = get_page_from_free_area(area, fallback_mt); + page = try_to_claim_block(zone, page, current_order, order, + start_migratetype, fallback_mt, + alloc_flags); + if (page) + goto got_one; } - return NULL; + if (alloc_flags & ALLOC_NOFRAGMENT) + return NULL; -find_smallest: + /* No luck claiming pageblock. Find the smallest fallback page */ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); - if (fallback_mt != -1) - break; - } - - /* - * This should not happen - we already found a suitable fallback - * when looking for the largest page. - */ - VM_BUG_ON(current_order > MAX_PAGE_ORDER); + start_migratetype, false, &claim_block); + if (fallback_mt == -1) + continue; -do_steal: - page = get_page_from_free_area(area, fallback_mt); + page = get_page_from_free_area(area, fallback_mt); + page_del_and_expand(zone, page, order, current_order, fallback_mt); + goto got_one; + } - /* take off list, maybe claim block, expand remainder */ - page = steal_suitable_fallback(zone, page, current_order, order, - start_migratetype, alloc_flags, can_steal); + return NULL; +got_one: trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); @@ -2862,7 +2835,7 @@ void split_page(struct page *page, unsigned int order) set_page_refcounted(page + i); split_page_owner(page, order, 0); pgalloc_tag_split(page_folio(page), order, 0); - split_page_memcg(page, order, 0); + split_page_memcg(page, order); } EXPORT_SYMBOL_GPL(split_page); @@ -3156,6 +3129,142 @@ out: return page; } +/* + * Reserve the pageblock(s) surrounding an allocation request for + * exclusive use of high-order atomic allocations if there are no + * empty page blocks that contain a page with a suitable order + */ +static void reserve_highatomic_pageblock(struct page *page, int order, + struct zone *zone) +{ + int mt; + unsigned long max_managed, flags; + + /* + * The number reserved as: minimum is 1 pageblock, maximum is + * roughly 1% of a zone. But if 1% of a zone falls below a + * pageblock size, then don't reserve any pageblocks. + * Check is race-prone but harmless. + */ + if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) + return; + max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); + if (zone->nr_reserved_highatomic >= max_managed) + return; + + spin_lock_irqsave(&zone->lock, flags); + + /* Recheck the nr_reserved_highatomic limit under the lock */ + if (zone->nr_reserved_highatomic >= max_managed) + goto out_unlock; + + /* Yoink! 
*/ + mt = get_pageblock_migratetype(page); + /* Only reserve normal pageblocks (i.e., they can merge with others) */ + if (!migratetype_is_mergeable(mt)) + goto out_unlock; + + if (order < pageblock_order) { + if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) + goto out_unlock; + zone->nr_reserved_highatomic += pageblock_nr_pages; + } else { + change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); + zone->nr_reserved_highatomic += 1 << order; + } + +out_unlock: + spin_unlock_irqrestore(&zone->lock, flags); +} + +/* + * Used when an allocation is about to fail under memory pressure. This + * potentially hurts the reliability of high-order allocations when under + * intense memory pressure but failed atomic allocations should be easier + * to recover from than an OOM. + * + * If @force is true, try to unreserve pageblocks even though highatomic + * pageblock is exhausted. + */ +static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, + bool force) +{ + struct zonelist *zonelist = ac->zonelist; + unsigned long flags; + struct zoneref *z; + struct zone *zone; + struct page *page; + int order; + int ret; + + for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, + ac->nodemask) { + /* + * Preserve at least one pageblock unless memory pressure + * is really high. + */ + if (!force && zone->nr_reserved_highatomic <= + pageblock_nr_pages) + continue; + + spin_lock_irqsave(&zone->lock, flags); + for (order = 0; order < NR_PAGE_ORDERS; order++) { + struct free_area *area = &(zone->free_area[order]); + unsigned long size; + + page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); + if (!page) + continue; + + size = max(pageblock_nr_pages, 1UL << order); + /* + * It should never happen but changes to + * locking could inadvertently allow a per-cpu + * drain to add pages to MIGRATE_HIGHATOMIC + * while unreserving so be safe and watch for + * underflows. + */ + if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) + size = zone->nr_reserved_highatomic; + zone->nr_reserved_highatomic -= size; + + /* + * Convert to ac->migratetype and avoid the normal + * pageblock stealing heuristics. Minimally, the caller + * is doing the work and needs the pages. More + * importantly, if the block was always converted to + * MIGRATE_UNMOVABLE or another type then the number + * of pageblocks that cannot be completely freed + * may increase. + */ + if (order < pageblock_order) + ret = move_freepages_block(zone, page, + MIGRATE_HIGHATOMIC, + ac->migratetype); + else { + move_to_free_list(page, zone, order, + MIGRATE_HIGHATOMIC, + ac->migratetype); + change_pageblock_range(page, order, + ac->migratetype); + ret = 1; + } + /* + * Reserving the block(s) already succeeded, + * so this should not fail on zone boundaries. 
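[Editor's note] reserve_highatomic_pageblock(), moved above, sizes the highatomic reserve as roughly 1% of the zone's managed pages, rounded up to whole pageblocks, with nothing reserved when 1% is smaller than a single pageblock. The helper name below is illustrative, and the worked numbers assume common x86-64 defaults (4KiB pages, 2MiB pageblocks), which are not stated in these hunks.

```c
/*
 * The sizing rule reserve_highatomic_pageblock() applies above, written
 * out as a helper for clarity (illustrative, not in the patch).
 *
 * Example, assuming 4KiB pages and pageblock_nr_pages == 512: a zone
 * with 4GiB managed (1048576 pages) gives 1% == 10485 pages, rounded up
 * to 10752 pages, i.e. 21 pageblocks of 2MiB.
 */
static unsigned long highatomic_reserve_limit(struct zone *zone)
{
	unsigned long one_percent = zone_managed_pages(zone) / 100;

	/* Zones where 1% is less than one pageblock reserve nothing. */
	if (one_percent < pageblock_nr_pages)
		return 0;

	return ALIGN(one_percent, pageblock_nr_pages);
}
```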
+ */ + WARN_ON_ONCE(ret == -1); + if (ret > 0) { + spin_unlock_irqrestore(&zone->lock, flags); + return ret; + } + } + spin_unlock_irqrestore(&zone->lock, flags); + } + + return false; +} + static inline long __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags) { @@ -3359,6 +3468,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) */ alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); + if (defrag_mode) { + alloc_flags |= ALLOC_NOFRAGMENT; + return alloc_flags; + } + #ifdef CONFIG_ZONE_DMA32 if (!zone) return alloc_flags; @@ -3450,7 +3564,7 @@ retry: continue; } - if (no_fallback && nr_online_nodes > 1 && + if (no_fallback && !defrag_mode && nr_online_nodes > 1 && zone != zonelist_zone(ac->preferred_zoneref)) { int local_nid; @@ -3561,7 +3675,7 @@ try_this_zone: * It's possible on a UMA machine to get through all zones that are * fragmented. If avoiding fragmentation, reset and try again. */ - if (no_fallback) { + if (no_fallback && !defrag_mode) { alloc_flags &= ~ALLOC_NOFRAGMENT; goto retry; } @@ -4040,15 +4154,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, struct zone *zone; pg_data_t *last_pgdat = NULL; enum zone_type highest_zoneidx = ac->highest_zoneidx; + unsigned int reclaim_order; + + if (defrag_mode) + reclaim_order = max(order, pageblock_order); + else + reclaim_order = order; for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, ac->nodemask) { if (!managed_zone(zone)) continue; - if (last_pgdat != zone->zone_pgdat) { - wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); - last_pgdat = zone->zone_pgdat; - } + if (last_pgdat == zone->zone_pgdat) + continue; + wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); + last_pgdat = zone->zone_pgdat; } } @@ -4098,6 +4218,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); + if (defrag_mode) + alloc_flags |= ALLOC_NOFRAGMENT; + return alloc_flags; } @@ -4480,6 +4603,11 @@ retry: &compaction_retries)) goto retry; + /* Reclaim/compaction failed to prevent the fallback */ + if (defrag_mode) { + alloc_flags &= ALLOC_NOFRAGMENT; + goto retry; + } /* * Deal with possible cpuset update races or zonelist updates to avoid @@ -4901,12 +5029,11 @@ static void ___free_pages(struct page *page, unsigned int order, { /* get PageHead before we drop reference */ int head = PageHead(page); - struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) __free_frozen_pages(page, order, fpi_flags); else if (!head) { - pgalloc_tag_sub_pages(tag, (1 << order) - 1); + pgalloc_tag_sub_pages(page, (1 << order) - 1); while (order-- > 0) __free_frozen_pages(page + (1 << order), order, fpi_flags); @@ -4947,7 +5074,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, split_page_owner(page, order, 0); pgalloc_tag_split(page_folio(page), order, 0); - split_page_memcg(page, order, 0); + split_page_memcg(page, order); while (page < --last) set_page_refcounted(last); @@ -5910,6 +6037,7 @@ static void calculate_totalreserve_pages(void) } } totalreserve_pages = reserve_pages; + trace_mm_calculate_totalreserve_pages(totalreserve_pages); } /* @@ -5939,6 +6067,8 @@ static void setup_per_zone_lowmem_reserve(void) zone->lowmem_reserve[j] = 0; else zone->lowmem_reserve[j] = managed_pages / ratio; + trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, + zone->lowmem_reserve[j]); } } } @@ -6002,6 +6132,7 @@ static void __setup_per_zone_wmarks(void) 
zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; + trace_mm_setup_per_zone_wmarks(zone); spin_unlock_irqrestore(&zone->lock, flags); } @@ -6275,6 +6406,15 @@ static const struct ctl_table page_alloc_sysctl_table[] = { .extra2 = SYSCTL_THREE_THOUSAND, }, { + .procname = "defrag_mode", + .data = &defrag_mode, + .maxlen = sizeof(defrag_mode), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { .procname = "percpu_pagelist_high_fraction", .data = &percpu_pagelist_high_fraction, .maxlen = sizeof(percpu_pagelist_high_fraction), diff --git a/mm/page_counter.c b/mm/page_counter.c index af23f927611b..661e0f2a5127 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -121,6 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, { struct page_counter *c; bool protection = track_protection(counter); + bool track_failcnt = counter->track_failcnt; for (c = counter; c; c = c->parent) { long new; @@ -146,7 +147,8 @@ bool page_counter_try_charge(struct page_counter *counter, * inaccuracy in the failcnt which is only used * to report stats. */ - data_race(c->failcnt++); + if (track_failcnt) + data_race(c->failcnt++); *fail = c; goto failed; } diff --git a/mm/page_ext.c b/mm/page_ext.c index 641d93f6af4c..c351fdfe9e9a 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -508,6 +508,19 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat) #endif /** + * page_ext_lookup() - Lookup a page extension for a PFN. + * @pfn: PFN of the page we're interested in. + * + * Must be called with RCU read lock taken and @pfn must be valid. + * + * Return: NULL if no page_ext exists for this page. + */ +struct page_ext *page_ext_lookup(unsigned long pfn) +{ + return lookup_page_ext(pfn_to_page(pfn)); +} + +/** * page_ext_get() - Get the extended information for a page. * @page: The page we're interested in. * diff --git a/mm/page_idle.c b/mm/page_idle.c index 947c7c7a3728..408aaf29a3ea 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -62,9 +62,14 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio, /* * For PTE-mapped THP, one sub page is referenced, * the whole THP is referenced. + * + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. */ - if (ptep_clear_young_notify(vma, addr, pvmw.pte)) - referenced = true; + if (likely(pte_present(ptep_get(pvmw.pte)))) + referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte); + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) referenced = true; diff --git a/mm/page_io.c b/mm/page_io.c index 9b983de351f9..4bce19df557b 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -638,11 +638,11 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug) if (swap_read_folio_zeromap(folio)) { folio_unlock(folio); goto finish; - } else if (zswap_load(folio)) { - folio_unlock(folio); - goto finish; } + if (zswap_load(folio) != -ENOENT) + goto finish; + /* We have to read from slower devices. Increase zswap protection. 
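[Editor's note] The defrag_mode knob registered in page_alloc_sysctl_table above is a 0/1 integer. Assuming that table is registered under the usual "vm" prefix, it would surface as /proc/sys/vm/defrag_mode; that path is an inference from the table, not confirmed by these hunks. A minimal userspace sketch to enable it:

```c
/*
 * Userspace sketch: enable defrag_mode. The /proc path is an assumption
 * based on the sysctl table above being registered under "vm".
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/defrag_mode", "w");

	if (!f) {
		perror("defrag_mode");
		return 1;
	}
	if (fputs("1\n", f) == EOF)
		perror("write");
	fclose(f);
	return 0;
}
```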
*/ zswap_folio_swapin(folio); diff --git a/mm/page_owner.c b/mm/page_owner.c index 90e31d0e3ed7..cc4a6916eec6 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -229,17 +229,19 @@ static void dec_stack_record_count(depot_stack_handle_t handle, handle); } -static inline void __update_page_owner_handle(struct page_ext *page_ext, +static inline void __update_page_owner_handle(struct page *page, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm) { - int i; + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - for (i = 0; i < (1 << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { page_owner = get_page_owner(page_ext); page_owner->handle = handle; page_owner->order = order; @@ -252,20 +254,22 @@ static inline void __update_page_owner_handle(struct page_ext *page_ext, sizeof(page_owner->comm)); __set_bit(PAGE_EXT_OWNER, &page_ext->flags); __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); - page_ext = page_ext_next(page_ext); } + rcu_read_unlock(); } -static inline void __update_page_owner_free_handle(struct page_ext *page_ext, +static inline void __update_page_owner_free_handle(struct page *page, depot_stack_handle_t handle, unsigned short order, pid_t pid, pid_t tgid, u64 free_ts_nsec) { - int i; + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - for (i = 0; i < (1 << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { page_owner = get_page_owner(page_ext); /* Only __reset_page_owner() wants to clear the bit */ if (handle) { @@ -275,8 +279,8 @@ static inline void __update_page_owner_free_handle(struct page_ext *page_ext, page_owner->free_ts_nsec = free_ts_nsec; page_owner->free_pid = current->pid; page_owner->free_tgid = current->tgid; - page_ext = page_ext_next(page_ext); } + rcu_read_unlock(); } void __reset_page_owner(struct page *page, unsigned short order) @@ -293,6 +297,7 @@ void __reset_page_owner(struct page *page, unsigned short order) page_owner = get_page_owner(page_ext); alloc_handle = page_owner->handle; + page_ext_put(page_ext); /* * Do not specify GFP_NOWAIT to make gfpflags_allow_spinning() == false @@ -301,9 +306,8 @@ void __reset_page_owner(struct page *page, unsigned short order) * to signal stack_depot to avoid spin_locks. 
*/ handle = save_stack(__GFP_NOWARN); - __update_page_owner_free_handle(page_ext, handle, order, current->pid, + __update_page_owner_free_handle(page, handle, order, current->pid, current->tgid, free_ts_nsec); - page_ext_put(page_ext); if (alloc_handle != early_handle) /* @@ -319,19 +323,13 @@ void __reset_page_owner(struct page *page, unsigned short order) noinline void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) { - struct page_ext *page_ext; u64 ts_nsec = local_clock(); depot_stack_handle_t handle; handle = save_stack(gfp_mask); - - page_ext = page_ext_get(page); - if (unlikely(!page_ext)) - return; - __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1, + __update_page_owner_handle(page, handle, order, gfp_mask, -1, ts_nsec, current->pid, current->tgid, current->comm); - page_ext_put(page_ext); inc_stack_record_count(handle, gfp_mask, 1 << order); } @@ -350,44 +348,42 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) void __split_page_owner(struct page *page, int old_order, int new_order) { - int i; - struct page_ext *page_ext = page_ext_get(page); + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - if (unlikely(!page_ext)) - return; - - for (i = 0; i < (1 << old_order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << old_order, page_ext, iter) { page_owner = get_page_owner(page_ext); page_owner->order = new_order; - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } void __folio_copy_owner(struct folio *newfolio, struct folio *old) { - int i; - struct page_ext *old_ext; - struct page_ext *new_ext; + struct page_ext *page_ext; + struct page_ext_iter iter; struct page_owner *old_page_owner; struct page_owner *new_page_owner; depot_stack_handle_t migrate_handle; - old_ext = page_ext_get(&old->page); - if (unlikely(!old_ext)) + page_ext = page_ext_get(&old->page); + if (unlikely(!page_ext)) return; - new_ext = page_ext_get(&newfolio->page); - if (unlikely(!new_ext)) { - page_ext_put(old_ext); + old_page_owner = get_page_owner(page_ext); + page_ext_put(page_ext); + + page_ext = page_ext_get(&newfolio->page); + if (unlikely(!page_ext)) return; - } - old_page_owner = get_page_owner(old_ext); - new_page_owner = get_page_owner(new_ext); + new_page_owner = get_page_owner(page_ext); + page_ext_put(page_ext); + migrate_handle = new_page_owner->handle; - __update_page_owner_handle(new_ext, old_page_owner->handle, + __update_page_owner_handle(&newfolio->page, old_page_owner->handle, old_page_owner->order, old_page_owner->gfp_mask, old_page_owner->last_migrate_reason, old_page_owner->ts_nsec, old_page_owner->pid, @@ -397,7 +393,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old) * will be freed after migration. Keep them until then as they may be * useful. */ - __update_page_owner_free_handle(new_ext, 0, old_page_owner->order, + __update_page_owner_free_handle(&newfolio->page, 0, old_page_owner->order, old_page_owner->free_pid, old_page_owner->free_tgid, old_page_owner->free_ts_nsec); @@ -406,14 +402,12 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old) * for the new one and the old folio otherwise there will be an imbalance * when subtracting those pages from the stack. 
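[Editor's note] The page_owner.c conversions above (and the page_table_check.c ones that follow) all replace the page_ext_get()/page_ext_next()/page_ext_put() pattern with an RCU-protected for_each_page_ext() walk over the pages of the block. The shape of the new idiom, extracted for reference (the function name is illustrative; the loop body stands in for per-page work):

```c
/*
 * Shape of the iteration idiom adopted above: walk the page_ext of each
 * page in an order-N block under the RCU read lock.
 */
static void walk_page_ext_example(struct page *page, unsigned int order)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;

	rcu_read_lock();
	for_each_page_ext(page, 1 << order, page_ext, iter) {
		/* page_ext covers one page of the order-'order' block */
	}
	rcu_read_unlock();
}
```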
*/ - for (i = 0; i < (1 << new_page_owner->order); i++) { + rcu_read_lock(); + for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) { + old_page_owner = get_page_owner(page_ext); old_page_owner->handle = migrate_handle; - old_ext = page_ext_next(old_ext); - old_page_owner = get_page_owner(old_ext); } - - page_ext_put(new_ext); - page_ext_put(old_ext); + rcu_read_unlock(); } void pagetypeinfo_showmixedcount_print(struct seq_file *m, @@ -513,7 +507,7 @@ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret, rcu_read_lock(); memcg_data = READ_ONCE(page->memcg_data); - if (!memcg_data) + if (!memcg_data || PageTail(page)) goto out_unlock; if (memcg_data & MEMCG_DATA_OBJEXTS) @@ -819,7 +813,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) goto ext_put_continue; /* Found early allocated page */ - __update_page_owner_handle(page_ext, early_handle, 0, 0, + __update_page_owner_handle(page, early_handle, 0, 0, -1, local_clock(), current->pid, current->tgid, current->comm); count++; diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 509c6ef8de40..68109ee93841 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext) */ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) { + struct page_ext_iter iter; struct page_ext *page_ext; struct page *page; - unsigned long i; bool anon; if (!pfn_valid(pfn)) return; page = pfn_to_page(pfn); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - BUG_ON(PageSlab(page)); anon = PageAnon(page); - for (i = 0; i < pgcnt; i++) { + rcu_read_lock(); + for_each_page_ext(page, pgcnt, page_ext, iter) { struct page_table_check *ptc = get_page_table_check(page_ext); if (anon) { @@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0); } - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } /* @@ -102,24 +97,20 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, bool rw) { + struct page_ext_iter iter; struct page_ext *page_ext; struct page *page; - unsigned long i; bool anon; if (!pfn_valid(pfn)) return; page = pfn_to_page(pfn); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - BUG_ON(PageSlab(page)); anon = PageAnon(page); - for (i = 0; i < pgcnt; i++) { + rcu_read_lock(); + for_each_page_ext(page, pgcnt, page_ext, iter) { struct page_table_check *ptc = get_page_table_check(page_ext); if (anon) { @@ -129,9 +120,8 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0); } - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } /* @@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, */ void __page_table_check_zero(struct page *page, unsigned int order) { + struct page_ext_iter iter; struct page_ext *page_ext; - unsigned long i; BUG_ON(PageSlab(page)); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - - for (i = 0; i < (1ul << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { struct page_table_check *ptc = get_page_table_check(page_ext); 
BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_read(&ptc->file_map_count)); - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte) @@ -196,9 +181,8 @@ EXPORT_SYMBOL(__page_table_check_pud_clear); /* Whether the swap entry cached writable information */ static inline bool swap_cached_writable(swp_entry_t entry) { - return is_writable_device_exclusive_entry(entry) || - is_writable_device_private_entry(entry) || - is_writable_migration_entry(entry); + return is_writable_device_private_entry(entry) || + is_writable_migration_entry(entry); } static inline void page_table_check_pte_flags(pte_t pte) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 81839a9e74f1..e463c3be934a 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -84,6 +84,7 @@ again: * mapped at the @pvmw->pte * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range * for checking + * @pte_nr: the number of small pages described by @pvmw->pte. * * page_vma_mapped_walk() found a place where pfn range is *potentially* * mapped. check_pte() has to validate this. @@ -100,7 +101,7 @@ again: * Otherwise, return false. * */ -static bool check_pte(struct page_vma_mapped_walk *pvmw) +static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr) { unsigned long pfn; pte_t ptent = ptep_get(pvmw->pte); @@ -111,8 +112,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) return false; entry = pte_to_swp_entry(ptent); - if (!is_migration_entry(entry) && - !is_device_exclusive_entry(entry)) + if (!is_migration_entry(entry)) return false; pfn = swp_offset_pfn(entry); @@ -133,7 +133,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) pfn = pte_pfn(ptent); } - return (pfn - pvmw->pfn) < pvmw->nr_pages; + if ((pfn + pte_nr - 1) < pvmw->pfn) + return false; + if (pfn > (pvmw->pfn + pvmw->nr_pages - 1)) + return false; + return true; } /* Returns true if the two ranges overlap. Careful to not overflow. */ @@ -208,7 +212,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) return false; pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); - if (!check_pte(pvmw)) + if (!check_pte(pvmw, pages_per_huge_page(hstate))) return not_found(pvmw); return true; } @@ -291,7 +295,7 @@ restart: goto next_pte; } this_pte: - if (check_pte(pvmw)) + if (check_pte(pvmw, 1)) return true; next_pte: do { diff --git a/mm/percpu.c b/mm/percpu.c index 7b5835356d1e..b35494c8ede2 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1745,7 +1745,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, gfp = current_gfp_context(gfp); /* whitelisted flags that can be passed to the backing allocators */ pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; + is_atomic = !gfpflags_allow_blocking(gfp); do_warn = !(gfp & __GFP_NOWARN); /* @@ -2191,7 +2191,12 @@ static void pcpu_balance_workfn(struct work_struct *work) * to grow other chunks. This then gives pcpu_reclaim_populated() time * to move fully free chunks to the active list to be freed if * appropriate. 
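[Editor's note] check_pte() above now takes a pte_nr and tests whether the PTE's pfn range [pfn, pfn + pte_nr) overlaps the walk's target range [pvmw->pfn, pvmw->pfn + nr_pages), instead of checking a single pfn. The two early-return comparisons are the standard interval-overlap test, shown in isolation below (illustrative helper, not part of the patch):

```c
/*
 * The interval-overlap test behind the two comparisons in check_pte()
 * above. Both ranges are half-open: [a_start, a_start + a_len) and
 * [b_start, b_start + b_len).
 */
static bool pfn_ranges_overlap(unsigned long a_start, unsigned long a_len,
			       unsigned long b_start, unsigned long b_len)
{
	if (a_start + a_len - 1 < b_start)	/* a ends before b starts */
		return false;
	if (a_start > b_start + b_len - 1)	/* a starts after b ends */
		return false;
	return true;
}
```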
+ * + * Enforce GFP_NOIO allocations because we have pcpu_alloc users + * constrained to GFP_NOIO/NOFS contexts and they could form lock + * dependency through pcpu_alloc_mutex */ + unsigned int flags = memalloc_noio_save(); mutex_lock(&pcpu_alloc_mutex); spin_lock_irq(&pcpu_lock); @@ -2202,6 +2207,7 @@ static void pcpu_balance_workfn(struct work_struct *work) spin_unlock_irq(&pcpu_lock); mutex_unlock(&pcpu_alloc_mutex); + memalloc_noio_restore(flags); } /** diff --git a/mm/rmap.c b/mm/rmap.c index c6c4d4ea29a7..67bb273dfb80 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -672,7 +672,7 @@ void try_to_unmap_flush_dirty(void) (TLB_FLUSH_BATCH_PENDING_MASK / 2) static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, - unsigned long uaddr) + unsigned long start, unsigned long end) { struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; int batch; @@ -681,7 +681,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, if (!pte_accessible(mm, pteval)) return; - arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); + arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); tlb_ubc->flush_required = true; /* @@ -757,7 +757,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm) } #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, - unsigned long uaddr) + unsigned long start, unsigned long end) { } @@ -889,7 +889,7 @@ static bool folio_referenced_one(struct folio *folio, if ((!atomic_read(&vma->vm_mm->mm_users) || check_stable_address_space(vma->vm_mm)) && folio_test_anon(folio) && folio_test_swapbacked(folio) && - !folio_likely_mapped_shared(folio)) { + !folio_maybe_mapped_shared(folio)) { pra->referenced = -1; page_vma_mapped_walk_done(&pvmw); return false; @@ -1044,6 +1044,14 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are clean and not writable from a + * CPU perspective. The MMU notifier takes care of any + * device aspects. + */ + if (!pte_present(entry)) + continue; if (!pte_dirty(entry) && !pte_write(entry)) continue; @@ -1127,6 +1135,80 @@ int folio_mkclean(struct folio *folio) } EXPORT_SYMBOL_GPL(folio_mkclean); +struct wrprotect_file_state { + int cleaned; + pgoff_t pgoff; + unsigned long pfn; + unsigned long nr_pages; +}; + +static bool mapping_wrprotect_range_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long address, void *arg) +{ + struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg; + struct page_vma_mapped_walk pvmw = { + .pfn = state->pfn, + .nr_pages = state->nr_pages, + .pgoff = state->pgoff, + .vma = vma, + .address = address, + .flags = PVMW_SYNC, + }; + + state->cleaned += page_vma_mkclean_one(&pvmw); + + return true; +} + +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked); + +/** + * mapping_wrprotect_range() - Write-protect all mappings in a specified range. + * + * @mapping: The mapping whose reverse mapping should be traversed. + * @pgoff: The page offset at which @pfn is mapped within @mapping. + * @pfn: The PFN of the page mapped in @mapping at @pgoff. + * @nr_pages: The number of physically contiguous base pages spanned. 
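[Editor's note] The pcpu_balance_workfn() change above relies on the scoped allocation-constraint API: every allocation made between memalloc_noio_save() and memalloc_noio_restore() implicitly behaves as GFP_NOIO, which is what breaks the lock dependency described in the comment. The general shape of that pattern (illustrative function name):

```c
/*
 * General shape of the scoped-GFP_NOIO pattern used above: allocations
 * made inside the section have __GFP_IO masked off implicitly.
 */
static void noio_scope_example(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* ... allocate and take locks that IO reclaim might also need ... */

	memalloc_noio_restore(noio_flags);
}
```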
+ * + * Traverses the reverse mapping, finding all VMAs which contain a shared + * mapping of the pages in the specified range in @mapping, and write-protects + * them (that is, updates the page tables to mark the mappings read-only such + * that a write protection fault arises when the mappings are written to). + * + * The @pfn value need not refer to a folio, but rather can reference a kernel + * allocation which is mapped into userland. We therefore do not require that + * the page maps to a folio with a valid mapping or index field, rather the + * caller specifies these in @mapping and @pgoff. + * + * Return: the number of write-protected PTEs, or an error. + */ +int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, + unsigned long pfn, unsigned long nr_pages) +{ + struct wrprotect_file_state state = { + .cleaned = 0, + .pgoff = pgoff, + .pfn = pfn, + .nr_pages = nr_pages, + }; + struct rmap_walk_control rwc = { + .arg = (void *)&state, + .rmap_one = mapping_wrprotect_range_one, + .invalid_vma = invalid_mkclean_vma, + }; + + if (!mapping) + return 0; + + __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, + /* locked = */false); + + return state.cleaned; +} +EXPORT_SYMBOL_GPL(mapping_wrprotect_range); + /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) @@ -1160,8 +1242,8 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, } static __always_inline unsigned int __folio_add_rmap(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level, - int *nr_pmdmapped) + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level, int *nr_pmdmapped) { atomic_t *mapped = &folio->_nr_pages_mapped; const int orig_nr_pages = nr_pages; @@ -1176,6 +1258,16 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, break; } + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); + if (nr == orig_nr_pages) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + do { first += atomic_inc_and_test(&page->_mapcount); } while (page++, --nr_pages > 0); @@ -1184,15 +1276,34 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) nr = first; - atomic_add(orig_nr_pages, &folio->_large_mapcount); + folio_add_large_mapcount(folio, orig_nr_pages, vma); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: first = atomic_inc_and_test(&folio->_entire_mapcount); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + if (level == RMAP_LEVEL_PMD && first) + *nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_inc_return_large_mapcount(folio, vma); + if (nr == 1) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { - *nr_pmdmapped = folio_nr_pages(folio); - nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + nr_pages = folio_large_nr_pages(folio); + /* + * We only track PMD mappings of PMD-sized + * folios separately. + */ + if (level == RMAP_LEVEL_PMD) + *nr_pmdmapped = nr_pages; + nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? 
*/ if (unlikely(nr < 0)) nr = 0; @@ -1201,7 +1312,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, nr = 0; } } - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, vma); break; } return nr; @@ -1322,7 +1433,7 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); - nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); + nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); if (likely(!folio_test_ksm(folio))) __page_check_anon_rmap(folio, page, vma, address); @@ -1338,15 +1449,32 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, case RMAP_LEVEL_PMD: SetPageAnonExclusive(page); break; + case RMAP_LEVEL_PUD: + /* + * Keep the compiler happy, we don't support anonymous + * PUD mappings. + */ + WARN_ON_ONCE(1); + break; } } + + VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && + atomic_read(&folio->_mapcount) > 0, folio); for (i = 0; i < nr_pages; i++) { struct page *cur_page = page + i; - /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ - VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || - (folio_test_large(folio) && - folio_entire_mapcount(folio) > 1)) && + VM_WARN_ON_FOLIO(folio_test_large(folio) && + folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(cur_page), folio); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) + continue; + + /* + * While PTE-mapping a THP we have a PMD and a PTE + * mapping. + */ + VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 && PageAnonExclusive(cur_page), folio); } @@ -1426,14 +1554,11 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { - const int nr = folio_nr_pages(folio); const bool exclusive = flags & RMAP_EXCLUSIVE; - int nr_pmdmapped = 0; + int nr = 1, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); - VM_BUG_ON_VMA(address < vma->vm_start || - address + (nr << PAGE_SHIFT) > vma->vm_end, vma); /* * VM_DROPPABLE mappings don't swap; instead they're just dropped when @@ -1451,29 +1576,35 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, } else if (!folio_test_pmd_mappable(folio)) { int i; + nr = folio_large_nr_pages(folio); for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); - /* increment count (starts at -1) */ - atomic_set(&page->_mapcount, 0); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + /* increment count (starts at -1) */ + atomic_set(&page->_mapcount, 0); if (exclusive) SetPageAnonExclusive(page); } - /* increment count (starts at -1) */ - atomic_set(&folio->_large_mapcount, nr - 1); - atomic_set(&folio->_nr_pages_mapped, nr); + folio_set_large_mapcount(folio, nr, vma); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, nr); } else { + nr = folio_large_nr_pages(folio); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); - /* increment count (starts at -1) */ - atomic_set(&folio->_large_mapcount, 0); - atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); + folio_set_large_mapcount(folio, 1, vma); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); if (exclusive) SetPageAnonExclusive(&folio->page); nr_pmdmapped = nr; } + VM_WARN_ON_ONCE(address < vma->vm_start || + 
address + (nr << PAGE_SHIFT) > vma->vm_end); + __folio_mod_stat(folio, nr, nr_pmdmapped); mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } @@ -1486,7 +1617,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio, VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); - nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); + nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); __folio_mod_stat(folio, nr, nr_pmdmapped); /* See comments in folio_add_anon_rmap_*() */ @@ -1531,6 +1662,27 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, #endif } +/** + * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_add_file_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} + static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) @@ -1548,7 +1700,20 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, break; } - atomic_sub(nr_pages, &folio->_large_mapcount); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); + if (!nr) { + /* Now completely unmapped. */ + nr = folio_nr_pages(folio); + } else { + partially_mapped = nr < folio_large_nr_pages(folio) && + !folio_entire_mapcount(folio); + nr = 0; + } + break; + } + + folio_sub_large_mapcount(folio, nr_pages, vma); do { last += atomic_add_negative(-1, &page->_mapcount); } while (page++, --nr_pages > 0); @@ -1560,13 +1725,32 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, partially_mapped = nr && atomic_read(mapped); break; case RMAP_LEVEL_PMD: - atomic_dec(&folio->_large_mapcount); + case RMAP_LEVEL_PUD: + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + last = atomic_add_negative(-1, &folio->_entire_mapcount); + if (level == RMAP_LEVEL_PMD && last) + nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_dec_return_large_mapcount(folio, vma); + if (!nr) { + /* Now completely unmapped. */ + nr = folio_large_nr_pages(folio); + } else { + partially_mapped = last && + nr < folio_large_nr_pages(folio); + nr = 0; + } + break; + } + + folio_dec_large_mapcount(folio, vma); last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + nr_pages = folio_large_nr_pages(folio); + if (level == RMAP_LEVEL_PMD) + nr_pmdmapped = nr_pages; + nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? 
*/ if (unlikely(nr < 0)) nr = 0; @@ -1640,6 +1824,46 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page, #endif } +/** + * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} + +/* We support batch unmapping of PTEs for lazyfree large folios */ +static inline bool can_batch_unmap_folio_ptes(unsigned long addr, + struct folio *folio, pte_t *ptep) +{ + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; + int max_nr = folio_nr_pages(folio); + pte_t pte = ptep_get(ptep); + + if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) + return false; + if (pte_unused(pte)) + return false; + if (pte_pfn(pte) != folio_pfn(folio)) + return false; + + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL, + NULL, NULL) == max_nr; +} + /* * @arg: enum ttu_flags will be passed to this argument */ @@ -1648,11 +1872,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + bool anon_exclusive, ret = true; pte_t pteval; struct page *subpage; - bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long nr_pages = 1, end_addr; unsigned long pfn; unsigned long hsz = 0; @@ -1702,9 +1927,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, } if (!pvmw.pte) { - if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, - folio)) - goto walk_done; + if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { + if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) + goto walk_done; + /* + * unmap_huge_pmd_locked has either already marked + * the folio as swap-backed or decided to retain it + * due to GUP or speculative references. + */ + goto walk_abort; + } if (flags & TTU_SPLIT_HUGE_PMD) { /* @@ -1722,7 +1954,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - pfn = pte_pfn(ptep_get(pvmw.pte)); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. + */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); + } else { + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + } + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && @@ -1778,24 +2021,33 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); - } else { - flush_cache_page(vma, address, pfn); - /* Nuke the page table entry. 
*/ - if (should_defer_flush(mm, flags)) { - /* - * We clear the PTE but do not flush so potentially - * a remote CPU could still be writing to the folio. - * If the entry was previously clean then the - * architecture must guarantee that a clear->dirty - * transition on a cached TLB entry is written through - * and traps if the PTE is unmapped. - */ - pteval = ptep_get_and_clear(mm, address, pvmw.pte); + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else if (likely(pte_present(pteval))) { + if (folio_test_large(folio) && !(flags & TTU_HWPOISON) && + can_batch_unmap_folio_ptes(address, folio, pvmw.pte)) + nr_pages = folio_nr_pages(folio); + end_addr = address + nr_pages * PAGE_SIZE; + flush_cache_range(vma, address, end_addr); - set_tlb_ubc_flush_pending(mm, pteval, address); - } else { - pteval = ptep_clear_flush(vma, address, pvmw.pte); - } + /* Nuke the page table entry. */ + pteval = get_and_clear_full_ptes(mm, address, pvmw.pte, nr_pages, 0); + /* + * We clear the PTE but do not flush so potentially + * a remote CPU could still be writing to the folio. + * If the entry was previously clean then the + * architecture must guarantee that a clear->dirty + * transition on a cached TLB entry is written through + * and traps if the PTE is unmapped. + */ + if (should_defer_flush(mm, flags)) + set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); + else + flush_tlb_range(vma, address, end_addr); + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else { + pte_clear(mm, address, pvmw.pte); } /* @@ -1805,10 +2057,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); - /* Set the dirty flag on the folio now the pte is gone. */ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); - /* Update high watermark before we lower rss */ update_hiwater_rss(mm); @@ -1822,8 +2070,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } - - } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan @@ -1868,40 +2116,41 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, */ smp_rmb(); - /* - * The only page refs must be one from isolation - * plus the rmap(s) (dropped by discard:). - */ - if (ref_count == 1 + map_count && - (!folio_test_dirty(folio) || - /* - * Unlike MADV_FREE mappings, VM_DROPPABLE - * ones can be dropped even if they've - * been dirtied. - */ - (vma->vm_flags & VM_DROPPABLE))) { - dec_mm_counter(mm, MM_ANONPAGES); - goto discard; - } - - /* - * If the folio was redirtied, it cannot be - * discarded. Remap the page to page table. - */ - set_pte_at(mm, address, pvmw.pte, pteval); - /* - * Unlike MADV_FREE mappings, VM_DROPPABLE ones - * never get swap backed on failure to drop. - */ - if (!(vma->vm_flags & VM_DROPPABLE)) + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + /* + * redirtied either using the page table or a previously + * obtained GUP reference. + */ + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); folio_set_swapbacked(folio); - goto walk_abort; + goto walk_abort; + } else if (ref_count != 1 + map_count) { + /* + * Additional reference. Could be a GUP reference or any + * speculative reference. 
GUP users must mark the folio + * dirty if there was a modification. This folio cannot be + * reclaimed right now either way, so act just like nothing + * happened. + * We'll come back here later and detect if the folio was + * dirtied when the additional reference is gone. + */ + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); + goto walk_abort; + } + add_mm_counter(mm, MM_ANONPAGES, -nr_pages); + goto discard; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } + + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); @@ -1926,10 +2175,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (anon_exclusive) swp_pte = pte_swp_mkexclusive(swp_pte); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + if (likely(pte_present(pteval))) { + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + if (pte_swp_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } set_pte_at(mm, address, pvmw.pte, swp_pte); } else { /* @@ -1946,13 +2202,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter_file(folio)); } discard: - if (unlikely(folio_test_hugetlb(folio))) + if (unlikely(folio_test_hugetlb(folio))) { hugetlb_remove_rmap(folio); - else - folio_remove_rmap_pte(folio, subpage, vma); + } else { + folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); + folio_ref_sub(folio, nr_pages - 1); + } if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); + /* We have already batched the entire folio */ + if (nr_pages > 1) + goto walk_done; continue; walk_abort: ret = false; @@ -2013,9 +2274,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + bool anon_exclusive, writable, ret = true; pte_t pteval; struct page *subpage; - bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; @@ -2082,24 +2343,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - pfn = pte_pfn(ptep_get(pvmw.pte)); - - if (folio_is_zone_device(folio)) { - /* - * Our PTE is a non-present device exclusive entry and - * calculating the subpage as for the common case would - * result in an invalid pointer. - * - * Since only PAGE_SIZE pages can currently be - * migrated, just set it to page. This will need to be - * changed when hugepage migrations to device private - * memory are supported. - */ - VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); - subpage = &folio->page; + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. 
+ */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); } else { - subpage = folio_page(folio, pfn - folio_pfn(folio)); + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); } + + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); @@ -2155,7 +2411,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, } /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); - } else { + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else if (likely(pte_present(pteval))) { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { @@ -2169,58 +2428,27 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); - set_tlb_ubc_flush_pending(mm, pteval, address); + set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else { + pte_clear(mm, address, pvmw.pte); + writable = is_writable_device_private_entry(pte_to_swp_entry(pteval)); } - /* Set the dirty flag on the folio now the pte is gone. */ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); + VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && + !anon_exclusive, folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (folio_is_device_private(folio)) { - unsigned long pfn = folio_pfn(folio); - swp_entry_t entry; - pte_t swp_pte; - - if (anon_exclusive) - WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, - subpage)); - - /* - * Store the pfn of the page in a special migration - * pte. do_swap_page() will wait until the migration - * pte is removed and then restart fault handling. - */ - entry = pte_to_swp_entry(pteval); - if (is_writable_device_private_entry(entry)) - entry = make_writable_migration_entry(pfn); - else if (anon_exclusive) - entry = make_readable_exclusive_migration_entry(pfn); - else - entry = make_readable_migration_entry(pfn); - swp_pte = swp_entry_to_pte(entry); + if (PageHWPoison(subpage)) { + VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); - /* - * pteval maps a zone device page and is therefore - * a swap pte. - */ - if (pte_swp_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_swp_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); - set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); - trace_set_migration_pte(pvmw.address, pte_val(swp_pte), - folio_order(folio)); - /* - * No need to invalidate here it will synchronize on - * against the special swap migration pte. - */ - } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); @@ -2230,8 +2458,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } - - } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. 
Simply discard the pte, vmscan @@ -2247,6 +2475,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, swp_entry_t entry; pte_t swp_pte; + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, @@ -2257,8 +2490,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, page_vma_mapped_walk_done(&pvmw); break; } - VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && - !anon_exclusive, subpage); /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (folio_test_hugetlb(folio)) { @@ -2283,7 +2514,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ - if (pte_write(pteval)) + if (writable) entry = make_writable_migration_entry( page_to_pfn(subpage)); else if (anon_exclusive) @@ -2292,15 +2523,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, else entry = make_readable_migration_entry( page_to_pfn(subpage)); - if (pte_young(pteval)) - entry = make_migration_entry_young(entry); - if (pte_dirty(pteval)) - entry = make_migration_entry_dirty(entry); - swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + if (likely(pte_present(pteval))) { + if (pte_young(pteval)) + entry = make_migration_entry_young(entry); + if (pte_dirty(pteval)) + entry = make_migration_entry_dirty(entry); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + swp_pte = swp_entry_to_pte(entry); + if (pte_swp_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, swp_pte, hsz); @@ -2375,190 +2614,139 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags) } #ifdef CONFIG_DEVICE_PRIVATE -struct make_exclusive_args { - struct mm_struct *mm; - unsigned long address; - void *owner; - bool valid; -}; - -static bool page_make_device_exclusive_one(struct folio *folio, - struct vm_area_struct *vma, unsigned long address, void *priv) +/** + * make_device_exclusive() - Mark a page for exclusive use by a device + * @mm: mm_struct of associated target process + * @addr: the virtual address to mark for exclusive device access + * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering + * @foliop: folio pointer will be stored here on success. + * + * This function looks up the page mapped at the given address, grabs a + * folio reference, locks the folio and replaces the PTE with special + * device-exclusive PFN swap entry, preventing access through the process + * page tables. The function will return with the folio locked and referenced. + * + * On fault, the device-exclusive entries are replaced with the original PTE + * under folio lock, after calling MMU notifiers. + * + * Only anonymous non-hugetlb folios are supported and the VMA must have + * write permissions such that we can fault in the anonymous page writable + * in order to mark it exclusive. 
The caller must hold the mmap_lock in read + * mode. + * + * A driver using this to program access from a device must use a mmu notifier + * critical section to hold a device specific lock during programming. Once + * programming is complete it should drop the folio lock and reference after + * which point CPU access to the page will revoke the exclusive access. + * + * Notes: + * #. This function always operates on individual PTEs mapping individual + * pages. PMD-sized THPs are first remapped to be mapped by PTEs before + * the conversion happens on a single PTE corresponding to @addr. + * #. While concurrent access through the process page tables is prevented, + * concurrent access through other page references (e.g., earlier GUP + * invocation) is not handled and not supported. + * #. device-exclusive entries are considered "clean" and "old" by core-mm. + * Device drivers must update the folio state when informed by MMU + * notifiers. + * + * Returns: pointer to mapped page on success, otherwise a negative error. + */ +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, + void *owner, struct folio **foliop) { - struct mm_struct *mm = vma->vm_mm; - DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); - struct make_exclusive_args *args = priv; - pte_t pteval; - struct page *subpage; - bool ret = true; struct mmu_notifier_range range; + struct folio *folio, *fw_folio; + struct vm_area_struct *vma; + struct folio_walk fw; + struct page *page; swp_entry_t entry; pte_t swp_pte; - pte_t ptent; - - mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, - vma->vm_mm, address, min(vma->vm_end, - address + folio_size(folio)), - args->owner); - mmu_notifier_invalidate_range_start(&range); - - while (page_vma_mapped_walk(&pvmw)) { - /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_FOLIO(!pvmw.pte, folio); - - ptent = ptep_get(pvmw.pte); - if (!pte_present(ptent)) { - ret = false; - page_vma_mapped_walk_done(&pvmw); - break; - } - - subpage = folio_page(folio, - pte_pfn(ptent) - folio_pfn(folio)); - address = pvmw.address; - - /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(ptent)); - pteval = ptep_clear_flush(vma, address, pvmw.pte); - - /* Set the dirty flag on the folio now the pte is gone. */ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); - - /* - * Check that our target page is still mapped at the expected - * address. - */ - if (args->mm == mm && args->address == address && - pte_write(pteval)) - args->valid = true; - - /* - * Store the pfn of the page in a special migration - * pte. do_swap_page() will wait until the migration - * pte is removed and then restart fault handling. - */ - if (pte_write(pteval)) - entry = make_writable_device_exclusive_entry( - page_to_pfn(subpage)); - else - entry = make_readable_device_exclusive_entry( - page_to_pfn(subpage)); - swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + int ret; - set_pte_at(mm, address, pvmw.pte, swp_pte); + mmap_assert_locked(mm); + addr = PAGE_ALIGN_DOWN(addr); - /* - * There is a reference on the page for the swap entry which has - * been removed, so shouldn't take another. - */ - folio_remove_rmap_pte(folio, subpage, vma); + /* + * Fault in the page writable and try to lock it; note that if the + * address would already be marked for exclusive use by a device, + * the GUP call would undo that first by triggering a fault. 
+ * + * If any other device would already map this page exclusively, the + * fault will trigger a conversion to an ordinary + * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. + */ +retry: + page = get_user_page_vma_remote(mm, addr, + FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, + &vma); + if (IS_ERR(page)) + return page; + folio = page_folio(page); + + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { + folio_put(folio); + return ERR_PTR(-EOPNOTSUPP); } - mmu_notifier_invalidate_range_end(&range); - - return ret; -} - -/** - * folio_make_device_exclusive - Mark the folio exclusively owned by a device. - * @folio: The folio to replace page table entries for. - * @mm: The mm_struct where the folio is expected to be mapped. - * @address: Address where the folio is expected to be mapped. - * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks - * - * Tries to remove all the page table entries which are mapping this - * folio and replace them with special device exclusive swap entries to - * grant a device exclusive access to the folio. - * - * Context: Caller must hold the folio lock. - * Return: false if the page is still mapped, or if it could not be unmapped - * from the expected address. Otherwise returns true (success). - */ -static bool folio_make_device_exclusive(struct folio *folio, - struct mm_struct *mm, unsigned long address, void *owner) -{ - struct make_exclusive_args args = { - .mm = mm, - .address = address, - .owner = owner, - .valid = false, - }; - struct rmap_walk_control rwc = { - .rmap_one = page_make_device_exclusive_one, - .done = folio_not_mapped, - .anon_lock = folio_lock_anon_vma_read, - .arg = &args, - }; + ret = folio_lock_killable(folio); + if (ret) { + folio_put(folio); + return ERR_PTR(ret); + } /* - * Restrict to anonymous folios for now to avoid potential writeback - * issues. + * Inform secondary MMUs that we are going to convert this PTE to + * device-exclusive, such that they unmap it now. Note that the + * caller must filter this event out to prevent livelocks. */ - if (!folio_test_anon(folio)) - return false; - - rmap_walk(folio, &rwc); + mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, + mm, addr, addr + PAGE_SIZE, owner); + mmu_notifier_invalidate_range_start(&range); - return args.valid && !folio_mapcount(folio); -} + /* + * Let's do a second walk and make sure we still find the same page + * mapped writable. Note that any page of an anonymous folio can + * only be mapped writable using exactly one PTE ("exclusive"), so + * there cannot be other mappings. + */ + fw_folio = folio_walk_start(&fw, vma, addr, 0); + if (fw_folio != folio || fw.page != page || + fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { + if (fw_folio) + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); + folio_unlock(folio); + folio_put(folio); + goto retry; + } -/** - * make_device_exclusive_range() - Mark a range for exclusive use by a device - * @mm: mm_struct of associated target process - * @start: start of the region to mark for exclusive device access - * @end: end address of region - * @pages: returns the pages which were successfully marked for exclusive access - * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering - * - * Returns: number of pages found in the range by GUP. A page is marked for - * exclusive access only if the page pointer is non-NULL. 
- * - * This function finds ptes mapping page(s) to the given address range, locks - * them and replaces mappings with special swap entries preventing userspace CPU - * access. On fault these entries are replaced with the original mapping after - * calling MMU notifiers. - * - * A driver using this to program access from a device must use a mmu notifier - * critical section to hold a device specific lock during programming. Once - * programming is complete it should drop the page lock and reference after - * which point CPU access to the page will revoke the exclusive access. - */ -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, - unsigned long end, struct page **pages, - void *owner) -{ - long npages = (end - start) >> PAGE_SHIFT; - long i; - - npages = get_user_pages_remote(mm, start, npages, - FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, - pages, NULL); - if (npages < 0) - return npages; - - for (i = 0; i < npages; i++, start += PAGE_SIZE) { - struct folio *folio = page_folio(pages[i]); - if (PageTail(pages[i]) || !folio_trylock(folio)) { - folio_put(folio); - pages[i] = NULL; - continue; - } + /* Nuke the page table entry so we get the uptodate dirty bit. */ + flush_cache_page(vma, addr, page_to_pfn(page)); + fw.pte = ptep_clear_flush(vma, addr, fw.ptep); - if (!folio_make_device_exclusive(folio, mm, start, owner)) { - folio_unlock(folio); - folio_put(folio); - pages[i] = NULL; - } - } + /* Set the dirty flag on the folio now the PTE is gone. */ + if (pte_dirty(fw.pte)) + folio_mark_dirty(folio); - return npages; + /* + * Store the pfn of the page in a special device-exclusive PFN swap PTE. + * do_swap_page() will trigger the conversion back while holding the + * folio lock. + */ + entry = make_device_exclusive_entry(page_to_pfn(page)); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(fw.pte)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + /* The pte is writable, uffd-wp does not apply. */ + set_pte_at(mm, addr, fw.ptep, swp_pte); + + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); + *foliop = folio; + return page; } -EXPORT_SYMBOL_GPL(make_device_exclusive_range); +EXPORT_SYMBOL_GPL(make_device_exclusive); #endif void __put_anon_vma(struct anon_vma *anon_vma) @@ -2653,35 +2841,37 @@ static void rmap_walk_anon(struct folio *folio, anon_vma_unlock_read(anon_vma); } -/* - * rmap_walk_file - do something to file page using the object-based rmap method - * @folio: the folio to be handled - * @rwc: control variable according to each walk type - * @locked: caller holds relevant rmap lock +/** + * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping + * of a page mapped within a specified page cache object at a specified offset. * - * Find all the mappings of a folio using the mapping pointer and the vma chains - * contained in the address_space struct it points to. + * @folio: Either the folio whose mappings to traverse, or if NULL, + * the callbacks specified in @rwc will be configured such + * as to be able to look up mappings correctly. + * @mapping: The page cache object whose mapping VMAs we intend to + * traverse. If @folio is non-NULL, this should be equal to + * folio_mapping(folio). + * @pgoff_start: The offset within @mapping of the page which we are + * looking up. If @folio is non-NULL, this should be equal + * to folio_pgoff(folio). + * @nr_pages: The number of pages mapped by the mapping. If @folio is + * non-NULL, this should be equal to folio_nr_pages(folio). 
+ * @rwc: The reverse mapping walk control object describing how + * the traversal should proceed. + * @locked: Is the @mapping already locked? If not, we acquire the + * lock. */ -static void rmap_walk_file(struct folio *folio, - struct rmap_walk_control *rwc, bool locked) +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked) { - struct address_space *mapping = folio_mapping(folio); - pgoff_t pgoff_start, pgoff_end; + pgoff_t pgoff_end = pgoff_start + nr_pages - 1; struct vm_area_struct *vma; - /* - * The page lock not only makes sure that page->mapping cannot - * suddenly be NULLified by truncation, it makes sure that the - * structure at mapping cannot be freed and reused yet, - * so we can safely take mapping->i_mmap_rwsem. - */ - VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); + VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); + VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); - if (!mapping) - return; - - pgoff_start = folio_pgoff(folio); - pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; @@ -2696,8 +2886,7 @@ static void rmap_walk_file(struct folio *folio, lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { - unsigned long address = vma_address(vma, pgoff_start, - folio_nr_pages(folio)); + unsigned long address = vma_address(vma, pgoff_start, nr_pages); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); @@ -2710,12 +2899,38 @@ lookup: if (rwc->done && rwc->done(folio)) goto done; } - done: if (!locked) i_mmap_unlock_read(mapping); } +/* + * rmap_walk_file - do something to file page using the object-based rmap method + * @folio: the folio to be handled + * @rwc: control variable according to each walk type + * @locked: caller holds relevant rmap lock + * + * Find all the mappings of a folio using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + */ +static void rmap_walk_file(struct folio *folio, + struct rmap_walk_control *rwc, bool locked) +{ + /* + * The folio lock not only makes sure that folio->mapping cannot + * suddenly be NULLified by truncation, it makes sure that the structure + * at mapping cannot be freed and reused yet, so we can safely take + * mapping->i_mmap_rwsem. 
+ */ + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + + if (!folio->mapping) + return; + + __rmap_walk_file(folio, folio->mapping, folio->index, + folio_nr_pages(folio), rwc, locked); +} + void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) diff --git a/mm/shmem.c b/mm/shmem.c index ab61c8bb20e1..99327c30507c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -86,7 +86,6 @@ static struct vfsmount *shm_mnt __ro_after_init; #include "internal.h" -#define BLOCKS_PER_PAGE (PAGE_SIZE/512) #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) /* Pretend that each entry is of this size in directory's i_size */ @@ -526,9 +525,9 @@ static bool shmem_confirm_swap(struct address_space *mapping, * enables huge pages for the mount; * SHMEM_HUGE_WITHIN_SIZE: * only allocate huge pages if the page will be fully within i_size, - * also respect fadvise()/madvise() hints; + * also respect madvise() hints; * SHMEM_HUGE_ADVISE: - * only allocate huge pages if requested with fadvise()/madvise(); + * only allocate huge pages if requested with madvise(); */ #define SHMEM_HUGE_NEVER 0 @@ -591,6 +590,28 @@ shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t w return order > 0 ? BIT(order + 1) - 1 : 0; } +static unsigned int shmem_get_orders_within_size(struct inode *inode, + unsigned long within_size_orders, pgoff_t index, + loff_t write_end) +{ + pgoff_t aligned_index; + unsigned long order; + loff_t i_size; + + order = highest_order(within_size_orders); + while (within_size_orders) { + aligned_index = round_up(index + 1, 1 << order); + i_size = max(write_end, i_size_read(inode)); + i_size = round_up(i_size, PAGE_SIZE); + if (i_size >> PAGE_SHIFT >= aligned_index) + return within_size_orders; + + order = next_order(&within_size_orders, order); + } + + return 0; +} + static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, loff_t write_end, bool shmem_huge_force, struct vm_area_struct *vma, @@ -599,9 +620,6 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 
0 : BIT(HPAGE_PMD_ORDER); unsigned long within_size_orders; - unsigned int order; - pgoff_t aligned_index; - loff_t i_size; if (!S_ISREG(inode->i_mode)) return 0; @@ -635,16 +653,11 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index within_size_orders = shmem_mapping_size_orders(inode->i_mapping, index, write_end); - order = highest_order(within_size_orders); - while (within_size_orders) { - aligned_index = round_up(index + 1, 1 << order); - i_size = max(write_end, i_size_read(inode)); - i_size = round_up(i_size, PAGE_SIZE); - if (i_size >> PAGE_SHIFT >= aligned_index) - return within_size_orders; + within_size_orders = shmem_get_orders_within_size(inode, within_size_orders, + index, write_end); + if (within_size_orders > 0) + return within_size_orders; - order = next_order(&within_size_orders, order); - } fallthrough; case SHMEM_HUGE_ADVISE: if (vm_flags & VM_HUGEPAGE) @@ -1380,9 +1393,9 @@ static void shmem_evict_inode(struct inode *inode) #endif } -static int shmem_find_swap_entries(struct address_space *mapping, - pgoff_t start, struct folio_batch *fbatch, - pgoff_t *indices, unsigned int type) +static unsigned int shmem_find_swap_entries(struct address_space *mapping, + pgoff_t start, struct folio_batch *fbatch, + pgoff_t *indices, unsigned int type) { XA_STATE(xas, &mapping->i_pages, start); struct folio *folio; @@ -1415,7 +1428,7 @@ static int shmem_find_swap_entries(struct address_space *mapping, } rcu_read_unlock(); - return xas.xa_index; + return folio_batch_count(fbatch); } /* @@ -1462,8 +1475,8 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type) do { folio_batch_init(&fbatch); - shmem_find_swap_entries(mapping, start, &fbatch, indices, type); - if (folio_batch_count(&fbatch) == 0) { + if (!shmem_find_swap_entries(mapping, start, &fbatch, + indices, type)) { ret = 0; break; } @@ -1533,7 +1546,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); - swp_entry_t swap; pgoff_t index; int nr_pages; bool split = false; @@ -1615,14 +1627,6 @@ try_split: folio_mark_uptodate(folio); } - swap = folio_alloc_swap(folio); - if (!swap.val) { - if (nr_pages > 1) - goto try_split; - - goto redirty; - } - /* * Add inode to shmem_unuse()'s list of swapped-out inodes, * if it's not already there. 
Do it now before the folio is @@ -1635,20 +1639,20 @@ try_split: if (list_empty(&info->swaplist)) list_add(&info->swaplist, &shmem_swaplist); - if (add_to_swap_cache(folio, swap, - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, - NULL) == 0) { + if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) { shmem_recalc_inode(inode, 0, nr_pages); - swap_shmem_alloc(swap, nr_pages); - shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); + swap_shmem_alloc(folio->swap, nr_pages); + shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap)); mutex_unlock(&shmem_swaplist_mutex); BUG_ON(folio_mapped(folio)); return swap_writepage(&folio->page, wbc); } + list_del_init(&info->swaplist); mutex_unlock(&shmem_swaplist_mutex); - put_swap_folio(folio, swap); + if (nr_pages > 1) + goto try_split; redirty: folio_mark_dirty(folio); if (wbc->for_reclaim) @@ -1757,10 +1761,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, unsigned long mask = READ_ONCE(huge_shmem_orders_always); unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); unsigned long vm_flags = vma ? vma->vm_flags : 0; - pgoff_t aligned_index; unsigned int global_orders; - loff_t i_size; - int order; if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags))) return 0; @@ -1786,17 +1787,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, return READ_ONCE(huge_shmem_orders_inherit); /* Allow mTHP that will be fully within i_size. */ - order = highest_order(within_size_orders); - while (within_size_orders) { - aligned_index = round_up(index + 1, 1 << order); - i_size = round_up(i_size_read(inode), PAGE_SIZE); - if (i_size >> PAGE_SHIFT >= aligned_index) { - mask |= within_size_orders; - break; - } - - order = next_order(&within_size_orders, order); - } + mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0); if (vm_flags & VM_HUGEPAGE) mask |= READ_ONCE(huge_shmem_orders_madvise); @@ -2017,7 +2008,7 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode, __folio_set_swapbacked(new); new->swap = entry; - mem_cgroup_swapin_uncharge_swap(entry, nr_pages); + memcg1_swapin(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) workingset_refault(new, shadow); @@ -2162,15 +2153,16 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index, { struct address_space *mapping = inode->i_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); - void *alloced_shadow = NULL; - int alloced_order = 0, i; + int split_order = 0, entry_order; + int i; /* Convert user data gfp flags to xarray node gfp flags */ gfp &= GFP_RECLAIM_MASK; for (;;) { - int order = -1, split_order = 0; void *old = NULL; + int cur_order; + pgoff_t swap_index; xas_lock_irq(&xas); old = xas_load(&xas); @@ -2179,60 +2171,56 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index, goto unlock; } - order = xas_get_order(&xas); + entry_order = xas_get_order(&xas); - /* Swap entry may have changed before we re-acquire the lock */ - if (alloced_order && - (old != alloced_shadow || order != alloced_order)) { - xas_destroy(&xas); - alloced_order = 0; - } + if (!entry_order) + goto unlock; /* Try to split large swap entry in pagecache */ - if (order > 0) { - if (!alloced_order) { - split_order = order; + cur_order = entry_order; + swap_index = round_down(index, 1 << entry_order); + + split_order = xas_try_split_min_order(cur_order); + + while (cur_order > 0) { + pgoff_t aligned_index = + round_down(index, 1 << 
cur_order); + pgoff_t swap_offset = aligned_index - swap_index; + + xas_set_order(&xas, index, split_order); + xas_try_split(&xas, old, cur_order); + if (xas_error(&xas)) goto unlock; - } - xas_split(&xas, old, order); /* * Re-set the swap entry after splitting, and the swap * offset of the original large entry must be continuous. */ - for (i = 0; i < 1 << order; i++) { - pgoff_t aligned_index = round_down(index, 1 << order); + for (i = 0; i < 1 << cur_order; + i += (1 << split_order)) { swp_entry_t tmp; - tmp = swp_entry(swp_type(swap), swp_offset(swap) + i); + tmp = swp_entry(swp_type(swap), + swp_offset(swap) + swap_offset + + i); __xa_store(&mapping->i_pages, aligned_index + i, swp_to_radix_entry(tmp), 0); } + cur_order = split_order; + split_order = xas_try_split_min_order(split_order); } unlock: xas_unlock_irq(&xas); - /* split needed, alloc here and retry. */ - if (split_order) { - xas_split_alloc(&xas, old, split_order, gfp); - if (xas_error(&xas)) - goto error; - alloced_shadow = old; - alloced_order = split_order; - xas_reset(&xas); - continue; - } - if (!xas_nomem(&xas, gfp)) break; } -error: if (xas_error(&xas)) return xas_error(&xas); - return alloced_order; + return entry_order; } /* @@ -3302,8 +3290,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping, if (ret) return ret; - if (folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { + if (folio_contain_hwpoisoned_page(folio)) { folio_unlock(folio); folio_put(folio); return -EIO; @@ -5674,19 +5661,19 @@ static int __init setup_thp_shmem(char *str) THP_ORDERS_ALL_FILE_DEFAULT); } - if (start == -EINVAL) { + if (start < 0) { pr_err("invalid size %s in thp_shmem boot parameter\n", start_size); goto err; } - if (end == -EINVAL) { + if (end < 0) { pr_err("invalid size %s in thp_shmem boot parameter\n", end_size); goto err; } - if (start < 0 || end < 0 || start > end) + if (start > end) goto err; nr = end - start + 1; @@ -5853,7 +5840,7 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, * underlying inode. So users of this interface must do LSM checks at a * higher layer. The users are the big_key and shm implementations. LSM * checks are provided at the key or shm level rather than the inode. 
- * @name: name for dentry (to be seen in /proc/<pid>/maps + * @name: name for dentry (to be seen in /proc/<pid>/maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ @@ -5865,7 +5852,7 @@ EXPORT_SYMBOL_GPL(shmem_kernel_file_setup); /** * shmem_file_setup - get an unlinked file living in tmpfs - * @name: name for dentry (to be seen in /proc/<pid>/maps + * @name: name for dentry (to be seen in /proc/<pid>/maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ @@ -5878,7 +5865,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup); /** * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs * @mnt: the tmpfs mount where the file will be created - * @name: name for dentry (to be seen in /proc/<pid>/maps + * @name: name for dentry (to be seen in /proc/<pid>/maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ diff --git a/mm/show_mem.c b/mm/show_mem.c index 43afb56abbd3..6af13bcd2ab3 100644 --- a/mm/show_mem.c +++ b/mm/show_mem.c @@ -260,6 +260,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z " pagetables:%lukB" " sec_pagetables:%lukB" " all_unreclaimable? %s" + " Balloon:%lukB" "\n", pgdat->node_id, K(node_page_state(pgdat, NR_ACTIVE_ANON)), @@ -285,7 +286,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z #endif K(node_page_state(pgdat, NR_PAGETABLE)), K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), - str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)); + str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES), + K(node_page_state(pgdat, NR_BALLOON_PAGES))); } for_each_populated_zone(zone) { diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c index 794bd433cce0..20eaee3e97f7 100644 --- a/mm/shrinker_debug.c +++ b/mm/shrinker_debug.c @@ -214,10 +214,14 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) ret = debugfs_change_name(shrinker->debugfs_entry, "%s-%d", shrinker->name, shrinker->debugfs_id); + if (ret) { + shrinker->name = old; + kfree_const(new); + } else { + kfree_const(old); + } mutex_unlock(&shrinker_mutex); - kfree_const(old); - return ret; } EXPORT_SYMBOL(shrinker_debugfs_rename); diff --git a/mm/slub.c b/mm/slub.c index 5eac408e818e..b46f87662e71 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2023,7 +2023,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, return 0; } -static inline void free_slab_obj_exts(struct slab *slab) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void free_slab_obj_exts(struct slab *slab) { struct slabobj_ext *obj_exts; @@ -2100,33 +2101,37 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) return slab_obj_exts(slab) + obj_to_index(s, slab, p); } -static inline void -alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) { - if (need_slab_obj_ext()) { - struct slabobj_ext *obj_exts; + struct slabobj_ext *obj_exts; - obj_exts = prepare_slab_obj_exts_hook(s, flags, object); - /* - * Currently obj_exts is used only for allocation profiling. - * If other users appear then mem_alloc_profiling_enabled() - * check should be added before alloc_tag_add(). 
- */ - if (likely(obj_exts)) - alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); - } + obj_exts = prepare_slab_obj_exts_hook(s, flags, object); + /* + * Currently obj_exts is used only for allocation profiling. + * If other users appear then mem_alloc_profiling_enabled() + * check should be added before alloc_tag_add(). + */ + if (likely(obj_exts)) + alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); } static inline void -alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, - int objects) +alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +{ + if (need_slab_obj_ext()) + __alloc_tagging_slab_alloc_hook(s, object, flags); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) { struct slabobj_ext *obj_exts; int i; - if (!mem_alloc_profiling_enabled()) - return; - /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) return; @@ -2142,6 +2147,14 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, } } +static inline void +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) +{ + if (mem_alloc_profiling_enabled()) + __alloc_tagging_slab_free_hook(s, slab, p, objects); +} + #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 3287ebadd167..fd2ab5118e13 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -30,6 +30,15 @@ #include <asm/dma.h> #include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#include "hugetlb_vmemmap.h" + +/* + * Flags for vmemmap_populate_range and friends. + */ +/* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */ +#define VMEMMAP_POPULATE_PAGEREF 0x0001 #include "internal.h" @@ -144,17 +153,18 @@ void __meminit vmemmap_verify(pte_t *pte, int node, pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, unsigned long flags) { pte_t *pte = pte_offset_kernel(pmd, addr); if (pte_none(ptep_get(pte))) { pte_t entry; void *p; - if (!reuse) { + if (ptpfn == (unsigned long)-1) { p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); if (!p) return NULL; + ptpfn = PHYS_PFN(__pa(p)); } else { /* * When a PTE/PMD entry is freed from the init_mm @@ -165,10 +175,10 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, * and through vmemmap_populate_compound_pages() when * slab is available. 
*/ - get_page(reuse); - p = page_to_virt(reuse); + if (flags & VMEMMAP_POPULATE_PAGEREF) + get_page(pfn_to_page(ptpfn)); } - entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); + entry = pfn_pte(ptpfn, PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } return pte; @@ -238,7 +248,8 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, + unsigned long flags) { pgd_t *pgd; p4d_t *p4d; @@ -258,7 +269,7 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return NULL; - pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse); + pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags); if (!pte) return NULL; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); @@ -269,13 +280,15 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, static int __meminit vmemmap_populate_range(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, + unsigned long flags) { unsigned long addr = start; pte_t *pte; for (; addr < end; addr += PAGE_SIZE) { - pte = vmemmap_populate_address(addr, node, altmap, reuse); + pte = vmemmap_populate_address(addr, node, altmap, + ptpfn, flags); if (!pte) return -ENOMEM; } @@ -286,7 +299,107 @@ static int __meminit vmemmap_populate_range(unsigned long start, int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_range(start, end, node, altmap, NULL); + return vmemmap_populate_range(start, end, node, altmap, -1, 0); +} + +/* + * Undo populate_hvo, and replace it with a normal base page mapping. + * Used in memory init in case a HVO mapping needs to be undone. + * + * This can happen when it is discovered that a memblock allocated + * hugetlb page spans multiple zones, which can only be verified + * after zones have been initialized. + * + * We know that: + * 1) The first @headsize / PAGE_SIZE vmemmap pages were individually + * allocated through memblock, and mapped. + * + * 2) The rest of the vmemmap pages are mirrors of the last head page. + */ +int __meminit vmemmap_undo_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + unsigned long maddr, pfn; + pte_t *pte; + int headpages; + + /* + * Should only be called early in boot, so nothing will + * be accessing these page structures. + */ + WARN_ON(!early_boot_irqs_disabled); + + headpages = headsize >> PAGE_SHIFT; + + /* + * Clear mirrored mappings for tail page structs. + */ + for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + pte_clear(&init_mm, maddr, pte); + } + + /* + * Clear and free mappings for head page and first tail page + * structs. + */ + for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + pfn = pte_pfn(ptep_get(pte)); + pte_clear(&init_mm, maddr, pte); + memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE); + } + + flush_tlb_kernel_range(addr, end); + + return vmemmap_populate(addr, end, node, NULL); +} + +/* + * Write protect the mirrored tail page structs for HVO. This will be + * called from the hugetlb code when gathering and initializing the + * memblock allocated gigantic pages. 
The write protect can't be + * done earlier, since it can't be guaranteed that the reserved + * page structures will not be written to during initialization, + * even if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. + * + * The PTEs are known to exist, and nothing else should be touching + * these pages. The caller is responsible for any TLB flushing. + */ +void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + unsigned long maddr; + pte_t *pte; + + for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + ptep_set_wrprotect(&init_mm, maddr, pte); + } +} + +/* + * Populate vmemmap pages HVO-style. The first page contains the head + * page and needed tail pages, the other ones are mirrors of the first + * page. + */ +int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + pte_t *pte; + unsigned long maddr; + + for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) { + pte = vmemmap_populate_address(maddr, node, NULL, -1, 0); + if (!pte) + return -ENOMEM; + } + + /* + * Reuse the last page struct page mapped above for the rest. + */ + return vmemmap_populate_range(maddr, end, node, NULL, + pte_pfn(ptep_get(pte)), 0); } void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, @@ -409,7 +522,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, * with just tail struct pages. */ return vmemmap_populate_range(start, end, node, NULL, - pte_page(ptep_get(pte))); + pte_pfn(ptep_get(pte)), + VMEMMAP_POPULATE_PAGEREF); } size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); @@ -417,13 +531,13 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, unsigned long next, last = addr + size; /* Populate the head page vmemmap page */ - pte = vmemmap_populate_address(addr, node, NULL, NULL); + pte = vmemmap_populate_address(addr, node, NULL, -1, 0); if (!pte) return -ENOMEM; /* Populate the tail pages vmemmap page */ next = addr + PAGE_SIZE; - pte = vmemmap_populate_address(next, node, NULL, NULL); + pte = vmemmap_populate_address(next, node, NULL, -1, 0); if (!pte) return -ENOMEM; @@ -433,7 +547,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, */ next += PAGE_SIZE; rc = vmemmap_populate_range(next, last, node, NULL, - pte_page(ptep_get(pte))); + pte_pfn(ptep_get(pte)), + VMEMMAP_POPULATE_PAGEREF); if (rc) return -ENOMEM; } @@ -470,3 +585,28 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn, return pfn_to_page(pfn); } + +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +/* + * This is called just before initializing sections for a NUMA node. + * Any special initialization that needs to be done before the + * generic initialization can be done from here. Sections that + * are initialized in hooks called from here will be skipped by + * the generic initialization. + */ +void __init sparse_vmemmap_init_nid_early(int nid) +{ + hugetlb_vmemmap_init_early(nid); +} + +/* + * This is called just before the initialization of page structures + * through memmap_init. Zones are now initialized, so any work that + * needs to be done that needs zone information can be done from + * here. 
+ */ +void __init sparse_vmemmap_init_nid_late(int nid) +{ + hugetlb_vmemmap_init_late(nid); +} +#endif diff --git a/mm/sparse.c b/mm/sparse.c index 133b033d0cba..3c012cf83cc2 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -170,11 +170,6 @@ static void __section_mark_present(struct mem_section *ms, ms->section_mem_map |= SECTION_MARKED_PRESENT; } -#define for_each_present_section_nr(start, section_nr) \ - for (section_nr = next_present_section_nr(start-1); \ - section_nr != -1; \ - section_nr = next_present_section_nr(section_nr)) - static inline unsigned long first_present_section_nr(void) { return next_present_section_nr(-1); @@ -408,13 +403,13 @@ static void __init check_usemap_section_nr(int nid, #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_SPARSEMEM_VMEMMAP -static unsigned long __init section_map_size(void) +unsigned long __init section_map_size(void) { return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); } #else -static unsigned long __init section_map_size(void) +unsigned long __init section_map_size(void) { return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); } @@ -495,6 +490,44 @@ void __weak __meminit vmemmap_populate_print_last(void) { } +static void *sparse_usagebuf __meminitdata; +static void *sparse_usagebuf_end __meminitdata; + +/* + * Helper function that is used for generic section initialization, and + * can also be used by any hooks added above. + */ +void __init sparse_init_early_section(int nid, struct page *map, + unsigned long pnum, unsigned long flags) +{ + BUG_ON(!sparse_usagebuf || sparse_usagebuf >= sparse_usagebuf_end); + check_usemap_section_nr(nid, sparse_usagebuf); + sparse_init_one_section(__nr_to_section(pnum), pnum, map, + sparse_usagebuf, SECTION_IS_EARLY | flags); + sparse_usagebuf = (void *)sparse_usagebuf + mem_section_usage_size(); +} + +static int __init sparse_usage_init(int nid, unsigned long map_count) +{ + unsigned long size; + + size = mem_section_usage_size() * map_count; + sparse_usagebuf = sparse_early_usemaps_alloc_pgdat_section( + NODE_DATA(nid), size); + if (!sparse_usagebuf) { + sparse_usagebuf_end = NULL; + return -ENOMEM; + } + + sparse_usagebuf_end = sparse_usagebuf + size; + return 0; +} + +static void __init sparse_usage_fini(void) +{ + sparse_usagebuf = sparse_usagebuf_end = NULL; +} + /* * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end) * And number of present sections in this node is map_count. @@ -503,47 +536,54 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count) { - struct mem_section_usage *usage; unsigned long pnum; struct page *map; + struct mem_section *ms; - usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid), - mem_section_usage_size() * map_count); - if (!usage) { + if (sparse_usage_init(nid, map_count)) { pr_err("%s: node[%d] usemap allocation failed", __func__, nid); goto failed; } + sparse_buffer_init(map_count * section_map_size(), nid); + + sparse_vmemmap_init_nid_early(nid); + for_each_present_section_nr(pnum_begin, pnum) { unsigned long pfn = section_nr_to_pfn(pnum); if (pnum >= pnum_end) break; - map = __populate_section_memmap(pfn, PAGES_PER_SECTION, - nid, NULL, NULL); - if (!map) { - pr_err("%s: node[%d] memory map backing failed. 
Some memory will not be available.",
-			       __func__, nid);
-			pnum_begin = pnum;
-			sparse_buffer_fini();
-			goto failed;
+		ms = __nr_to_section(pnum);
+		if (!preinited_vmemmap_section(ms)) {
+			map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+					nid, NULL, NULL);
+			if (!map) {
+				pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
+				       __func__, nid);
+				pnum_begin = pnum;
+				sparse_usage_fini();
+				sparse_buffer_fini();
+				goto failed;
+			}
+			sparse_init_early_section(nid, map, pnum, 0);
 		}
-		check_usemap_section_nr(nid, usage);
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
-				SECTION_IS_EARLY);
-		usage = (void *) usage + mem_section_usage_size();
 	}
+	sparse_usage_fini();
 	sparse_buffer_fini();
 	return;
 failed:
-	/* We failed to allocate, mark all the following pnums as not present */
+	/*
+	 * We failed to allocate, mark all the following pnums as not present,
+	 * except the ones already initialized earlier.
+	 */
 	for_each_present_section_nr(pnum_begin, pnum) {
-		struct mem_section *ms;
-
 		if (pnum >= pnum_end)
 			break;
 		ms = __nr_to_section(pnum);
+		if (!preinited_vmemmap_section(ms))
+			ms->section_mem_map = 0;
-		ms->section_mem_map = 0;
 	}
 }
diff --git a/mm/swap.c b/mm/swap.c
index b81cce146eb2..77b2d5997873 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -956,8 +956,6 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			unlock_page_lruvec_irqrestore(lruvec, flags);
 			lruvec = NULL;
 		}
-		if (put_devmap_managed_folio_refs(folio, nr_refs))
-			continue;
 		if (folio_ref_sub_and_test(folio, nr_refs))
 			free_zone_device_folio(folio);
 		continue;
diff --git a/mm/swap.h b/mm/swap.h
index 274dcc6219a0..6f4a3f927edb 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -51,7 +51,6 @@ static inline pgoff_t swap_cache_index(swp_entry_t entry)
 }
 void show_swap_cache_info(void);
-bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
 int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp);
@@ -164,11 +163,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 	return filemap_get_folio(mapping, index);
 }
-static inline bool add_to_swap(struct folio *folio)
-{
-	return false;
-}
-
 static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
 {
 	return NULL;
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 1007c30f12e2..de779fed8c21 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -92,8 +92,7 @@ void swap_cgroup_record(struct folio *folio, unsigned short id,
  */
 unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
 {
-	pgoff_t offset = swp_offset(ent);
-	pgoff_t end = offset + nr_ents;
+	pgoff_t offset, end;
 	struct swap_cgroup *map;
 	unsigned short old, iter = 0;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
deleted file mode 100644
index 9c7c171df7ba..000000000000
--- a/mm/swap_slots.c
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Manage cache of swap slots to be used for and returned from
- * swap.
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * We allocate the swap slots from the global pool and put
- * it into local per cpu caches. This has the advantage
- * of no needing to acquire the swap_info lock every time
- * we need a new slot.
- *
- * There is also opportunity to simply return the slot
- * to local caches without needing to acquire swap_info
- * lock. We do not reuse the returned slots directly but
- * move them back to the global pool in a batch.
This - * allows the slots to coalesce and reduce fragmentation. - * - * The swap entry allocated is marked with SWAP_HAS_CACHE - * flag in map_count that prevents it from being allocated - * again from the global pool. - * - * The swap slots cache is protected by a mutex instead of - * a spin lock as when we search for slots with scan_swap_map, - * we can possibly sleep. - */ - -#include <linux/swap_slots.h> -#include <linux/cpu.h> -#include <linux/cpumask.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/mutex.h> -#include <linux/mm.h> - -static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots); -static bool swap_slot_cache_active; -bool swap_slot_cache_enabled; -static bool swap_slot_cache_initialized; -static DEFINE_MUTEX(swap_slots_cache_mutex); -/* Serialize swap slots cache enable/disable operations */ -static DEFINE_MUTEX(swap_slots_cache_enable_mutex); - -static void __drain_swap_slots_cache(void); - -#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled) - -static void deactivate_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_mutex); - swap_slot_cache_active = false; - __drain_swap_slots_cache(); - mutex_unlock(&swap_slots_cache_mutex); -} - -static void reactivate_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_mutex); - swap_slot_cache_active = true; - mutex_unlock(&swap_slots_cache_mutex); -} - -/* Must not be called with cpu hot plug lock */ -void disable_swap_slots_cache_lock(void) -{ - mutex_lock(&swap_slots_cache_enable_mutex); - swap_slot_cache_enabled = false; - if (swap_slot_cache_initialized) { - /* serialize with cpu hotplug operations */ - cpus_read_lock(); - __drain_swap_slots_cache(); - cpus_read_unlock(); - } -} - -static void __reenable_swap_slots_cache(void) -{ - swap_slot_cache_enabled = has_usable_swap(); -} - -void reenable_swap_slots_cache_unlock(void) -{ - __reenable_swap_slots_cache(); - mutex_unlock(&swap_slots_cache_enable_mutex); -} - -static bool check_cache_active(void) -{ - long pages; - - if (!swap_slot_cache_enabled) - return false; - - pages = get_nr_swap_pages(); - if (!swap_slot_cache_active) { - if (pages > num_online_cpus() * - THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE) - reactivate_swap_slots_cache(); - goto out; - } - - /* if global pool of slot caches too low, deactivate cache */ - if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE) - deactivate_swap_slots_cache(); -out: - return swap_slot_cache_active; -} - -static int alloc_swap_slot_cache(unsigned int cpu) -{ - struct swap_slots_cache *cache; - swp_entry_t *slots; - - /* - * Do allocation outside swap_slots_cache_mutex - * as kvzalloc could trigger reclaim and folio_alloc_swap, - * which can lock swap_slots_cache_mutex. - */ - slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), - GFP_KERNEL); - if (!slots) - return -ENOMEM; - - mutex_lock(&swap_slots_cache_mutex); - cache = &per_cpu(swp_slots, cpu); - if (cache->slots) { - /* cache already allocated */ - mutex_unlock(&swap_slots_cache_mutex); - - kvfree(slots); - - return 0; - } - - if (!cache->lock_initialized) { - mutex_init(&cache->alloc_lock); - cache->lock_initialized = true; - } - cache->nr = 0; - cache->cur = 0; - cache->n_ret = 0; - /* - * We initialized alloc_lock and free_lock earlier. We use - * !cache->slots or !cache->slots_ret to know if it is safe to acquire - * the corresponding lock and use the cache. Memory barrier below - * ensures the assumption. 
- */ - mb(); - cache->slots = slots; - mutex_unlock(&swap_slots_cache_mutex); - return 0; -} - -static void drain_slots_cache_cpu(unsigned int cpu, bool free_slots) -{ - struct swap_slots_cache *cache; - - cache = &per_cpu(swp_slots, cpu); - if (cache->slots) { - mutex_lock(&cache->alloc_lock); - swapcache_free_entries(cache->slots + cache->cur, cache->nr); - cache->cur = 0; - cache->nr = 0; - if (free_slots && cache->slots) { - kvfree(cache->slots); - cache->slots = NULL; - } - mutex_unlock(&cache->alloc_lock); - } -} - -static void __drain_swap_slots_cache(void) -{ - unsigned int cpu; - - /* - * This function is called during - * 1) swapoff, when we have to make sure no - * left over slots are in cache when we remove - * a swap device; - * 2) disabling of swap slot cache, when we run low - * on swap slots when allocating memory and need - * to return swap slots to global pool. - * - * We cannot acquire cpu hot plug lock here as - * this function can be invoked in the cpu - * hot plug path: - * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback - * -> memory allocation -> direct reclaim -> folio_alloc_swap - * -> drain_swap_slots_cache - * - * Hence the loop over current online cpu below could miss cpu that - * is being brought online but not yet marked as online. - * That is okay as we do not schedule and run anything on a - * cpu before it has been marked online. Hence, we will not - * fill any swap slots in slots cache of such cpu. - * There are no slots on such cpu that need to be drained. - */ - for_each_online_cpu(cpu) - drain_slots_cache_cpu(cpu, false); -} - -static int free_slot_cache(unsigned int cpu) -{ - mutex_lock(&swap_slots_cache_mutex); - drain_slots_cache_cpu(cpu, true); - mutex_unlock(&swap_slots_cache_mutex); - return 0; -} - -void enable_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_enable_mutex); - if (!swap_slot_cache_initialized) { - int ret; - - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache", - alloc_swap_slot_cache, free_slot_cache); - if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating " - "without swap slots cache.\n", __func__)) - goto out_unlock; - - swap_slot_cache_initialized = true; - } - - __reenable_swap_slots_cache(); -out_unlock: - mutex_unlock(&swap_slots_cache_enable_mutex); -} - -/* called with swap slot cache's alloc lock held */ -static int refill_swap_slots_cache(struct swap_slots_cache *cache) -{ - if (!use_swap_slot_cache) - return 0; - - cache->cur = 0; - if (swap_slot_cache_active) - cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, - cache->slots, 0); - - return cache->nr; -} - -swp_entry_t folio_alloc_swap(struct folio *folio) -{ - swp_entry_t entry; - struct swap_slots_cache *cache; - - entry.val = 0; - - if (folio_test_large(folio)) { - if (IS_ENABLED(CONFIG_THP_SWAP)) - get_swap_pages(1, &entry, folio_order(folio)); - goto out; - } - - /* - * Preemption is allowed here, because we may sleep - * in refill_swap_slots_cache(). But it is safe, because - * accesses to the per-CPU data structure are protected by the - * mutex cache->alloc_lock. - * - * The alloc path here does not touch cache->slots_ret - * so cache->free_lock is not taken. 
- */ - cache = raw_cpu_ptr(&swp_slots); - - if (likely(check_cache_active() && cache->slots)) { - mutex_lock(&cache->alloc_lock); - if (cache->slots) { -repeat: - if (cache->nr) { - entry = cache->slots[cache->cur]; - cache->slots[cache->cur++].val = 0; - cache->nr--; - } else if (refill_swap_slots_cache(cache)) { - goto repeat; - } - } - mutex_unlock(&cache->alloc_lock); - if (entry.val) - goto out; - } - - get_swap_pages(1, &entry, 0); -out: - if (mem_cgroup_try_charge_swap(folio, entry)) { - put_swap_folio(folio, entry); - entry.val = 0; - } - return entry; -} diff --git a/mm/swap_state.c b/mm/swap_state.c index ca42b2be64d9..68fd981b514f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -20,7 +20,6 @@ #include <linux/blkdev.h> #include <linux/migrate.h> #include <linux/vmalloc.h> -#include <linux/swap_slots.h> #include <linux/huge_mm.h> #include <linux/shmem_fs.h> #include "internal.h" @@ -85,7 +84,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry) /* * add_to_swap_cache resembles filemap_add_folio on swapper_space, - * but sets SwapCache flag and private instead of mapping and index. + * but sets SwapCache flag and 'swap' instead of mapping and index. */ int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp) @@ -167,67 +166,6 @@ void __delete_from_swap_cache(struct folio *folio, __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr); } -/** - * add_to_swap - allocate swap space for a folio - * @folio: folio we want to move to swap - * - * Allocate swap space for the folio and add the folio to the - * swap cache. - * - * Context: Caller needs to hold the folio lock. - * Return: Whether the folio was added to the swap cache. - */ -bool add_to_swap(struct folio *folio) -{ - swp_entry_t entry; - int err; - - VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); - - entry = folio_alloc_swap(folio); - if (!entry.val) - return false; - - /* - * XArray node allocations from PF_MEMALLOC contexts could - * completely exhaust the page allocator. __GFP_NOMEMALLOC - * stops emergency reserves from being allocated. - * - * TODO: this could cause a theoretical memory reclaim - * deadlock in the swap out path. - */ - /* - * Add it to the swap cache. - */ - err = add_to_swap_cache(folio, entry, - __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL); - if (err) - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. - */ - goto fail; - /* - * Normally the folio will be dirtied in unmap because its - * pte should be dirty. A special case is MADV_FREE page. The - * page's pte could have dirty bit cleared but the folio's - * SwapBacked flag is still set because clearing the dirty bit - * and SwapBacked flag has no lock protected. For such folio, - * unmap will not set dirty bit for it, so folio reclaim will - * not write the folio out. This can cause data corruption when - * the folio is swapped in later. Always setting the dirty flag - * for the folio solves the problem. - */ - folio_mark_dirty(folio); - - return true; - -fail: - put_swap_folio(folio, entry); - return false; -} - /* * This must be called only on folios that have * been verified to be in the swap cache and locked. 
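The HVO helpers added to mm/sparse-vmemmap.c earlier in this diff keep only the first headsize bytes of a gigantic page's vmemmap individually mapped and leave every remaining vmemmap page as a mirror of the last head page. The stand-alone sketch below works through what that saves for one 1 GiB hugetlb page; the 4 KiB base page, the 64-byte struct page and the single-page head portion are illustrative assumptions, not values taken from this patch.

/*
 * hvo_savings.c: rough arithmetic for the vmemmap mirroring described above.
 * All sizes are assumed for illustration only.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size   = 4096;        /* assumed base page size */
	const unsigned long struct_page = 64;          /* assumed sizeof(struct page) */
	const unsigned long huge_size   = 1UL << 30;   /* one 1 GiB hugetlb page */
	const unsigned long headsize    = page_size;   /* assumed head portion */

	unsigned long nr_pages      = huge_size / page_size;              /* base pages covered */
	unsigned long vmemmap_pages = nr_pages * struct_page / page_size; /* pages backing the vmemmap */
	unsigned long kept          = headsize / page_size;               /* individually mapped */
	unsigned long mirrored      = vmemmap_pages - kept;               /* aliased to the head page */

	printf("vmemmap pages: %lu, kept: %lu, mirrored: %lu (%lu KiB not separately backed)\n",
	       vmemmap_pages, kept, mirrored, mirrored * page_size / 1024);
	return 0;
}

With these assumed values the program reports 4096 vmemmap pages of which 4095 are mirrored, which is the kind of saving the write-protected mirror mapping is meant to preserve across early boot.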
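The header comment of the removed mm/swap_slots.c, quoted above, describes its design: take a batch of entries from the global pool under a lock, then hand them out from a per-CPU cache so that most allocations never touch the global lock. The following is a minimal user-space model of the allocation side of that batching pattern, assuming a pthread build; CACHE_SIZE, refill_cache() and alloc_slot() are illustrative stand-ins, not kernel interfaces, and the global "pool" is reduced to a counter.

/* slot_cache_model.c: sketch of the batched per-CPU cache idea only. */
#include <pthread.h>
#include <stdio.h>

#define CACHE_SIZE 64                    /* stand-in for SWAP_SLOTS_CACHE_SIZE */

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_slot = 1;      /* global "pool": just a counter here */

static __thread unsigned long cache[CACHE_SIZE];
static __thread int cache_nr;            /* entries left in the local cache */

/* Slow path: refill the local cache in one batch under the global lock. */
static void refill_cache(void)
{
	pthread_mutex_lock(&pool_lock);
	for (int i = 0; i < CACHE_SIZE; i++)
		cache[i] = next_slot++;
	cache_nr = CACHE_SIZE;
	pthread_mutex_unlock(&pool_lock);
}

/* Fast path: most allocations are served without the global lock. */
static unsigned long alloc_slot(void)
{
	if (cache_nr == 0)
		refill_cache();
	return cache[--cache_nr];
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("got slot %lu\n", alloc_slot());
	return 0;
}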
@@ -270,9 +208,7 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, xa_unlock_irq(&address_space->i_pages); /* search the next swapcache until we meet end */ - curr >>= SWAP_ADDRESS_SPACE_SHIFT; - curr++; - curr <<= SWAP_ADDRESS_SPACE_SHIFT; + curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES); if (curr > end) break; } @@ -432,17 +368,13 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, bool skip_if_exists) { - struct swap_info_struct *si; + struct swap_info_struct *si = swp_swap_info(entry); struct folio *folio; struct folio *new_folio = NULL; struct folio *result = NULL; void *shadow = NULL; *new_page_allocated = false; - si = get_swap_device(entry); - if (!si) - return NULL; - for (;;) { int err; /* @@ -457,13 +389,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* * Just skip read ahead for unused swap slot. - * During swap_off when swap_slot_cache is disabled, - * we have to handle the race between putting - * swap entry in swap cache and marking swap slot - * as SWAP_HAS_CACHE. That's done in later part of code or - * else swap_off will be aborted if we return NULL. */ - if (!swap_swapcount(si, entry) && swap_slot_cache_enabled) + if (!swap_entry_swapped(si, entry)) goto put_and_return; /* @@ -521,7 +448,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; - mem_cgroup_swapin_uncharge_swap(entry, 1); + memcg1_swapin(entry, 1); if (shadow) workingset_refault(new_folio, shadow); @@ -538,7 +465,6 @@ fail_unlock: put_swap_folio(new_folio, entry); folio_unlock(new_folio); put_and_return: - put_swap_device(si); if (!(*new_page_allocated) && new_folio) folio_put(new_folio); return result; @@ -558,11 +484,16 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug) { + struct swap_info_struct *si; bool page_allocated; struct mempolicy *mpol; pgoff_t ilx; struct folio *folio; + si = get_swap_device(entry); + if (!si) + return NULL; + mpol = get_vma_policy(vma, addr, 0, &ilx); folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, &page_allocated, false); @@ -570,6 +501,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (page_allocated) swap_read_folio(folio, plug); + + put_swap_device(si); return folio; } diff --git a/mm/swapfile.c b/mm/swapfile.c index df7c4e8b089c..2eff8b51a945 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -37,7 +37,6 @@ #include <linux/oom.h> #include <linux/swapfile.h> #include <linux/export.h> -#include <linux/swap_slots.h> #include <linux/sort.h> #include <linux/completion.h> #include <linux/suspend.h> @@ -116,6 +115,18 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0); atomic_t nr_rotate_swap = ATOMIC_INIT(0); +struct percpu_swap_cluster { + struct swap_info_struct *si[SWAP_NR_ORDERS]; + unsigned long offset[SWAP_NR_ORDERS]; + local_lock_t lock; +}; + +static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = { + .si = { NULL }, + .offset = { SWAP_ENTRY_INVALID }, + .lock = INIT_LOCAL_LOCK(), +}; + static struct swap_info_struct *swap_type_to_swap_info(int type) { if (type >= MAX_SWAPFILES) @@ -158,10 +169,8 @@ static long swap_usage_in_pages(struct swap_info_struct *si) #define TTRS_UNMAPPED 0x2 /* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 -/* 
Reclaim directly, bypass the slot cache and don't touch device lock */ -#define TTRS_DIRECT 0x8 -static bool swap_is_has_cache(struct swap_info_struct *si, +static bool swap_only_has_cache(struct swap_info_struct *si, unsigned long offset, int nr_pages) { unsigned char *map = si->swap_map + offset; @@ -210,6 +219,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, int ret, nr_pages; bool need_reclaim; +again: folio = filemap_get_folio(address_space, swap_cache_index(entry)); if (IS_ERR(folio)) return 0; @@ -227,8 +237,16 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, if (!folio_trylock(folio)) goto out; - /* offset could point to the middle of a large folio */ + /* + * Offset could point to the middle of a large folio, or folio + * may no longer point to the expected offset before it's locked. + */ entry = folio->swap; + if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) { + folio_unlock(folio); + folio_put(folio); + goto again; + } offset = swp_offset(entry); need_reclaim = ((flags & TTRS_ANYWAY) || @@ -243,28 +261,13 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, * reference or pending writeback, and can't be allocated to others. */ ci = lock_cluster(si, offset); - need_reclaim = swap_is_has_cache(si, offset, nr_pages); + need_reclaim = swap_only_has_cache(si, offset, nr_pages); unlock_cluster(ci); if (!need_reclaim) goto out_unlock; - if (!(flags & TTRS_DIRECT)) { - /* Free through slot cache */ - delete_from_swap_cache(folio); - folio_set_dirty(folio); - ret = nr_pages; - goto out_unlock; - } - - xa_lock_irq(&address_space->i_pages); - __delete_from_swap_cache(folio, entry, NULL); - xa_unlock_irq(&address_space->i_pages); - folio_ref_sub(folio, nr_pages); + delete_from_swap_cache(folio); folio_set_dirty(folio); - - ci = lock_cluster(si, offset); - swap_entry_range_free(si, ci, entry, nr_pages); - unlock_cluster(ci); ret = nr_pages; out_unlock: folio_unlock(folio); @@ -479,15 +482,6 @@ static void move_cluster(struct swap_info_struct *si, static void swap_cluster_schedule_discard(struct swap_info_struct *si, struct swap_cluster_info *ci) { - unsigned int idx = cluster_index(si, ci); - /* - * If scan_swap_map_slots() can't find a free cluster, it will check - * si->swap_map directly. To make sure the discarding cluster isn't - * taken by scan_swap_map_slots(), mark the swap entries bad (occupied). - * It will be cleared after discard - */ - memset(si->swap_map + idx * SWAPFILE_CLUSTER, - SWAP_MAP_BAD, SWAPFILE_CLUSTER); VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); schedule_work(&si->discard_work); @@ -556,7 +550,7 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si) ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); /* * Delete the cluster from list to prepare for discard, but keep - * the CLUSTER_FLAG_DISCARD flag, there could be percpu_cluster + * the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be * pointing to it, or ran into by relocate_cluster. */ list_del(&ci->list); @@ -571,8 +565,6 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si) * return the cluster to allocation list. 
*/ ci->flags = CLUSTER_FLAG_NONE; - memset(si->swap_map + idx * SWAPFILE_CLUSTER, - 0, SWAPFILE_CLUSTER); __free_cluster(si, ci); spin_unlock(&ci->lock); ret = true; @@ -699,7 +691,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si, offset++; break; case SWAP_HAS_CACHE: - nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); + nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); if (nr_reclaim > 0) offset += nr_reclaim; else @@ -730,6 +722,9 @@ static bool cluster_scan_range(struct swap_info_struct *si, unsigned long offset, end = start + nr_pages; unsigned char *map = si->swap_map; + if (cluster_is_empty(ci)) + return true; + for (offset = start; offset < end; offset++) { switch (READ_ONCE(map[offset])) { case 0: @@ -821,14 +816,15 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, out: relocate_cluster(si, ci); unlock_cluster(ci); - if (si->flags & SWP_SOLIDSTATE) - __this_cpu_write(si->percpu_cluster->next[order], next); - else + if (si->flags & SWP_SOLIDSTATE) { + this_cpu_write(percpu_swap_cluster.offset[order], next); + this_cpu_write(percpu_swap_cluster.si[order], si); + } else { si->global_cluster->next[order] = next; + } return found; } -/* Return true if reclaimed a whole cluster */ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) { long to_scan = 1; @@ -849,7 +845,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { spin_unlock(&ci->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, - TTRS_ANYWAY | TTRS_DIRECT); + TTRS_ANYWAY); spin_lock(&ci->lock); if (nr_reclaim) { offset += abs(nr_reclaim); @@ -879,27 +875,29 @@ static void swap_reclaim_work(struct work_struct *work) } /* - * Try to get swap entries with specified order from current cpu's swap entry - * pool (a cluster). This might involve allocating a new cluster for current CPU - * too. + * Try to allocate swap entries with specified order and try set a new + * cluster for current CPU too. */ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, unsigned char usage) { struct swap_cluster_info *ci; - unsigned int offset, found = 0; + unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; - if (si->flags & SWP_SOLIDSTATE) { - /* Fast path using per CPU cluster */ - local_lock(&si->percpu_cluster->lock); - offset = __this_cpu_read(si->percpu_cluster->next[order]); - } else { + /* + * Swapfile is not block device so unable + * to allocate large entries. + */ + if (order && !(si->flags & SWP_BLKDEV)) + return 0; + + if (!(si->flags & SWP_SOLIDSTATE)) { /* Serialize HDD SWAP allocation for each device. */ spin_lock(&si->global_cluster_lock); offset = si->global_cluster->next[order]; - } + if (offset == SWAP_ENTRY_INVALID) + goto new_cluster; - if (offset) { ci = lock_cluster(si, offset); /* Cluster could have been used by another order */ if (cluster_is_usable(ci, order)) { @@ -990,9 +988,7 @@ new_cluster: } } done: - if (si->flags & SWP_SOLIDSTATE) - local_unlock(&si->percpu_cluster->lock); - else + if (!(si->flags & SWP_SOLIDSTATE)) spin_unlock(&si->global_cluster_lock); return found; } @@ -1106,7 +1102,7 @@ static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) /* * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, - * remove it from the plist. + * add it to the plist. 
*/ if (unlikely(val & SWAP_USAGE_OFFLIST_BIT)) add_to_avail_list(si, false); @@ -1160,61 +1156,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, swap_usage_sub(si, nr_entries); } -static int cluster_alloc_swap(struct swap_info_struct *si, - unsigned char usage, int nr, - swp_entry_t slots[], int order) -{ - int n_ret = 0; - - while (n_ret < nr) { - unsigned long offset = cluster_alloc_swap_entry(si, order, usage); - - if (!offset) - break; - slots[n_ret++] = swp_entry(si->type, offset); - } - - return n_ret; -} - -static int scan_swap_map_slots(struct swap_info_struct *si, - unsigned char usage, int nr, - swp_entry_t slots[], int order) -{ - unsigned int nr_pages = 1 << order; - - /* - * We try to cluster swap pages by allocating them sequentially - * in swap. Once we've allocated SWAPFILE_CLUSTER pages this - * way, however, we resort to first-free allocation, starting - * a new cluster. This prevents us from scattering swap pages - * all over the entire swap partition, so that we reduce - * overall disk seek times between swap pages. -- sct - * But we do now try to find an empty cluster. -Andrea - * And we let swap pages go all over an SSD partition. Hugh - */ - if (order > 0) { - /* - * Should not even be attempting large allocations when huge - * page swap is disabled. Warn and fail the allocation. - */ - if (!IS_ENABLED(CONFIG_THP_SWAP) || - nr_pages > SWAPFILE_CLUSTER) { - VM_WARN_ON_ONCE(1); - return 0; - } - - /* - * Swapfile is not block device so unable - * to allocate large entries. - */ - if (!(si->flags & SWP_BLKDEV)) - return 0; - } - - return cluster_alloc_swap(si, usage, nr, slots, order); -} - static bool get_swap_device_info(struct swap_info_struct *si) { if (!percpu_ref_tryget_live(&si->users)) @@ -1231,39 +1172,65 @@ static bool get_swap_device_info(struct swap_info_struct *si) return true; } -int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) +/* + * Fast path try to get swap entries with specified order from current + * CPU's swap entry pool (a cluster). + */ +static bool swap_alloc_fast(swp_entry_t *entry, + int order) { - int order = swap_entry_order(entry_order); - unsigned long size = 1 << order; - struct swap_info_struct *si, *next; - long avail_pgs; - int n_ret = 0; - int node; + struct swap_cluster_info *ci; + struct swap_info_struct *si; + unsigned int offset, found = SWAP_ENTRY_INVALID; - spin_lock(&swap_avail_lock); + /* + * Once allocated, swap_info_struct will never be completely freed, + * so checking it's liveness by get_swap_device_info is enough. 
+ */ + si = this_cpu_read(percpu_swap_cluster.si[order]); + offset = this_cpu_read(percpu_swap_cluster.offset[order]); + if (!si || !offset || !get_swap_device_info(si)) + return false; - avail_pgs = atomic_long_read(&nr_swap_pages) / size; - if (avail_pgs <= 0) { - spin_unlock(&swap_avail_lock); - goto noswap; + ci = lock_cluster(si, offset); + if (cluster_is_usable(ci, order)) { + if (cluster_is_empty(ci)) + offset = cluster_offset(si, ci); + found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); + if (found) + *entry = swp_entry(si->type, found); + } else { + unlock_cluster(ci); } - n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs); + put_swap_device(si); + return !!found; +} - atomic_long_sub(n_goal * size, &nr_swap_pages); +/* Rotate the device and switch to a new cluster */ +static bool swap_alloc_slow(swp_entry_t *entry, + int order) +{ + int node; + unsigned long offset; + struct swap_info_struct *si, *next; -start_over: node = numa_node_id(); + spin_lock(&swap_avail_lock); +start_over: plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { - /* requeue si to after same-priority siblings */ + /* Rotate the device and switch to a new cluster */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { - n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, - n_goal, swp_entries, order); + offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); put_swap_device(si); - if (n_ret || size > 1) - goto check_out; + if (offset) { + *entry = swp_entry(si->type, offset); + return true; + } + if (order) + return false; } spin_lock(&swap_avail_lock); @@ -1281,15 +1248,68 @@ start_over: if (plist_node_empty(&next->avail_lists[node])) goto start_over; } - spin_unlock(&swap_avail_lock); + return false; +} + +/** + * folio_alloc_swap - allocate swap space for a folio + * @folio: folio we want to move to swap + * @gfp: gfp mask for shadow nodes + * + * Allocate swap space for the folio and add the folio to the + * swap cache. + * + * Context: Caller needs to hold the folio lock. + * Return: Whether the folio was added to the swap cache. + */ +int folio_alloc_swap(struct folio *folio, gfp_t gfp) +{ + unsigned int order = folio_order(folio); + unsigned int size = 1 << order; + swp_entry_t entry = {}; + + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); + + /* + * Should not even be attempting large allocations when huge + * page swap is disabled. Warn and fail the allocation. + */ + if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) { + VM_WARN_ON_ONCE(1); + return -EINVAL; + } + + local_lock(&percpu_swap_cluster.lock); + if (!swap_alloc_fast(&entry, order)) + swap_alloc_slow(&entry, order); + local_unlock(&percpu_swap_cluster.lock); -check_out: - if (n_ret < n_goal) - atomic_long_add((long)(n_goal - n_ret) * size, - &nr_swap_pages); -noswap: - return n_ret; + /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ + if (mem_cgroup_try_charge_swap(folio, entry)) + goto out_free; + + if (!entry.val) + return -ENOMEM; + + /* + * XArray node allocations from PF_MEMALLOC contexts could + * completely exhaust the page allocator. __GFP_NOMEMALLOC + * stops emergency reserves from being allocated. + * + * TODO: this could cause a theoretical memory reclaim + * deadlock in the swap out path. 
+ */ + if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL)) + goto out_free; + + atomic_long_sub(size, &nr_swap_pages); + return 0; + +out_free: + put_swap_folio(folio, entry); + return -ENOMEM; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) @@ -1574,7 +1594,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) return; ci = lock_cluster(si, offset); - if (swap_is_has_cache(si, offset, size)) + if (swap_only_has_cache(si, offset, size)) swap_entry_range_free(si, ci, entry, size); else { for (int i = 0; i < size; i++, entry.val++) { @@ -1585,25 +1605,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster(ci); } -void swapcache_free_entries(swp_entry_t *entries, int n) -{ - int i; - struct swap_cluster_info *ci; - struct swap_info_struct *si = NULL; - - if (n <= 0) - return; - - for (i = 0; i < n; ++i) { - si = _swap_info_get(entries[i]); - if (si) { - ci = lock_cluster(si, swp_offset(entries[i])); - swap_entry_range_free(si, ci, entries[i], 1); - unlock_cluster(ci); - } - } -} - int __swap_count(swp_entry_t entry) { struct swap_info_struct *si = swp_swap_info(entry); @@ -1617,7 +1618,7 @@ int __swap_count(swp_entry_t entry) * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ -int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; @@ -1626,7 +1627,7 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); unlock_cluster(ci); - return count; + return !!count; } /* @@ -1712,7 +1713,7 @@ static bool folio_swapped(struct folio *folio) return false; if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) - return swap_swapcount(si, entry) != 0; + return swap_entry_swapped(si, entry); return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } @@ -1786,9 +1787,6 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr) bool any_only_cache = false; unsigned long offset; - if (non_swap_entry(entry)) - return; - si = get_swap_device(entry); if (!si) return; @@ -1847,6 +1845,7 @@ out: swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si = swap_type_to_swap_info(type); + unsigned long offset; swp_entry_t entry = {0}; if (!si) @@ -1854,8 +1853,13 @@ swp_entry_t get_swap_page_of_type(int type) /* This is called for allocating swap entry, not cache */ if (get_swap_device_info(si)) { - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) - atomic_long_dec(&nr_swap_pages); + if (si->flags & SWP_WRITEOK) { + offset = cluster_alloc_swap_entry(si, 0, 1); + if (offset) { + entry = swp_entry(si->type, offset); + atomic_long_dec(&nr_swap_pages); + } + } put_swap_device(si); } fail: @@ -2616,21 +2620,6 @@ static void reinsert_swap_info(struct swap_info_struct *si) spin_unlock(&swap_lock); } -static bool __has_usable_swap(void) -{ - return !plist_head_empty(&swap_active_head); -} - -bool has_usable_swap(void) -{ - bool ret; - - spin_lock(&swap_lock); - ret = __has_usable_swap(); - spin_unlock(&swap_lock); - return ret; -} - /* * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range * see the updated flags, so there will be no more allocations. 
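The allocator that replaces the slot cache, shown above, no longer reserves entries per CPU. It only remembers, per CPU and per allocation order, which device and cluster offset the last allocation used; folio_alloc_swap() tries that cached position first and only falls back to the locked priority-list walk in swap_alloc_slow() when the fast path fails. The sketch below models that fast-path/slow-path split in user space; the structures and helpers are illustrative, free space is reduced to a bump counter, and threads stand in for CPUs, so this shows the shape of the scheme rather than the kernel algorithm.

/* cluster_fastpath_model.c: fast path reuses the remembered device,
 * slow path scans a shared device list under a lock and records the
 * device it used for the next fast-path attempt.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ORDERS 4

struct swapdev {
	unsigned long next_free;         /* free space modelled as a counter */
	unsigned long size;
	pthread_mutex_t lock;
};

static struct swapdev devs[2] = {
	{ 0, 1024, PTHREAD_MUTEX_INITIALIZER },
	{ 0, 4096, PTHREAD_MUTEX_INITIALIZER },
};
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-thread stand-in for the per-CPU, per-order cache in the diff. */
static __thread struct swapdev *cached[NR_ORDERS];

static bool alloc_range(struct swapdev *d, int order, unsigned long *off)
{
	unsigned long nr = 1UL << order;
	bool ok = false;

	pthread_mutex_lock(&d->lock);
	if (d->next_free + nr <= d->size) {
		*off = d->next_free;
		d->next_free += nr;
		ok = true;
	}
	pthread_mutex_unlock(&d->lock);
	return ok;
}

/* Fast path: no device-list traversal, just the remembered device. */
static bool alloc_fast(int order, unsigned long *off)
{
	return cached[order] && alloc_range(cached[order], order, off);
}

/* Slow path: walk the device list under the shared lock. */
static bool alloc_slow(int order, unsigned long *off)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < 2; i++) {
		if (alloc_range(&devs[i], order, off)) {
			cached[order] = &devs[i];   /* remember for next time */
			pthread_mutex_unlock(&list_lock);
			return true;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return false;
}

int main(void)
{
	unsigned long off;

	for (int i = 0; i < 3; i++)
		if (alloc_fast(0, &off) || alloc_slow(0, &off))
			printf("order-0 entry at offset %lu\n", off);
	return 0;
}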
@@ -2649,6 +2638,28 @@ static void wait_for_allocation(struct swap_info_struct *si) } } +/* + * Called after swap device's reference count is dead, so + * neither scan nor allocation will use it. + */ +static void flush_percpu_swap_cluster(struct swap_info_struct *si) +{ + int cpu, i; + struct swap_info_struct **pcp_si; + + for_each_possible_cpu(cpu) { + pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); + /* + * Invalidate the percpu swap cluster cache, si->users + * is dead, so no new user will point to it, just flush + * any existing user. + */ + for (i = 0; i < SWAP_NR_ORDERS; i++) + cmpxchg(&pcp_si[i], si, NULL); + } +} + + SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; @@ -2721,8 +2732,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) wait_for_allocation(p); - disable_swap_slots_cache_lock(); - set_current_oom_origin(); err = try_to_unuse(p->type); clear_current_oom_origin(); @@ -2730,12 +2739,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) if (err) { /* re-insert swap space back into swap_list */ reinsert_swap_info(p); - reenable_swap_slots_cache_unlock(); goto out_dput; } - reenable_swap_slots_cache_unlock(); - /* * Wait for swap operations protected by get/put_swap_device() * to complete. Because of synchronize_rcu() here, all swap @@ -2750,6 +2756,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) flush_work(&p->discard_work); flush_work(&p->reclaim_work); + flush_percpu_swap_cluster(p); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) @@ -2777,8 +2784,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) arch_swap_invalidate_area(p->type); zswap_swapoff(p->type); mutex_unlock(&swapon_mutex); - free_percpu(p->percpu_cluster); - p->percpu_cluster = NULL; kfree(p->global_cluster); p->global_cluster = NULL; vfree(swap_map); @@ -3124,13 +3129,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si, return maxpages; } -#define SWAP_CLUSTER_INFO_COLS \ - DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) -#define SWAP_CLUSTER_SPACE_COLS \ - DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) -#define SWAP_CLUSTER_COLS \ - max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) - static int setup_swap_map_and_extents(struct swap_info_struct *si, union swap_header *swap_header, unsigned char *swap_map, @@ -3170,14 +3168,21 @@ static int setup_swap_map_and_extents(struct swap_info_struct *si, return nr_extents; } +#define SWAP_CLUSTER_INFO_COLS \ + DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) +#define SWAP_CLUSTER_SPACE_COLS \ + DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) +#define SWAP_CLUSTER_COLS \ + max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) + static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, union swap_header *swap_header, unsigned long maxpages) { unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); struct swap_cluster_info *cluster_info; - unsigned long i, j, k, idx; - int cpu, err = -ENOMEM; + unsigned long i, j, idx; + int err = -ENOMEM; cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); if (!cluster_info) @@ -3186,20 +3191,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); - if (si->flags & SWP_SOLIDSTATE) { - si->percpu_cluster = alloc_percpu(struct percpu_cluster); - if (!si->percpu_cluster) - goto 
err_free; - - for_each_possible_cpu(cpu) { - struct percpu_cluster *cluster; - - cluster = per_cpu_ptr(si->percpu_cluster, cpu); - for (i = 0; i < SWAP_NR_ORDERS; i++) - cluster->next[i] = SWAP_ENTRY_INVALID; - local_lock_init(&cluster->lock); - } - } else { + if (!(si->flags & SWP_SOLIDSTATE)) { si->global_cluster = kmalloc(sizeof(*si->global_cluster), GFP_KERNEL); if (!si->global_cluster) @@ -3237,8 +3229,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, * Reduce false cache line sharing between cluster_info and * sharing same address space. */ - for (k = 0; k < SWAP_CLUSTER_COLS; k++) { - j = k % SWAP_CLUSTER_COLS; + for (j = 0; j < SWAP_CLUSTER_COLS; j++) { for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; @@ -3453,8 +3444,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) mutex_lock(&swapon_mutex); prio = -1; if (swap_flags & SWAP_FLAG_PREFER) - prio = - (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; + prio = swap_flags & SWAP_FLAG_PRIO_MASK; enable_swap_info(si, prio, swap_map, cluster_info, zeromap); pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n", @@ -3478,8 +3468,6 @@ free_swap_address_space: bad_swap_unlock_inode: inode_unlock(inode); bad_swap: - free_percpu(si->percpu_cluster); - si->percpu_cluster = NULL; kfree(si->global_cluster); si->global_cluster = NULL; inode = NULL; @@ -3503,8 +3491,6 @@ out: putname(name); if (inode) inode_unlock(inode); - if (!error) - enable_swap_slots_cache(); return error; } @@ -3531,7 +3517,6 @@ void si_swapinfo(struct sysinfo *val) * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL - * - swp_entry is migration entry -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM @@ -3795,8 +3780,8 @@ outer: * into, carry if so, or else fail until a new continuation page is allocated; * when the original swap_map count is decremented from 0 with continuation, * borrow from the continuation and report whether it still holds more. - * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster - * lock. + * Called while __swap_duplicate() or caller of __swap_entry_free_locked() + * holds cluster lock. */ static bool swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) @@ -3901,6 +3886,11 @@ static void free_swap_count_continuations(struct swap_info_struct *si) } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +static bool __has_usable_swap(void) +{ + return !plist_head_empty(&swap_active_head); +} + void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { struct swap_info_struct *si, *next; diff --git a/mm/truncate.c b/mm/truncate.c index 76d8fcd89bd0..5d98054094d1 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -78,8 +78,22 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping, if (dax_mapping(mapping)) { for (i = j; i < nr; i++) { - if (xa_is_value(fbatch->folios[i])) + if (xa_is_value(fbatch->folios[i])) { + /* + * File systems should already have called + * dax_break_layout_entry() to remove all DAX + * entries while holding a lock to prevent + * establishing new entries. Therefore we + * shouldn't find any here. 
+ */ + WARN_ON_ONCE(1); + + /* + * Delete the mapping so truncate_pagecache() + * doesn't loop forever. + */ dax_delete_mapping_entry(mapping, indices[i]); + } } goto out; } @@ -178,6 +192,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) { loff_t pos = folio_pos(folio); unsigned int offset, length; + struct page *split_at, *split_at2; if (pos < start) offset = start - pos; @@ -207,8 +222,42 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) folio_invalidate(folio, offset, length); if (!folio_test_large(folio)) return true; - if (split_folio(folio) == 0) + + split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); + split_at2 = folio_page(folio, + PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); + + if (!try_folio_split(folio, split_at, NULL)) { + /* + * try to split at offset + length to make sure folios within + * the range can be dropped, especially to avoid memory waste + * for shmem truncate + */ + struct folio *folio2 = page_folio(split_at2); + + if (!folio_try_get(folio2)) + goto no_split; + + if (!folio_test_large(folio2)) + goto out; + + if (!folio_trylock(folio2)) + goto out; + + /* + * make sure folio2 is large and does not change its mapping. + * Its split result does not matter here. + */ + if (folio_test_large(folio2) && + folio2->mapping == folio->mapping) + try_folio_split(folio2, split_at2, NULL); + + folio_unlock(folio2); +out: + folio_put(folio2); +no_split: return true; + } if (folio_test_dirty(folio)) return false; truncate_inode_folio(folio->mapping, folio); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index d06453fa8aba..fbf2cf62ab9f 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -86,14 +86,10 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm, mmap_read_lock(mm); vma = find_vma_and_prepare_anon(mm, address); if (!IS_ERR(vma)) { - /* - * We cannot use vma_start_read() as it may fail due to - * false locked (see comment in vma_start_read()). We - * can avoid that by directly locking vm_lock under - * mmap_lock, which guarantees that nobody can lock the - * vma for write (vma_start_write()) under us. - */ - down_read(&vma->vm_lock->lock); + bool locked = vma_start_read_locked(vma); + + if (!locked) + vma = ERR_PTR(-EAGAIN); } mmap_read_unlock(mm); @@ -1563,16 +1559,24 @@ static int uffd_move_lock(struct mm_struct *mm, mmap_read_lock(mm); err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); - if (!err) { - /* - * See comment in uffd_lock_vma() as to why not using - * vma_start_read() here. - */ - down_read(&(*dst_vmap)->vm_lock->lock); - if (*dst_vmap != *src_vmap) - down_read_nested(&(*src_vmap)->vm_lock->lock, - SINGLE_DEPTH_NESTING); + if (err) + goto out; + + if (!vma_start_read_locked(*dst_vmap)) { + err = -EAGAIN; + goto out; + } + + /* Nothing further to do if both vmas are locked. */ + if (*dst_vmap == *src_vmap) + goto out; + + if (!vma_start_read_locked_nested(*src_vmap, SINGLE_DEPTH_NESTING)) { + /* Undo dst_vmap locking if src_vmap failed to lock */ + vma_end_read(*dst_vmap); + err = -EAGAIN; } +out: mmap_read_unlock(mm); return err; } @@ -52,10 +52,9 @@ struct mmap_state { .pgoff = (map_)->pgoff, \ .file = (map_)->file, \ .prev = (map_)->prev, \ - .vma = vma_, \ + .middle = vma_, \ .next = (vma_) ? 
NULL : (map_)->next, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) @@ -107,29 +106,44 @@ static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1, * init_multi_vma_prep() - Initializer for struct vma_prepare * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked - * @next: The next vma if it is to be adjusted - * @remove: The first vma to be removed - * @remove2: The second vma to be removed + * @vmg: The merge state that will be used to determine adjustment and VMA + * removal. */ static void init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, - struct vm_area_struct *next, - struct vm_area_struct *remove, - struct vm_area_struct *remove2) + struct vma_merge_struct *vmg) { + struct vm_area_struct *adjust; + struct vm_area_struct **remove = &vp->remove; + memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; - vp->remove = remove; - vp->remove2 = remove2; - vp->adj_next = next; - if (!vp->anon_vma && next) - vp->anon_vma = next->anon_vma; + + if (vmg && vmg->__remove_middle) { + *remove = vmg->middle; + remove = &vp->remove2; + } + if (vmg && vmg->__remove_next) + *remove = vmg->next; + + if (vmg && vmg->__adjust_middle_start) + adjust = vmg->middle; + else if (vmg && vmg->__adjust_next_start) + adjust = vmg->next; + else + adjust = NULL; + + vp->adj_next = adjust; + if (!vp->anon_vma && adjust) + vp->anon_vma = adjust->anon_vma; + + VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma && + vp->anon_vma != adjust->anon_vma); vp->file = vma->vm_file; if (vp->file) vp->mapping = vma->vm_file->f_mapping; - } /* @@ -306,7 +320,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - vma_iter_store(vmi, vp->insert); + vma_iter_store_new(vmi, vp->insert); mm->map_count++; } @@ -327,7 +341,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, if (vp->remove) { again: - vma_mark_detached(vp->remove, true); + vma_mark_detached(vp->remove); if (vp->file) { uprobe_munmap(vp->remove, vp->remove->vm_start, vp->remove->vm_end); @@ -362,7 +376,7 @@ again: */ static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) { - init_multi_vma_prep(vp, vma, NULL, NULL, NULL); + init_multi_vma_prep(vp, vma, NULL); } /* @@ -406,17 +420,14 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg, /* * Close a vm structure and free it. */ -void remove_vma(struct vm_area_struct *vma, bool unreachable) +void remove_vma(struct vm_area_struct *vma) { might_sleep(); vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - if (unreachable) - __vm_area_free(vma); - else - vm_area_free(vma); + vm_area_free(vma); } /* @@ -499,7 +510,7 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); - vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); + vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); if (new_below) { vma->vm_start = addr; @@ -629,49 +640,66 @@ void validate_mm(struct mm_struct *mm) } #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ -/* Actually perform the VMA merge operation. 
*/ -static int commit_merge(struct vma_merge_struct *vmg, - struct vm_area_struct *adjust, - struct vm_area_struct *remove, - struct vm_area_struct *remove2, - long adj_start, - bool expanded) +/* + * Based on the vmg flag indicating whether we need to adjust the vm_start field + * for the middle or next VMA, we calculate what the range of the newly adjusted + * VMA ought to be, and set the VMA's range accordingly. + */ +static void vmg_adjust_set_range(struct vma_merge_struct *vmg) { - struct vma_prepare vp; + struct vm_area_struct *adjust; + pgoff_t pgoff; - init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2); + if (vmg->__adjust_middle_start) { + adjust = vmg->middle; + pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start); + } else if (vmg->__adjust_next_start) { + adjust = vmg->next; + pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end); + } else { + return; + } - VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && - vp.anon_vma != adjust->anon_vma); + vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff); +} - if (expanded) { - /* Note: vma iterator must be pointing to 'start'. */ - vma_iter_config(vmg->vmi, vmg->start, vmg->end); +/* + * Actually perform the VMA merge operation. + * + * Returns 0 on success, or an error value on failure. + */ +static int commit_merge(struct vma_merge_struct *vmg) +{ + struct vm_area_struct *vma; + struct vma_prepare vp; + + if (vmg->__adjust_next_start) { + /* We manipulate middle and adjust next, which is the target. */ + vma = vmg->middle; + vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end); } else { - vma_iter_config(vmg->vmi, adjust->vm_start + adj_start, - adjust->vm_end); + vma = vmg->target; + /* Note: vma iterator must be pointing to 'start'. */ + vma_iter_config(vmg->vmi, vmg->start, vmg->end); } - if (vma_iter_prealloc(vmg->vmi, vmg->vma)) + init_multi_vma_prep(&vp, vma, vmg); + + if (vma_iter_prealloc(vmg->vmi, vma)) return -ENOMEM; vma_prepare(&vp); - vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start); - vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); - - if (expanded) - vma_iter_store(vmg->vmi, vmg->vma); - - if (adj_start) { - adjust->vm_start += adj_start; - adjust->vm_pgoff += PHYS_PFN(adj_start); - if (adj_start < 0) { - WARN_ON(expanded); - vma_iter_store(vmg->vmi, adjust); - } - } + /* + * THP pages may need to do additional splits if we increase + * middle->vm_start. + */ + vma_adjust_trans_huge(vma, vmg->start, vmg->end, + vmg->__adjust_middle_start ? vmg->middle : NULL); + vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); + vmg_adjust_set_range(vmg); + vma_iter_store_overwrite(vmg->vmi, vmg->target); - vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm); + vma_complete(&vp, vmg->vmi, vma->vm_mm); return 0; } @@ -694,8 +722,9 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * identical properties. * * This function checks for the existence of any such mergeable VMAs and updates - * the maple tree describing the @vmg->vma->vm_mm address space to account for - * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge. + * the maple tree describing the @vmg->middle->vm_mm address space to account + * for this, as well as any VMAs shrunk/expanded/deleted as a result of this + * merge. * * As part of this operation, if a merge occurs, the @vmg object will have its * vma, start, end, and pgoff fields modified to execute the merge. 
Subsequent
@@ -704,45 +733,43 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
  *
  * ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->vma.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
  * - The caller must not set @vmg->next, as we determine this.
  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
- * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
  */
 static __must_check struct vm_area_struct *vma_merge_existing_range(
 		struct vma_merge_struct *vmg)
 {
-	struct vm_area_struct *vma = vmg->vma;
+	struct vm_area_struct *middle = vmg->middle;
 	struct vm_area_struct *prev = vmg->prev;
-	struct vm_area_struct *next, *res;
+	struct vm_area_struct *next;
 	struct vm_area_struct *anon_dup = NULL;
-	struct vm_area_struct *adjust = NULL;
 	unsigned long start = vmg->start;
 	unsigned long end = vmg->end;
-	bool left_side = vma && start == vma->vm_start;
-	bool right_side = vma && end == vma->vm_end;
+	bool left_side = middle && start == middle->vm_start;
+	bool right_side = middle && end == middle->vm_end;
 	int err = 0;
-	long adj_start = 0;
-	bool merge_will_delete_vma, merge_will_delete_next;
 	bool merge_left, merge_right, merge_both;
-	bool expanded;
 
 	mmap_assert_write_locked(vmg->mm);
-	VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
+	VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
 	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
 	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
 	VM_WARN_ON_VMG(start >= end, vmg);
 
 	/*
-	 * If vma == prev, then we are offset into a VMA. Otherwise, if we are
+	 * If middle == prev, then we are offset into a VMA. Otherwise, if we are
 	 * not, we must span a portion of the VMA.
 	 */
-	VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
-			vmg->end > vma->vm_end), vmg);
-	/* The vmi must be positioned within vmg->vma. */
-	VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
-			vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);
+	VM_WARN_ON_VMG(middle &&
+		       ((middle != prev && vmg->start != middle->vm_start) ||
+			vmg->end > middle->vm_end), vmg);
+	/* The vmi must be positioned within vmg->middle. */
+	VM_WARN_ON_VMG(middle &&
+		       !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
+			 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
 
 	vmg->state = VMA_MERGE_NOMERGE;
 
@@ -776,49 +803,52 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
 	merge_both = merge_left && merge_right;
 
 	/* If we span the entire VMA, a merge implies it will be deleted. */
-	merge_will_delete_vma = left_side && right_side;
+	vmg->__remove_middle = left_side && right_side;
 
 	/*
-	 * If we need to remove vma in its entirety but are unable to do so,
+	 * If we need to remove middle in its entirety but are unable to do so,
 	 * we have no sensible recourse but to abort the merge.
 	 */
-	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
+	if (vmg->__remove_middle && !can_merge_remove_vma(middle))
		return NULL;
 
 	/*
 	 * If we merge both VMAs, then next is also deleted. This implies
 	 * merge_will_delete_vma also.
*/ - merge_will_delete_next = merge_both; + vmg->__remove_next = merge_both; /* * If we cannot delete next, then we can reduce the operation to merging - * prev and vma (thereby deleting vma). + * prev and middle (thereby deleting middle). */ - if (merge_will_delete_next && !can_merge_remove_vma(next)) { - merge_will_delete_next = false; + if (vmg->__remove_next && !can_merge_remove_vma(next)) { + vmg->__remove_next = false; merge_right = false; merge_both = false; } - /* No matter what happens, we will be adjusting vma. */ - vma_start_write(vma); - - if (merge_left) - vma_start_write(prev); + /* No matter what happens, we will be adjusting middle. */ + vma_start_write(middle); - if (merge_right) + if (merge_right) { vma_start_write(next); + vmg->target = next; + } + + if (merge_left) { + vma_start_write(prev); + vmg->target = prev; + } if (merge_both) { /* - * |<----->| - * |-------*********-------| - * prev vma next - * extend delete delete + * |<-------------------->| + * |-------********-------| + * prev middle next + * extend delete delete */ - vmg->vma = prev; vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; @@ -826,80 +856,62 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* * We already ensured anon_vma compatibility above, so now it's * simply a case of, if prev has no anon_vma object, which of - * next or vma contains the anon_vma we must duplicate. + * next or middle contains the anon_vma we must duplicate. */ - err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup); + err = dup_anon_vma(prev, next->anon_vma ? next : middle, + &anon_dup); } else if (merge_left) { /* - * |<----->| OR - * |<--------->| + * |<------------>| OR + * |<----------------->| * |-------************* - * prev vma + * prev middle * extend shrink/delete */ - vmg->vma = prev; vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - if (!merge_will_delete_vma) { - adjust = vma; - adj_start = vmg->end - vma->vm_start; - } + if (!vmg->__remove_middle) + vmg->__adjust_middle_start = true; - err = dup_anon_vma(prev, vma, &anon_dup); + err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ /* - * |<----->| OR - * |<--------->| + * |<------------->| OR + * |<----------------->| * *************-------| - * vma next + * middle next * shrink/delete extend */ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); VM_WARN_ON_VMG(!merge_right, vmg); - /* If we are offset into a VMA, then prev must be vma. */ - VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg); + /* If we are offset into a VMA, then prev must be middle. */ + VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); - if (merge_will_delete_vma) { - vmg->vma = next; + if (vmg->__remove_middle) { vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { - /* - * We shrink vma and expand next. - * - * IMPORTANT: This is the ONLY case where the final - * merged VMA is NOT vmg->vma, but rather vmg->next. - */ - - vmg->start = vma->vm_start; + /* We shrink middle and expand next. */ + vmg->__adjust_next_start = true; + vmg->start = middle->vm_start; vmg->end = start; - vmg->pgoff = vma->vm_pgoff; - - adjust = next; - adj_start = -(vma->vm_end - start); + vmg->pgoff = middle->vm_pgoff; } - err = dup_anon_vma(next, vma, &anon_dup); + err = dup_anon_vma(next, middle, &anon_dup); } if (err) goto abort; - /* - * In nearly all cases, we expand vmg->vma. There is one exception - - * merge_right where we partially span the VMA. 
In this case we shrink - * the end of vmg->vma and adjust the start of vmg->next accordingly. - */ - expanded = !merge_right || merge_will_delete_vma; + err = commit_merge(vmg); + if (err) { + VM_WARN_ON(err != -ENOMEM); - if (commit_merge(vmg, adjust, - merge_will_delete_vma ? vma : NULL, - merge_will_delete_next ? next : NULL, - adj_start, expanded)) { if (anon_dup) unlink_anon_vmas(anon_dup); @@ -907,11 +919,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( return NULL; } - res = merge_left ? prev : next; - khugepaged_enter_vma(res, vmg->flags); - + khugepaged_enter_vma(vmg->target, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return res; + return vmg->target; abort: vma_iter_set(vmg->vmi, start); @@ -970,10 +980,9 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) struct vm_area_struct *next = vmg->next; unsigned long end = vmg->end; bool can_merge_left, can_merge_right; - bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON_VMG(vmg->vma, vmg); + VM_WARN_ON_VMG(vmg->middle, vmg); /* vmi must point at or before the gap. */ VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); @@ -984,18 +993,18 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) return NULL; can_merge_left = can_vma_merge_left(vmg); - can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left); + can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left); /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { vmg->end = next->vm_end; - vmg->vma = next; + vmg->middle = next; } /* If we can merge with the previous VMA, adjust vmg accordingly. */ if (can_merge_left) { vmg->start = prev->vm_start; - vmg->vma = prev; + vmg->middle = prev; vmg->pgoff = prev->vm_pgoff; /* @@ -1007,7 +1016,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) vmg->end = end; /* In expand-only case we are already positioned at prev. */ - if (!just_expand) { + if (!vmg->just_expand) { /* Equivalent to going to the previous range. */ vma_prev(vmg->vmi); } @@ -1017,10 +1026,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * Now try to expand adjacent VMA(s). This takes care of removing the * following VMA if we have VMAs on both sides. */ - if (vmg->vma && !vma_expand(vmg)) { - khugepaged_enter_vma(vmg->vma, vmg->flags); + if (vmg->middle && !vma_expand(vmg)) { + khugepaged_enter_vma(vmg->middle, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return vmg->vma; + return vmg->middle; } return NULL; @@ -1032,45 +1041,50 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * @vmg: Describes a VMA expansion operation. * * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. - * Will expand over vmg->next if it's different from vmg->vma and vmg->end == - * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with + * Will expand over vmg->next if it's different from vmg->middle and vmg->end == + * vmg->next->vm_end. Checking if the vmg->middle can expand and merge with * vmg->next needs to be handled by the caller. * * Returns: 0 on success. * * ASSUMPTIONS: - * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock. - * - The caller must have set @vmg->vma and @vmg->next. + * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock. + * - The caller must have set @vmg->middle and @vmg->next. 
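
For orientation, here is a minimal userspace model of the case analysis above: which VMA becomes vmg->target and which of the new internal flags are set for the merge_both, merge_left and merge_right shapes. The struct and helper below are illustrative stand-ins, not kernel code, and they omit the fallback where merge_both is downgraded when next cannot be removed.

/* Userspace model only; mirrors the flag selection, not the real merge. */
#include <stdbool.h>
#include <stdio.h>

struct vmg_model {
	const char *target;       /* which VMA the merge collapses into */
	bool remove_middle;       /* models __remove_middle             */
	bool remove_next;         /* models __remove_next               */
	bool adjust_middle_start; /* models __adjust_middle_start       */
	bool adjust_next_start;   /* models __adjust_next_start         */
};

/* spans_whole corresponds to left_side && right_side in the kernel code. */
static struct vmg_model classify(bool merge_left, bool merge_right,
				 bool spans_whole)
{
	struct vmg_model m = { .target = "none" };

	m.remove_middle = spans_whole;

	if (merge_left && merge_right) {
		/* prev absorbs middle and next; both are removed. */
		m.target = "prev";
		m.remove_next = true;
	} else if (merge_left) {
		/* prev grows; middle shrinks at its start or goes away. */
		m.target = "prev";
		m.adjust_middle_start = !spans_whole;
	} else if (merge_right) {
		/* next grows downward; middle shrinks at its end or goes away. */
		m.target = "next";
		m.adjust_next_start = !spans_whole;
	}

	return m;
}

int main(void)
{
	struct vmg_model m = classify(true, false, false);

	printf("target=%s remove_middle=%d adjust_middle_start=%d\n",
	       m.target, m.remove_middle, m.adjust_middle_start);
	return 0;
}
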
*/ int vma_expand(struct vma_merge_struct *vmg) { struct vm_area_struct *anon_dup = NULL; bool remove_next = false; - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *next = vmg->next; mmap_assert_write_locked(vmg->mm); - vma_start_write(vma); - if (next && (vma != next) && (vmg->end == next->vm_end)) { + vma_start_write(middle); + if (next && (middle != next) && (vmg->end == next->vm_end)) { int ret; remove_next = true; /* This should already have been checked by this point. */ VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg); vma_start_write(next); - ret = dup_anon_vma(vma, next, &anon_dup); + ret = dup_anon_vma(middle, next, &anon_dup); if (ret) return ret; } /* Not merging but overwriting any part of next is not handled. */ VM_WARN_ON_VMG(next && !remove_next && - next != vma && vmg->end > next->vm_start, vmg); + next != middle && vmg->end > next->vm_start, vmg); /* Only handles expanding */ - VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg); + VM_WARN_ON_VMG(middle->vm_start < vmg->start || + middle->vm_end > vmg->end, vmg); + + vmg->target = middle; + if (remove_next) + vmg->__remove_next = true; - if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) + if (commit_merge(vmg)) goto nomem; return 0; @@ -1110,7 +1124,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vma_prepare(&vp); - vma_adjust_trans_huge(vma, start, end, 0); + vma_adjust_trans_huge(vma, start, end, NULL); vma_iter_clear(vmi); vma_set_range(vma, start, end, pgoff); @@ -1199,7 +1213,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, /* Remove and clean up vmas */ mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - remove_vma(vma, /* unreachable = */ false); + remove_vma(vma); vm_unacct_memory(vms->nr_accounted); validate_mm(mm); @@ -1221,7 +1235,7 @@ static void reattach_vmas(struct ma_state *mas_detach) mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - vma_mark_detached(vma, false); + vma_mark_attached(vma); __mt_destroy(mas_detach->tree); } @@ -1296,7 +1310,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, if (error) goto munmap_gather_failed; - vma_mark_detached(next, true); + vma_mark_detached(next); nrpages = vma_pages(next); vms->nr_pages += nrpages; @@ -1508,7 +1522,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, */ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *vma = vmg->middle; unsigned long start = vmg->start; unsigned long end = vmg->end; struct vm_area_struct *merged; @@ -1609,7 +1623,7 @@ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); vmg.next = vma_iter_next_rewind(vmi, NULL); - vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ + vmg.middle = NULL; /* We use the VMA to populate VMG fields only. 
*/ return vma_merge_new_range(&vmg); } @@ -1694,7 +1708,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) return -ENOMEM; vma_start_write(vma); - vma_iter_store(&vmi, vma); + vma_iter_store_new(&vmi, vma); vma_link_file(vma); mm->map_count++; validate_mm(mm); @@ -1730,7 +1744,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ - vmg.vma = NULL; /* New VMA range. */ + vmg.middle = NULL; /* New VMA range. */ vmg.pgoff = pgoff; vmg.next = vma_iter_next_rewind(&vmi, NULL); new_vma = vma_merge_new_range(&vmg); @@ -2373,7 +2387,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap) /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); - vma_iter_store(vmi, vma); + vma_iter_store_new(vmi, vma); map->mm->map_count++; vma_link_file(vma); @@ -2587,7 +2601,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, vmg.prev = vma; /* vmi is positioned at prev, which this mode expects. */ - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; if (vma_merge_new_range(&vmg)) goto out; @@ -2850,7 +2864,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); + vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); @@ -2930,7 +2944,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) vma->vm_start = address; vma->vm_pgoff -= grow; /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); + vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); @@ -58,35 +58,85 @@ enum vma_merge_state { VMA_MERGE_SUCCESS, }; -enum vma_merge_flags { - VMG_FLAG_DEFAULT = 0, - /* - * If we can expand, simply do so. We know there is nothing to merge to - * the right. Does not reset state upon failure to merge. The VMA - * iterator is assumed to be positioned at the previous VMA, rather than - * at the gap. - */ - VMG_FLAG_JUST_EXPAND = 1 << 0, -}; - -/* Represents a VMA merge operation. */ +/* + * Describes a VMA merge operation and is threaded throughout it. + * + * Any of the fields may be mutated by the merge operation, so no guarantees are + * made to the contents of this structure after a merge operation has completed. + */ struct vma_merge_struct { struct mm_struct *mm; struct vma_iterator *vmi; - pgoff_t pgoff; + /* + * Adjacent VMAs, any of which may be NULL if not present: + * + * |------|--------|------| + * | prev | middle | next | + * |------|--------|------| + * + * middle may not yet exist in the case of a proposed new VMA being + * merged, or it may be an existing VMA. + * + * next may be assigned by the caller. + */ struct vm_area_struct *prev; - struct vm_area_struct *next; /* Modified by vma_merge(). */ - struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */ + struct vm_area_struct *middle; + struct vm_area_struct *next; + /* This is the VMA we ultimately target to become the merged VMA. */ + struct vm_area_struct *target; + /* + * Initially, the start, end, pgoff fields are provided by the caller + * and describe the proposed new VMA range, whether modifying an + * existing VMA (which will be 'middle'), or adding a new one. 
+ * + * During the merge process these fields are updated to describe the new + * range _including those VMAs which will be merged_. + */ unsigned long start; unsigned long end; + pgoff_t pgoff; + unsigned long flags; struct file *file; struct anon_vma *anon_vma; struct mempolicy *policy; struct vm_userfaultfd_ctx uffd_ctx; struct anon_vma_name *anon_name; - enum vma_merge_flags merge_flags; enum vma_merge_state state; + + /* Flags which callers can use to modify merge behaviour: */ + + /* + * If we can expand, simply do so. We know there is nothing to merge to + * the right. Does not reset state upon failure to merge. The VMA + * iterator is assumed to be positioned at the previous VMA, rather than + * at the gap. + */ + bool just_expand :1; + + /* Internal flags set during merge process: */ + + /* + * Internal flag indicating the merge increases vmg->middle->vm_start + * (and thereby, vmg->prev->vm_end). + */ + bool __adjust_middle_start :1; + /* + * Internal flag indicating the merge decreases vmg->next->vm_start + * (and thereby, vmg->middle->vm_end). + */ + bool __adjust_next_start :1; + /* + * Internal flag used during the merge operation to indicate we will + * remove vmg->middle. + */ + bool __remove_middle :1; + /* + * Internal flag used during the merge operationr to indicate we will + * remove vmg->next. + */ + bool __remove_next :1; + }; static inline bool vmg_nomem(struct vma_merge_struct *vmg) @@ -110,7 +160,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .flags = flags_, \ .pgoff = pgoff_, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \ @@ -118,8 +167,8 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .mm = vma_->vm_mm, \ .vmi = vmi_, \ .prev = prev_, \ + .middle = vma_, \ .next = NULL, \ - .vma = vma_, \ .start = start_, \ .end = end_, \ .flags = vma_->vm_flags, \ @@ -130,7 +179,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .uffd_ctx = vma_->vm_userfaultfd_ctx, \ .anon_name = anon_vma_name(vma_), \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE @@ -157,6 +205,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } @@ -169,7 +218,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock); -void remove_vma(struct vm_area_struct *vma, bool unreachable); +void remove_vma(struct vm_area_struct *vma); void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next); @@ -364,9 +413,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) } /* Store a VMA with preallocated memory */ -static inline void vma_iter_store(struct vma_iterator *vmi, - struct vm_area_struct *vma) +static inline void vma_iter_store_overwrite(struct vma_iterator *vmi, + struct vm_area_struct *vma) { + vma_assert_attached(vma); #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && @@ -391,6 +441,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi, mas_store_prealloc(&vmi->mas, vma); } +static inline void vma_iter_store_new(struct vma_iterator *vmi, + struct vm_area_struct *vma) +{ + vma_mark_attached(vma); + vma_iter_store_overwrite(vmi, vma); +} + static inline unsigned long 
vma_iter_addr(struct vma_iterator *vmi) { return vmi->mas.index; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 61981ee1c9d2..3ed720a787ec 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3771,8 +3771,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, struct vm_struct *area; void *ret; kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; - unsigned long real_size = size; - unsigned long real_align = align; + unsigned long original_align = align; unsigned int shift = PAGE_SHIFT; if (WARN_ON_ONCE(!size)) @@ -3781,7 +3780,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, if ((size >> PAGE_SHIFT) > totalram_pages()) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, exceeds total pages", - real_size); + size); return NULL; } @@ -3798,19 +3797,18 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, else shift = arch_vmap_pte_supported_shift(size); - align = max(real_align, 1UL << shift); - size = ALIGN(real_size, 1UL << shift); + align = max(original_align, 1UL << shift); } again: - area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | + area = __get_vm_area_node(size, align, shift, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) { bool nofail = gfp_mask & __GFP_NOFAIL; warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, vm_struct allocation failed%s", - real_size, (nofail) ? ". Retrying." : ""); + size, (nofail) ? ". Retrying." : ""); if (nofail) { schedule_timeout_uninterruptible(1); goto again; @@ -3860,7 +3858,7 @@ again: (gfp_mask & __GFP_SKIP_ZERO)) kasan_flags |= KASAN_VMALLOC_INIT; /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ - area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); + area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED @@ -3869,17 +3867,15 @@ again: */ clear_vm_uninitialized_flag(area); - size = PAGE_ALIGN(size); if (!(vm_flags & VM_DEFER_KMEMLEAK)) - kmemleak_vmalloc(area, size, gfp_mask); + kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); return area->addr; fail: if (shift > PAGE_SHIFT) { shift = PAGE_SHIFT; - align = real_align; - size = real_size; + align = original_align; goto again; } diff --git a/mm/vmscan.c b/mm/vmscan.c index eb228a8cd769..b620d74b0f66 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -271,6 +271,25 @@ static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) } #endif +/* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to + * and including the specified highidx + * @zone: The current zone in the iterator + * @pgdat: The pgdat which node_zones are being iterated + * @idx: The index variable + * @highidx: The index of the highest zone to return + * + * This macro iterates through all managed zones up to and including the specified highidx. + * The zone iterator enters an invalid state after macro call and must be reinitialized + * before it can be used again. 
+ */ +#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \ + for ((idx) = 0, (zone) = (pgdat)->node_zones; \ + (idx) <= (highidx); \ + (idx)++, (zone)++) \ + if (!managed_zone(zone)) \ + continue; \ + else + static void set_task_reclaim_state(struct task_struct *task, struct reclaim_state *rs) { @@ -396,13 +415,9 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, { unsigned long size = 0; int zid; + struct zone *zone; - for (zid = 0; zid <= zone_idx; zid++) { - struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) { if (!mem_cgroup_disabled()) size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); else @@ -441,21 +456,26 @@ void drop_slab(void) } while ((freed >> shift++) > 1); } -static int reclaimer_offset(void) +#define CHECK_RECLAIMER_OFFSET(type) \ + do { \ + BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \ + PGDEMOTE_##type - PGDEMOTE_KSWAPD); \ + BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \ + PGSCAN_##type - PGSCAN_KSWAPD); \ + } while (0) + +static int reclaimer_offset(struct scan_control *sc) { - BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != - PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); - BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != - PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); - BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != - PGSCAN_DIRECT - PGSCAN_KSWAPD); - BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != - PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); + CHECK_RECLAIMER_OFFSET(DIRECT); + CHECK_RECLAIMER_OFFSET(KHUGEPAGED); + CHECK_RECLAIMER_OFFSET(PROACTIVE); if (current_is_kswapd()) return 0; if (current_is_khugepaged()) return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; + if (sc->proactive) + return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD; return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; } @@ -495,7 +515,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat) { int reclaimable = 0, write_pending = 0; int i; - + struct zone *zone; /* * If kswapd is disabled, reschedule if necessary but do not * throttle as the system is likely near OOM. @@ -508,12 +528,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat) * throttle as throttling will occur when the folios cycle * towards the end of the LRU if still under writeback. 
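
A standalone sketch of the iteration pattern introduced by for_each_managed_zone_pgdat() above, using mock types rather than the kernel's: the trailing "if (!managed_zone(zone)) continue; else" lets the caller's loop body bind to the else branch, so unmanaged zones are skipped without an explicit check at every call site.

/* Userspace sketch of the iterator-with-filter macro shape; mock types only. */
#include <stdio.h>

struct mock_zone {
	unsigned long managed_pages;
	unsigned long free_pages;
};

#define MOCK_NR_ZONES 4

static int managed(const struct mock_zone *z)
{
	return z->managed_pages != 0;
}

#define for_each_managed_mock_zone(zone, zones, idx, highidx)	\
	for ((idx) = 0, (zone) = (zones);			\
	     (idx) <= (highidx);				\
	     (idx)++, (zone)++)					\
		if (!managed(zone))				\
			continue;				\
		else

int main(void)
{
	struct mock_zone zones[MOCK_NR_ZONES] = {
		{ .managed_pages = 100, .free_pages = 10 },
		{ .managed_pages = 0,   .free_pages = 0  },	/* skipped */
		{ .managed_pages = 50,  .free_pages = 5  },
		{ .managed_pages = 25,  .free_pages = 1  },
	};
	struct mock_zone *zone;
	unsigned long total_free = 0;
	int i;

	for_each_managed_mock_zone(zone, zones, i, MOCK_NR_ZONES - 1)
		total_free += zone->free_pages;

	printf("free pages in managed zones: %lu\n", total_free);	/* 16 */
	return 0;
}
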
*/ - for (i = 0; i < MAX_NR_ZONES; i++) { - struct zone *zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) { reclaimable += zone_reclaimable_pages(zone); write_pending += zone_page_state_snapshot(zone, NR_ZONE_WRITE_PENDING); @@ -769,7 +784,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, if (reclaimed && !mapping_exiting(mapping)) shadow = workingset_eviction(folio, target_memcg); __delete_from_swap_cache(folio, swap, shadow); - mem_cgroup_swapout(folio, swap); + memcg1_swapout(folio, swap); xa_unlock_irq(&mapping->i_pages); put_swap_folio(folio, swap); } else { @@ -1112,6 +1127,13 @@ retry: if (!folio_trylock(folio)) goto keep; + if (folio_contain_hwpoisoned_page(folio)) { + unmap_poisoned_folio(folio, folio_pfn(folio), false); + folio_unlock(folio); + folio_put(folio); + continue; + } + VM_BUG_ON_FOLIO(folio_test_active(folio), folio); nr_pages = folio_nr_pages(folio); @@ -1279,7 +1301,7 @@ retry: split_folio_to_list(folio, folio_list)) goto activate_locked; } - if (!add_to_swap(folio)) { + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) { int __maybe_unused order = folio_order(folio); if (!folio_test_large(folio)) @@ -1295,9 +1317,21 @@ retry: } #endif count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); - if (!add_to_swap(folio)) + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) goto activate_locked_split; } + /* + * Normally the folio will be dirtied in unmap because its + * pte should be dirty. A special case is MADV_FREE page. The + * page's pte could have dirty bit cleared but the folio's + * SwapBacked flag is still set because clearing the dirty bit + * and SwapBacked flag has no lock protected. For such folio, + * unmap will not set dirty bit for it, so folio reclaim will + * not write the folio out. This can cause data corruption when + * the folio is swapped in later. Always setting the dirty flag + * for the folio solves the problem. 
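
The CHECK_RECLAIMER_OFFSET() macro above replaces the hand-written BUILD_BUG_ON() pairs with one check per reclaimer type: the PGSTEAL_*, PGSCAN_* and PGDEMOTE_* counters must be declared in the same kswapd/direct/khugepaged/proactive order so a single offset computed from one family indexes the others. A userspace illustration of that invariant with made-up enums (not the kernel's vm_event_item):

/* Illustrative parallel enums and a compile-time offset check. */
#include <assert.h>
#include <stdio.h>

enum steal { STEAL_KSWAPD, STEAL_DIRECT, STEAL_KHUGEPAGED, STEAL_PROACTIVE };
enum scan  { SCAN_KSWAPD,  SCAN_DIRECT,  SCAN_KHUGEPAGED,  SCAN_PROACTIVE  };

#define CHECK_OFFSET(type) \
	static_assert(STEAL_##type - STEAL_KSWAPD == SCAN_##type - SCAN_KSWAPD, \
		      "counter families out of sync")

CHECK_OFFSET(DIRECT);
CHECK_OFFSET(KHUGEPAGED);
CHECK_OFFSET(PROACTIVE);

int main(void)
{
	/* One offset, analogous to reclaimer_offset(), works for both families. */
	int offset = STEAL_PROACTIVE - STEAL_KSWAPD;

	printf("proactive scan counter index: %d\n", SCAN_KSWAPD + offset);
	return 0;
}
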
+ */ + folio_mark_dirty(folio); } } @@ -1986,7 +2020,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan, &nr_scanned, sc, lru); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); - item = PGSCAN_KSWAPD + reclaimer_offset(); + item = PGSCAN_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_scanned); __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); @@ -2002,10 +2036,10 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan, spin_lock_irq(&lruvec->lru_lock); move_folios_to_lru(lruvec, &folio_list); - __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), + __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), stat.nr_demoted); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); - item = PGSTEAL_KSWAPD + reclaimer_offset(); + item = PGSTEAL_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_reclaimed); __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); @@ -2372,17 +2406,13 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) unsigned long total_high_wmark = 0; unsigned long free, anon; int z; + struct zone *zone; free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); file = node_page_state(pgdat, NR_ACTIVE_FILE) + node_page_state(pgdat, NR_INACTIVE_FILE); - for (z = 0; z < MAX_NR_ZONES; z++) { - struct zone *zone = &pgdat->node_zones[z]; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) { total_high_wmark += high_wmark_pages(zone); } @@ -2400,6 +2430,43 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) } } +static inline void calculate_pressure_balance(struct scan_control *sc, + int swappiness, u64 *fraction, u64 *denominator) +{ + unsigned long anon_cost, file_cost, total_cost; + unsigned long ap, fp; + + /* + * Calculate the pressure balance between anon and file pages. + * + * The amount of pressure we put on each LRU is inversely + * proportional to the cost of reclaiming each list, as + * determined by the share of pages that are refaulting, times + * the relative IO cost of bringing back a swapped out + * anonymous page vs reloading a filesystem page (swappiness). + * + * Although we limit that influence to ensure no list gets + * left behind completely: at least a third of the pressure is + * applied, before swappiness. + * + * With swappiness at 100, anon and file have equal IO cost. + */ + total_cost = sc->anon_cost + sc->file_cost; + anon_cost = total_cost + sc->anon_cost; + file_cost = total_cost + sc->file_cost; + total_cost = anon_cost + file_cost; + + ap = swappiness * (total_cost + 1); + ap /= anon_cost + 1; + + fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); + fp /= file_cost + 1; + + fraction[WORKINGSET_ANON] = ap; + fraction[WORKINGSET_FILE] = fp; + *denominator = ap + fp; +} + /* * Determine how aggressively the anon and file LRU lists should be * scanned. @@ -2412,12 +2479,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, { struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); - unsigned long anon_cost, file_cost, total_cost; int swappiness = sc_swappiness(sc, memcg); u64 fraction[ANON_AND_FILE]; u64 denominator = 0; /* gcc */ enum scan_balance scan_balance; - unsigned long ap, fp; enum lru_list lru; /* If we have no swap space, do not bother scanning anon folios. 
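
The arithmetic factored out into calculate_pressure_balance() above can be exercised on its own. The sketch below is a userspace rendering with example numbers, assuming the kernel's 0-200 swappiness range; it is not the kernel function itself. Padding each list's cost with the combined cost is what keeps either list from receiving less than roughly a third of the pressure before swappiness is applied.

/* Userspace rendering of the anon/file pressure split; example values only. */
#include <stdio.h>

#define MAX_SWAPPINESS 200

static void pressure_balance(unsigned long anon_cost, unsigned long file_cost,
			     int swappiness,
			     unsigned long long *ap, unsigned long long *fp)
{
	unsigned long long total = anon_cost + file_cost;
	unsigned long long a = total + anon_cost;	/* padded anon cost */
	unsigned long long f = total + file_cost;	/* padded file cost */
	unsigned long long t = a + f;

	*ap = (unsigned long long)swappiness * (t + 1) / (a + 1);
	*fp = (unsigned long long)(MAX_SWAPPINESS - swappiness) * (t + 1) / (f + 1);
}

int main(void)
{
	unsigned long long ap, fp;

	/* Mostly file refaults, swappiness of 60 as an example. */
	pressure_balance(100, 900, 60, &ap, &fp);
	printf("anon share: %.2f\n", (double)ap / (double)(ap + fp));
	printf("file share: %.2f\n", (double)fp / (double)(ap + fp));
	return 0;
}
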
*/ @@ -2466,35 +2531,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, } scan_balance = SCAN_FRACT; - /* - * Calculate the pressure balance between anon and file pages. - * - * The amount of pressure we put on each LRU is inversely - * proportional to the cost of reclaiming each list, as - * determined by the share of pages that are refaulting, times - * the relative IO cost of bringing back a swapped out - * anonymous page vs reloading a filesystem page (swappiness). - * - * Although we limit that influence to ensure no list gets - * left behind completely: at least a third of the pressure is - * applied, before swappiness. - * - * With swappiness at 100, anon and file have equal IO cost. - */ - total_cost = sc->anon_cost + sc->file_cost; - anon_cost = total_cost + sc->anon_cost; - file_cost = total_cost + sc->file_cost; - total_cost = anon_cost + file_cost; + calculate_pressure_balance(sc, swappiness, fraction, &denominator); - ap = swappiness * (total_cost + 1); - ap /= anon_cost + 1; - - fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); - fp /= file_cost + 1; - - fraction[0] = ap; - fraction[1] = fp; - denominator = ap + fp; out: for_each_evictable_lru(lru) { bool file = is_file_lru(lru); @@ -4545,7 +4583,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, break; } - item = PGSCAN_KSWAPD + reclaimer_offset(); + item = PGSCAN_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) { __count_vm_events(item, isolated); __count_vm_events(PGREFILL, sorted); @@ -4695,10 +4733,10 @@ retry: reset_batch_size(walk); } - __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), + __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), stat.nr_demoted); - item = PGSTEAL_KSWAPD + reclaimer_offset(); + item = PGSTEAL_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, reclaimed); __count_memcg_events(memcg, item, reclaimed); @@ -5843,6 +5881,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, unsigned long pages_for_compaction; unsigned long inactive_lru_pages; int z; + struct zone *zone; /* If not in reclaim/compaction mode, stop */ if (!in_reclaim_compaction(sc)) @@ -5862,17 +5901,16 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, return false; /* If compaction would go ahead or the allocation would succeed, stop */ - for (z = 0; z <= sc->reclaim_idx; z++) { - struct zone *zone = &pgdat->node_zones[z]; - if (!managed_zone(zone)) - continue; + for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { + unsigned long watermark = min_wmark_pages(zone); /* Allocation can already succeed, nothing to do */ - if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + if (zone_watermark_ok(zone, sc->order, watermark, sc->reclaim_idx, 0)) return false; - if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) + if (compaction_suitable(zone, sc->order, watermark, + sc->reclaim_idx)) return false; } @@ -6099,22 +6137,21 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) sc->reclaim_idx, 0)) return true; - /* Compaction cannot yet proceed. Do reclaim. */ - if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) - return false; - /* - * Compaction is already possible, but it takes time to run and there - * are potentially other callers using the pages just freed. So proceed - * with reclaim to make a buffer of free pages available to give - * compaction a reasonable chance of completing and allocating the page. 
+ * Direct reclaim usually targets the min watermark, but compaction + * takes time to run and there are potentially other callers using the + * pages just freed. So target a higher buffer to give compaction a + * reasonable chance of completing and allocating the pages. + * * Note that we won't actually reclaim the whole buffer in one attempt * as the target watermark in should_continue_reclaim() is lower. But if * we are already above the high+gap watermark, don't reclaim at all. */ - watermark = high_wmark_pages(zone) + compact_gap(sc->order); + watermark = high_wmark_pages(zone); + if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx)) + return true; - return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); + return false; } static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) @@ -6393,11 +6430,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) return true; - for (i = 0; i <= ZONE_NORMAL; i++) { - zone = &pgdat->node_zones[i]; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) { if (!zone_reclaimable_pages(zone)) continue; @@ -6702,17 +6735,25 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) * Check watermarks bottom-up as lower zones are more likely to * meet watermarks. */ - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { + unsigned long free_pages; if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) mark = promo_wmark_pages(zone); else mark = high_wmark_pages(zone); - if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) + + /* + * In defrag_mode, watermarks must be met in whole + * blocks to avoid polluting allocator fallbacks. + */ + if (defrag_mode) + free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); + else + free_pages = zone_page_state(zone, NR_FREE_PAGES); + + if (__zone_watermark_ok(zone, order, mark, highest_zoneidx, + 0, free_pages)) return true; } @@ -6792,11 +6833,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, /* Reclaim a number of pages proportional to the number of zones */ sc->nr_to_reclaim = 0; - for (z = 0; z <= sc->reclaim_idx; z++) { - zone = pgdat->node_zones + z; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); } @@ -6827,12 +6864,7 @@ update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) int i; struct zone *zone; - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { if (active) set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); else @@ -6893,11 +6925,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) * stall or direct reclaim until kswapd is finished. 
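
A simplified model of the pgdat_balanced() change above: in defrag_mode the balance check consumes the whole-block free counter (NR_FREE_PAGES_BLOCKS) instead of the total free page count, so a node with plenty of scattered free pages but few fully free pageblocks is still treated as unbalanced. The helper below stands in for __zone_watermark_ok() and is illustrative only.

/* Simplified stand-in for the watermark input selection; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool zone_balanced(bool defrag_mode, unsigned long free_pages,
			  unsigned long free_pages_blocks, unsigned long mark)
{
	unsigned long usable = defrag_mode ? free_pages_blocks : free_pages;

	return usable >= mark;
}

int main(void)
{
	/* Plenty of free pages overall, but little of it in whole blocks. */
	unsigned long free_pages = 10000, free_pages_blocks = 512, mark = 2048;

	printf("balanced (normal): %d\n",
	       zone_balanced(false, free_pages, free_pages_blocks, mark));
	printf("balanced (defrag): %d\n",
	       zone_balanced(true, free_pages, free_pages_blocks, mark));
	return 0;
}
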
*/ nr_boost_reclaim = 0; - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { nr_boost_reclaim += zone->watermark_boost; zone_boosts[i] = zone->watermark_boost; } @@ -7599,11 +7627,11 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) return NODE_RECLAIM_NOSCAN; - if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) + if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); - clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); + clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); if (ret) count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS); diff --git a/mm/vmstat.c b/mm/vmstat.c index 651318765ebf..4c268ce39ff2 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1193,6 +1193,7 @@ int fragmentation_index(struct zone *zone, unsigned int order) const char * const vmstat_text[] = { /* enum zone_stat_item counters */ "nr_free_pages", + "nr_free_pages_blocks", "nr_zone_inactive_anon", "nr_zone_active_anon", "nr_zone_inactive_file", @@ -1276,9 +1277,11 @@ const char * const vmstat_text[] = { "pgdemote_kswapd", "pgdemote_direct", "pgdemote_khugepaged", + "pgdemote_proactive", #ifdef CONFIG_HUGETLB_PAGE "nr_hugetlb", #endif + "nr_balloon_pages", /* system-wide enum vm_stat_item counters */ "nr_dirty_threshold", "nr_dirty_background_threshold", @@ -1310,9 +1313,11 @@ const char * const vmstat_text[] = { "pgsteal_kswapd", "pgsteal_direct", "pgsteal_khugepaged", + "pgsteal_proactive", "pgscan_kswapd", "pgscan_direct", "pgscan_khugepaged", + "pgscan_proactive", "pgscan_direct_throttle", "pgscan_anon", "pgscan_file", diff --git a/mm/z3fold.c b/mm/z3fold.c deleted file mode 100644 index 379d24b4fef9..000000000000 --- a/mm/z3fold.c +++ /dev/null @@ -1,1447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * z3fold.c - * - * Author: Vitaly Wool <vitaly.wool@konsulko.com> - * Copyright (C) 2016, Sony Mobile Communications Inc. - * - * This implementation is based on zbud written by Seth Jennings. - * - * z3fold is an special purpose allocator for storing compressed pages. It - * can store up to three compressed pages per page which improves the - * compression ratio of zbud while retaining its main concepts (e. g. always - * storing an integral number of objects per page) and simplicity. - * It still has simple and deterministic reclaim properties that make it - * preferable to a higher density approach (with no requirement on integral - * number of object per page) when reclaim is used. - * - * As in zbud, pages are divided into "chunks". The size of the chunks is - * fixed at compile time and is determined by NCHUNKS_ORDER below. - * - * z3fold doesn't export any API and is meant to be used via zpool API. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/atomic.h> -#include <linux/sched.h> -#include <linux/cpumask.h> -#include <linux/list.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/page-flags.h> -#include <linux/migrate.h> -#include <linux/node.h> -#include <linux/compaction.h> -#include <linux/percpu.h> -#include <linux/preempt.h> -#include <linux/workqueue.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/zpool.h> -#include <linux/kmemleak.h> - -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks - * in the beginning of an allocated page are occupied by z3fold header, so - * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), - * which shows the max number of free chunks in z3fold page, also there will - * be 63, or 62, respectively, freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) -#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) -#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) -#define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS) - -#define BUDDY_MASK (0x3) -#define BUDDY_SHIFT 2 -#define SLOTS_ALIGN (0x40) - -/***************** - * Structures -*****************/ -struct z3fold_pool; - -enum buddy { - HEADLESS = 0, - FIRST, - MIDDLE, - LAST, - BUDDIES_MAX = LAST -}; - -struct z3fold_buddy_slots { - /* - * we are using BUDDY_MASK in handle_to_buddy etc. 
so there should - * be enough slots to hold all possible variants - */ - unsigned long slot[BUDDY_MASK + 1]; - unsigned long pool; /* back link */ - rwlock_t lock; -}; -#define HANDLE_FLAG_MASK (0x03) - -/* - * struct z3fold_header - z3fold page metadata occupying first chunks of each - * z3fold page, except for HEADLESS pages - * @buddy: links the z3fold page into the relevant list in the - * pool - * @page_lock: per-page lock - * @refcount: reference count for the z3fold page - * @work: work_struct for page layout optimization - * @slots: pointer to the structure holding buddy slots - * @pool: pointer to the containing pool - * @cpu: CPU which this page "belongs" to - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @middle_chunks: the size of the middle buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - * @first_num: the starting number (for the first handle) - * @mapped_count: the number of objects currently mapped - */ -struct z3fold_header { - struct list_head buddy; - spinlock_t page_lock; - struct kref refcount; - struct work_struct work; - struct z3fold_buddy_slots *slots; - struct z3fold_pool *pool; - short cpu; - unsigned short first_chunks; - unsigned short middle_chunks; - unsigned short last_chunks; - unsigned short start_middle; - unsigned short first_num:2; - unsigned short mapped_count:2; - unsigned short foreign_handles:2; -}; - -/** - * struct z3fold_pool - stores metadata for each z3fold pool - * @name: pool name - * @lock: protects pool unbuddied lists - * @stale_lock: protects pool stale page list - * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- - * buddies; the list each z3fold page is added to depends on - * the size of its free region. - * @stale: list of pages marked for freeing - * @pages_nr: number of z3fold pages in the pool. - * @c_handle: cache for z3fold_buddy_slots allocation - * @compact_wq: workqueue for page layout background optimization - * @release_wq: workqueue for safe page release - * @work: work_struct for safe page release - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular z3fold pool. 
- */ -struct z3fold_pool { - const char *name; - spinlock_t lock; - spinlock_t stale_lock; - struct list_head __percpu *unbuddied; - struct list_head stale; - atomic64_t pages_nr; - struct kmem_cache *c_handle; - struct workqueue_struct *compact_wq; - struct workqueue_struct *release_wq; - struct work_struct work; -}; - -/* - * Internal z3fold page flags - */ -enum z3fold_page_flags { - PAGE_HEADLESS = 0, - MIDDLE_CHUNK_MAPPED, - NEEDS_COMPACTING, - PAGE_STALE, - PAGE_CLAIMED, /* by either reclaim or free */ - PAGE_MIGRATED, /* page is migrated and soon to be released */ -}; - -/* - * handle flags, go under HANDLE_FLAG_MASK - */ -enum z3fold_handle_flags { - HANDLES_NOFREE = 0, -}; - -/* - * Forward declarations - */ -static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool); -static void compact_page_work(struct work_struct *w); - -/***************** - * Helpers -*****************/ - -/* Converts an allocation size in bytes to size in z3fold chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, - gfp_t gfp) -{ - struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle, - gfp); - - if (slots) { - /* It will be freed separately in free_handle(). */ - kmemleak_not_leak(slots); - slots->pool = (unsigned long)pool; - rwlock_init(&slots->lock); - } - - return slots; -} - -static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s) -{ - return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); -} - -static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle) -{ - return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1)); -} - -/* Lock a z3fold page */ -static inline void z3fold_page_lock(struct z3fold_header *zhdr) -{ - spin_lock(&zhdr->page_lock); -} - -/* Try to lock a z3fold page */ -static inline int z3fold_page_trylock(struct z3fold_header *zhdr) -{ - return spin_trylock(&zhdr->page_lock); -} - -/* Unlock a z3fold page */ -static inline void z3fold_page_unlock(struct z3fold_header *zhdr) -{ - spin_unlock(&zhdr->page_lock); -} - -/* return locked z3fold page if it's not headless */ -static inline struct z3fold_header *get_z3fold_header(unsigned long handle) -{ - struct z3fold_buddy_slots *slots; - struct z3fold_header *zhdr; - int locked = 0; - - if (!(handle & (1 << PAGE_HEADLESS))) { - slots = handle_to_slots(handle); - do { - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - locked = z3fold_page_trylock(zhdr); - read_unlock(&slots->lock); - if (locked) { - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_MIGRATED, &page->private)) - break; - z3fold_page_unlock(zhdr); - } - cpu_relax(); - } while (true); - } else { - zhdr = (struct z3fold_header *)(handle & PAGE_MASK); - } - - return zhdr; -} - -static inline void put_z3fold_header(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_HEADLESS, &page->private)) - z3fold_page_unlock(zhdr); -} - -static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) -{ - struct z3fold_buddy_slots *slots; - int i; - bool is_free; - - if (WARN_ON(*(unsigned long *)handle == 0)) - return; - - slots = handle_to_slots(handle); - write_lock(&slots->lock); - *(unsigned long 
*)handle = 0; - - if (test_bit(HANDLES_NOFREE, &slots->pool)) { - write_unlock(&slots->lock); - return; /* simple case, nothing else to do */ - } - - if (zhdr->slots != slots) - zhdr->foreign_handles--; - - is_free = true; - for (i = 0; i <= BUDDY_MASK; i++) { - if (slots->slot[i]) { - is_free = false; - break; - } - } - write_unlock(&slots->lock); - - if (is_free) { - struct z3fold_pool *pool = slots_to_pool(slots); - - if (zhdr->slots == slots) - zhdr->slots = NULL; - kmem_cache_free(pool->c_handle, slots); - } -} - -/* Initializes the z3fold header of a newly allocated z3fold page */ -static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, - struct z3fold_pool *pool, gfp_t gfp) -{ - struct z3fold_header *zhdr = page_address(page); - struct z3fold_buddy_slots *slots; - - clear_bit(PAGE_HEADLESS, &page->private); - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - clear_bit(PAGE_STALE, &page->private); - clear_bit(PAGE_CLAIMED, &page->private); - clear_bit(PAGE_MIGRATED, &page->private); - if (headless) - return zhdr; - - slots = alloc_slots(pool, gfp); - if (!slots) - return NULL; - - memset(zhdr, 0, sizeof(*zhdr)); - spin_lock_init(&zhdr->page_lock); - kref_init(&zhdr->refcount); - zhdr->cpu = -1; - zhdr->slots = slots; - zhdr->pool = pool; - INIT_LIST_HEAD(&zhdr->buddy); - INIT_WORK(&zhdr->work, compact_page_work); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_z3fold_page(struct page *page, bool headless) -{ - if (!headless) { - lock_page(page); - __ClearPageMovable(page); - unlock_page(page); - } - __free_page(page); -} - -/* Helper function to build the index */ -static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) -{ - return (bud + zhdr->first_num) & BUDDY_MASK; -} - -/* - * Encodes the handle of a particular buddy within a z3fold page. - * Zhdr->page_lock should be held as this function accesses first_num - * if bud != HEADLESS. - */ -static unsigned long __encode_handle(struct z3fold_header *zhdr, - struct z3fold_buddy_slots *slots, - enum buddy bud) -{ - unsigned long h = (unsigned long)zhdr; - int idx = 0; - - /* - * For a headless page, its handle is its pointer with the extra - * PAGE_HEADLESS bit set - */ - if (bud == HEADLESS) - return h | (1 << PAGE_HEADLESS); - - /* otherwise, return pointer to encoded handle */ - idx = __idx(zhdr, bud); - h += idx; - if (bud == LAST) - h |= (zhdr->last_chunks << BUDDY_SHIFT); - - write_lock(&slots->lock); - slots->slot[idx] = h; - write_unlock(&slots->lock); - return (unsigned long)&slots->slot[idx]; -} - -static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) -{ - return __encode_handle(zhdr, zhdr->slots, bud); -} - -/* only for LAST bud, returns zero otherwise */ -static unsigned short handle_to_chunks(unsigned long handle) -{ - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - return (addr & ~PAGE_MASK) >> BUDDY_SHIFT; -} - -/* - * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle - * but that doesn't matter. because the masking will result in the - * correct buddy number. 
- */ -static enum buddy handle_to_buddy(unsigned long handle) -{ - struct z3fold_header *zhdr; - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - WARN_ON(handle & (1 << PAGE_HEADLESS)); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - return (addr - zhdr->first_num) & BUDDY_MASK; -} - -static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) -{ - return zhdr->pool; -} - -static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) -{ - struct page *page = virt_to_page(zhdr); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - WARN_ON(!list_empty(&zhdr->buddy)); - set_bit(PAGE_STALE, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - spin_lock(&pool->lock); - spin_unlock(&pool->lock); - - if (locked) - z3fold_page_unlock(zhdr); - - spin_lock(&pool->stale_lock); - list_add(&zhdr->buddy, &pool->stale); - queue_work(pool->release_wq, &pool->work); - spin_unlock(&pool->stale_lock); - - atomic64_dec(&pool->pages_nr); -} - -static void release_z3fold_page_locked(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static void release_z3fold_page_locked_list(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static inline int put_z3fold_locked(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked); -} - -static inline int put_z3fold_locked_list(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked_list); -} - -static void free_pages_work(struct work_struct *w) -{ - struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); - - spin_lock(&pool->stale_lock); - while (!list_empty(&pool->stale)) { - struct z3fold_header *zhdr = list_first_entry(&pool->stale, - struct z3fold_header, buddy); - struct page *page = virt_to_page(zhdr); - - list_del(&zhdr->buddy); - if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) - continue; - spin_unlock(&pool->stale_lock); - cancel_work_sync(&zhdr->work); - free_z3fold_page(page, false); - cond_resched(); - spin_lock(&pool->stale_lock); - } - spin_unlock(&pool->stale_lock); -} - -/* - * Returns the number of free chunks in a z3fold page. - * NB: can't be used with HEADLESS pages. - */ -static int num_free_chunks(struct z3fold_header *zhdr) -{ - int nfree; - /* - * If there is a middle object, pick up the bigger free space - * either before or after it. Otherwise just subtract the number - * of chunks occupied by the first and the last objects. - */ - if (zhdr->middle_chunks != 0) { - int nfree_before = zhdr->first_chunks ? - 0 : zhdr->start_middle - ZHDR_CHUNKS; - int nfree_after = zhdr->last_chunks ? 
- 0 : TOTAL_CHUNKS - - (zhdr->start_middle + zhdr->middle_chunks); - nfree = max(nfree_before, nfree_after); - } else - nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; - return nfree; -} - -/* Add to the appropriate unbuddied list */ -static inline void add_to_unbuddied(struct z3fold_pool *pool, - struct z3fold_header *zhdr) -{ - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || - zhdr->middle_chunks == 0) { - struct list_head *unbuddied; - int freechunks = num_free_chunks(zhdr); - - migrate_disable(); - unbuddied = this_cpu_ptr(pool->unbuddied); - spin_lock(&pool->lock); - list_add(&zhdr->buddy, &unbuddied[freechunks]); - spin_unlock(&pool->lock); - zhdr->cpu = smp_processor_id(); - migrate_enable(); - } -} - -static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) -{ - enum buddy bud = HEADLESS; - - if (zhdr->middle_chunks) { - if (!zhdr->first_chunks && - chunks <= zhdr->start_middle - ZHDR_CHUNKS) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - } else { - if (!zhdr->first_chunks) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - else - bud = MIDDLE; - } - - return bud; -} - -static inline void *mchunk_memmove(struct z3fold_header *zhdr, - unsigned short dst_chunk) -{ - void *beg = zhdr; - return memmove(beg + (dst_chunk << CHUNK_SHIFT), - beg + (zhdr->start_middle << CHUNK_SHIFT), - zhdr->middle_chunks << CHUNK_SHIFT); -} - -static inline bool buddy_single(struct z3fold_header *zhdr) -{ - return !((zhdr->first_chunks && zhdr->middle_chunks) || - (zhdr->first_chunks && zhdr->last_chunks) || - (zhdr->middle_chunks && zhdr->last_chunks)); -} - -static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - void *p = zhdr; - unsigned long old_handle = 0; - size_t sz = 0; - struct z3fold_header *new_zhdr = NULL; - int first_idx = __idx(zhdr, FIRST); - int middle_idx = __idx(zhdr, MIDDLE); - int last_idx = __idx(zhdr, LAST); - unsigned short *moved_chunks = NULL; - - /* - * No need to protect slots here -- all the slots are "local" and - * the page lock is already taken - */ - if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { - p += ZHDR_SIZE_ALIGNED; - sz = zhdr->first_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; - moved_chunks = &zhdr->first_chunks; - } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { - p += zhdr->start_middle << CHUNK_SHIFT; - sz = zhdr->middle_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; - moved_chunks = &zhdr->middle_chunks; - } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { - p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - sz = zhdr->last_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; - moved_chunks = &zhdr->last_chunks; - } - - if (sz > 0) { - enum buddy new_bud = HEADLESS; - short chunks = size_to_chunks(sz); - void *q; - - new_zhdr = __z3fold_alloc(pool, sz, false); - if (!new_zhdr) - return NULL; - - if (WARN_ON(new_zhdr == zhdr)) - goto out_fail; - - new_bud = get_free_buddy(new_zhdr, chunks); - q = new_zhdr; - switch (new_bud) { - case FIRST: - new_zhdr->first_chunks = chunks; - q += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - new_zhdr->middle_chunks = chunks; - new_zhdr->start_middle = - new_zhdr->first_chunks + ZHDR_CHUNKS; - q += new_zhdr->start_middle << CHUNK_SHIFT; - break; - case LAST: - new_zhdr->last_chunks = chunks; - q += PAGE_SIZE - (new_zhdr->last_chunks << 
CHUNK_SHIFT); - break; - default: - goto out_fail; - } - new_zhdr->foreign_handles++; - memcpy(q, p, sz); - write_lock(&zhdr->slots->lock); - *(unsigned long *)old_handle = (unsigned long)new_zhdr + - __idx(new_zhdr, new_bud); - if (new_bud == LAST) - *(unsigned long *)old_handle |= - (new_zhdr->last_chunks << BUDDY_SHIFT); - write_unlock(&zhdr->slots->lock); - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - - *moved_chunks = 0; - } - - return new_zhdr; - -out_fail: - if (new_zhdr && !put_z3fold_locked(new_zhdr)) { - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - } - return NULL; - -} - -#define BIG_CHUNK_GAP 3 -/* Has to be called with lock held */ -static int z3fold_compact_page(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) - return 0; /* can't move middle chunk, it's used */ - - if (unlikely(PageIsolated(page))) - return 0; - - if (zhdr->middle_chunks == 0) - return 0; /* nothing to compact */ - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* move to the beginning */ - mchunk_memmove(zhdr, ZHDR_CHUNKS); - zhdr->first_chunks = zhdr->middle_chunks; - zhdr->middle_chunks = 0; - zhdr->start_middle = 0; - zhdr->first_num++; - return 1; - } - - /* - * moving data is expensive, so let's only do that if - * there's substantial gain (at least BIG_CHUNK_GAP chunks) - */ - if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && - zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= - BIG_CHUNK_GAP) { - mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - return 1; - } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && - TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle - + zhdr->middle_chunks) >= - BIG_CHUNK_GAP) { - unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - - zhdr->middle_chunks; - mchunk_memmove(zhdr, new_start); - zhdr->start_middle = new_start; - return 1; - } - - return 0; -} - -static void do_compact_page(struct z3fold_header *zhdr, bool locked) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - struct page *page; - - page = virt_to_page(zhdr); - if (locked) - WARN_ON(z3fold_page_trylock(zhdr)); - else - z3fold_page_lock(zhdr); - if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { - z3fold_page_unlock(zhdr); - return; - } - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - if (put_z3fold_locked(zhdr)) - return; - - if (test_bit(PAGE_STALE, &page->private) || - test_and_set_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - return; - } - - if (!zhdr->foreign_handles && buddy_single(zhdr) && - zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { - if (!put_z3fold_locked(zhdr)) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - } - return; - } - - z3fold_compact_page(zhdr); - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); -} - -static void compact_page_work(struct work_struct *w) -{ - struct z3fold_header *zhdr = container_of(w, struct z3fold_header, - work); - - do_compact_page(zhdr, false); -} - -/* returns _locked_ z3fold page header or NULL */ -static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, - size_t size, bool can_sleep) -{ - struct z3fold_header *zhdr = NULL; - struct page *page; - struct list_head *unbuddied; - int chunks = size_to_chunks(size), i; - -lookup: - 
migrate_disable(); - /* First, try to find an unbuddied z3fold page. */ - unbuddied = this_cpu_ptr(pool->unbuddied); - for_each_unbuddied_list(i, chunks) { - struct list_head *l = &unbuddied[i]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr) - continue; - - /* Re-check under lock. */ - spin_lock(&pool->lock); - if (unlikely(zhdr != list_first_entry(READ_ONCE(l), - struct z3fold_header, buddy)) || - !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - - /* - * this page could not be removed from its unbuddied - * list while pool lock was held, and then we've taken - * page lock so kref_put could not be called before - * we got here, so it's safe to just call kref_get() - */ - kref_get(&zhdr->refcount); - break; - } - migrate_enable(); - - if (!zhdr) { - int cpu; - - /* look for _exact_ match on other cpus' lists */ - for_each_online_cpu(cpu) { - struct list_head *l; - - unbuddied = per_cpu_ptr(pool->unbuddied, cpu); - spin_lock(&pool->lock); - l = &unbuddied[chunks]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr || !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - continue; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - if (can_sleep) - cond_resched(); - continue; - } - kref_get(&zhdr->refcount); - break; - } - } - - if (zhdr && !zhdr->slots) { - zhdr->slots = alloc_slots(pool, GFP_ATOMIC); - if (!zhdr->slots) - goto out_fail; - } - return zhdr; - -out_fail: - if (!put_z3fold_locked(zhdr)) { - add_to_unbuddied(pool, zhdr); - z3fold_page_unlock(zhdr); - } - return NULL; -} - -/* - * API Functions - */ - -/** - * z3fold_create_pool() - create a new z3fold pool - * @name: pool name - * @gfp: gfp flags when allocating the z3fold pool structure - * - * Return: pointer to the new z3fold pool or NULL if the metadata allocation - * failed. 
- */ -static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) -{ - struct z3fold_pool *pool = NULL; - int i, cpu; - - pool = kzalloc(sizeof(struct z3fold_pool), gfp); - if (!pool) - goto out; - pool->c_handle = kmem_cache_create("z3fold_handle", - sizeof(struct z3fold_buddy_slots), - SLOTS_ALIGN, 0, NULL); - if (!pool->c_handle) - goto out_c; - spin_lock_init(&pool->lock); - spin_lock_init(&pool->stale_lock); - pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, - __alignof__(struct list_head)); - if (!pool->unbuddied) - goto out_pool; - for_each_possible_cpu(cpu) { - struct list_head *unbuddied = - per_cpu_ptr(pool->unbuddied, cpu); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&unbuddied[i]); - } - INIT_LIST_HEAD(&pool->stale); - atomic64_set(&pool->pages_nr, 0); - pool->name = name; - pool->compact_wq = create_singlethread_workqueue(pool->name); - if (!pool->compact_wq) - goto out_unbuddied; - pool->release_wq = create_singlethread_workqueue(pool->name); - if (!pool->release_wq) - goto out_wq; - INIT_WORK(&pool->work, free_pages_work); - return pool; - -out_wq: - destroy_workqueue(pool->compact_wq); -out_unbuddied: - free_percpu(pool->unbuddied); -out_pool: - kmem_cache_destroy(pool->c_handle); -out_c: - kfree(pool); -out: - return NULL; -} - -/** - * z3fold_destroy_pool() - destroys an existing z3fold pool - * @pool: the z3fold pool to be destroyed - * - * The pool should be emptied before this function is called. - */ -static void z3fold_destroy_pool(struct z3fold_pool *pool) -{ - kmem_cache_destroy(pool->c_handle); - - /* - * We need to destroy pool->compact_wq before pool->release_wq, - * as any pending work on pool->compact_wq will call - * queue_work(pool->release_wq, &pool->work). - * - * There are still outstanding pages until both workqueues are drained, - * so we cannot unregister migration until then. - */ - - destroy_workqueue(pool->compact_wq); - destroy_workqueue(pool->release_wq); - free_percpu(pool->unbuddied); - kfree(pool); -} - -static const struct movable_operations z3fold_mops; - -/** - * z3fold_alloc() - allocates a region of a given size - * @pool: z3fold pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. 
- */ -static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks = size_to_chunks(size); - struct z3fold_header *zhdr = NULL; - struct page *page = NULL; - enum buddy bud; - bool can_sleep = gfpflags_allow_blocking(gfp); - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - - if (size > PAGE_SIZE) - return -ENOSPC; - - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - bud = HEADLESS; - else { -retry: - zhdr = __z3fold_alloc(pool, size, can_sleep); - if (zhdr) { - bud = get_free_buddy(zhdr, chunks); - if (bud == HEADLESS) { - if (!put_z3fold_locked(zhdr)) - z3fold_page_unlock(zhdr); - pr_err("No free chunks in unbuddied\n"); - WARN_ON(1); - goto retry; - } - page = virt_to_page(zhdr); - goto found; - } - bud = FIRST; - } - - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - - zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); - if (!zhdr) { - __free_page(page); - return -ENOMEM; - } - atomic64_inc(&pool->pages_nr); - - if (bud == HEADLESS) { - set_bit(PAGE_HEADLESS, &page->private); - goto headless; - } - if (can_sleep) { - lock_page(page); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } else { - WARN_ON(!trylock_page(page)); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } - z3fold_page_lock(zhdr); - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else if (bud == LAST) - zhdr->last_chunks = chunks; - else { - zhdr->middle_chunks = chunks; - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - } - add_to_unbuddied(pool, zhdr); - -headless: - spin_lock(&pool->lock); - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - if (bud != HEADLESS) - z3fold_page_unlock(zhdr); - - return 0; -} - -/** - * z3fold_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by z3fold_alloc() - * - * In the case that the z3fold page in which the allocation resides is under - * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function - * only sets the first|middle|last_chunks to 0. The page is actually freed - * once all buddies are evicted (see z3fold_reclaim_page() below). - */ -static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy bud; - bool page_claimed; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private); - - if (test_bit(PAGE_HEADLESS, &page->private)) { - /* if a headless page is under reclaim, just leave. - * NB: we use test_and_set_bit for a reason: if the bit - * has not been set before, we release this page - * immediately so we don't care about its value any more. 
- */ - if (!page_claimed) { - put_z3fold_header(zhdr); - free_z3fold_page(page, true); - atomic64_dec(&pool->pages_nr); - } - return; - } - - /* Non-headless case */ - bud = handle_to_buddy(handle); - - switch (bud) { - case FIRST: - zhdr->first_chunks = 0; - break; - case MIDDLE: - zhdr->middle_chunks = 0; - break; - case LAST: - zhdr->last_chunks = 0; - break; - default: - pr_err("%s: unknown bud %d\n", __func__, bud); - WARN_ON(1); - put_z3fold_header(zhdr); - return; - } - - if (!page_claimed) - free_handle(handle, zhdr); - if (put_z3fold_locked_list(zhdr)) - return; - if (page_claimed) { - /* the page has not been claimed by us */ - put_z3fold_header(zhdr); - return; - } - if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { - clear_bit(PAGE_CLAIMED, &page->private); - put_z3fold_header(zhdr); - return; - } - if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { - zhdr->cpu = -1; - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - do_compact_page(zhdr, true); - return; - } - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); - put_z3fold_header(zhdr); -} - -/** - * z3fold_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * Extracts the buddy number from handle and constructs the pointer to the - * correct starting chunk within the page. - * - * Returns: a pointer to the mapped allocation - */ -static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - void *addr; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - addr = zhdr; - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - goto out; - - buddy = handle_to_buddy(handle); - switch (buddy) { - case FIRST: - addr += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - addr += zhdr->start_middle << CHUNK_SHIFT; - set_bit(MIDDLE_CHUNK_MAPPED, &page->private); - break; - case LAST: - addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); - break; - default: - pr_err("unknown buddy id %d\n", buddy); - WARN_ON(1); - addr = NULL; - break; - } - - if (addr) - zhdr->mapped_count++; -out: - put_z3fold_header(zhdr); - return addr; -} - -/** - * z3fold_unmap() - unmaps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return; - - buddy = handle_to_buddy(handle); - if (buddy == MIDDLE) - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - zhdr->mapped_count--; - put_z3fold_header(zhdr); -} - -/** - * z3fold_get_pool_pages() - gets the z3fold pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. 
- */ -static u64 z3fold_get_pool_pages(struct z3fold_pool *pool) -{ - return atomic64_read(&pool->pages_nr); -} - -static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) -{ - struct z3fold_header *zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(PageIsolated(page), page); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return false; - - zhdr = page_address(page); - z3fold_page_lock(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_STALE, &page->private)) - goto out; - - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) - goto out; - - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) - goto out; - pool = zhdr_to_pool(zhdr); - spin_lock(&pool->lock); - if (!list_empty(&zhdr->buddy)) - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - kref_get(&zhdr->refcount); - z3fold_page_unlock(zhdr); - return true; - -out: - z3fold_page_unlock(zhdr); - return false; -} - -static int z3fold_page_migrate(struct page *newpage, struct page *page, - enum migrate_mode mode) -{ - struct z3fold_header *zhdr, *new_zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(!PageIsolated(page), page); - VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page); - VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); - - zhdr = page_address(page); - pool = zhdr_to_pool(zhdr); - - if (!z3fold_page_trylock(zhdr)) - return -EAGAIN; - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - return -EBUSY; - } - if (work_pending(&zhdr->work)) { - z3fold_page_unlock(zhdr); - return -EAGAIN; - } - new_zhdr = page_address(newpage); - memcpy(new_zhdr, zhdr, PAGE_SIZE); - newpage->private = page->private; - set_bit(PAGE_MIGRATED, &page->private); - z3fold_page_unlock(zhdr); - spin_lock_init(&new_zhdr->page_lock); - INIT_WORK(&new_zhdr->work, compact_page_work); - /* - * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, - * so we only have to reinitialize it. - */ - INIT_LIST_HEAD(&new_zhdr->buddy); - __ClearPageMovable(page); - - get_page(newpage); - z3fold_page_lock(new_zhdr); - if (new_zhdr->first_chunks) - encode_handle(new_zhdr, FIRST); - if (new_zhdr->last_chunks) - encode_handle(new_zhdr, LAST); - if (new_zhdr->middle_chunks) - encode_handle(new_zhdr, MIDDLE); - set_bit(NEEDS_COMPACTING, &newpage->private); - new_zhdr->cpu = smp_processor_id(); - __SetPageMovable(newpage, &z3fold_mops); - z3fold_page_unlock(new_zhdr); - - queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); - - /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. 
*/ - page->private = 0; - put_page(page); - return 0; -} - -static void z3fold_page_putback(struct page *page) -{ - struct z3fold_header *zhdr; - struct z3fold_pool *pool; - - zhdr = page_address(page); - pool = zhdr_to_pool(zhdr); - - z3fold_page_lock(zhdr); - if (!list_empty(&zhdr->buddy)) - list_del_init(&zhdr->buddy); - INIT_LIST_HEAD(&page->lru); - if (put_z3fold_locked(zhdr)) - return; - if (list_empty(&zhdr->buddy)) - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); -} - -static const struct movable_operations z3fold_mops = { - .isolate_page = z3fold_page_isolate, - .migrate_page = z3fold_page_migrate, - .putback_page = z3fold_page_putback, -}; - -/***************** - * zpool - ****************/ - -static void *z3fold_zpool_create(const char *name, gfp_t gfp) -{ - return z3fold_create_pool(name, gfp); -} - -static void z3fold_zpool_destroy(void *pool) -{ - z3fold_destroy_pool(pool); -} - -static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - return z3fold_alloc(pool, size, gfp, handle); -} -static void z3fold_zpool_free(void *pool, unsigned long handle) -{ - z3fold_free(pool, handle); -} - -static void *z3fold_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - return z3fold_map(pool, handle); -} -static void z3fold_zpool_unmap(void *pool, unsigned long handle) -{ - z3fold_unmap(pool, handle); -} - -static u64 z3fold_zpool_total_pages(void *pool) -{ - return z3fold_get_pool_pages(pool); -} - -static struct zpool_driver z3fold_zpool_driver = { - .type = "z3fold", - .sleep_mapped = true, - .owner = THIS_MODULE, - .create = z3fold_zpool_create, - .destroy = z3fold_zpool_destroy, - .malloc = z3fold_zpool_malloc, - .free = z3fold_zpool_free, - .map = z3fold_zpool_map, - .unmap = z3fold_zpool_unmap, - .total_pages = z3fold_zpool_total_pages, -}; - -MODULE_ALIAS("zpool-z3fold"); - -static int __init init_z3fold(void) -{ - /* - * Make sure the z3fold header is not larger than the page size and - * there has remaining spaces for its buddy. - */ - BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE); - zpool_register_driver(&z3fold_zpool_driver); - - return 0; -} - -static void __exit exit_z3fold(void) -{ - zpool_unregister_driver(&z3fold_zpool_driver); -} - -module_init(init_z3fold); -module_exit(exit_z3fold); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>"); -MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages"); diff --git a/mm/zbud.c b/mm/zbud.c deleted file mode 100644 index e9836fff9438..000000000000 --- a/mm/zbud.c +++ /dev/null @@ -1,455 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * zbud.c - * - * Copyright (C) 2013, Seth Jennings, IBM - * - * Concepts based on zcache internal zbud allocator by Dan Magenheimer. - * - * zbud is an special purpose allocator for storing compressed pages. Contrary - * to what its name may suggest, zbud is not a buddy allocator, but rather an - * allocator that "buddies" two compressed pages together in a single memory - * page. - * - * While this design limits storage density, it has simple and deterministic - * reclaim properties that make it preferable to a higher density approach when - * reclaim will be used. - * - * zbud works by storing compressed pages, or "zpages", together in pairs in a - * single memory page called a "zbud page". The first buddy is "left - * justified" at the beginning of the zbud page, and the last buddy is "right - * justified" at the end of the zbud page. 
The benefit is that if either - * buddy is freed, the freed buddy space, coalesced with whatever slack space - * that existed between the buddies, results in the largest possible free region - * within the zbud page. - * - * zbud also provides an attractive lower bound on density. The ratio of zpages - * to zbud pages can not be less than 1. This ensures that zbud can never "do - * harm" by using more pages to store zpages than the uncompressed zpages would - * have used on their own. - * - * zbud pages are divided into "chunks". The size of the chunks is fixed at - * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages - * into chunks allows organizing unbuddied zbud pages into a manageable number - * of unbuddied lists according to the number of free chunks available in the - * zbud page. - * - * The zbud API differs from that of conventional allocators in that the - * allocation function, zbud_alloc(), returns an opaque handle to the user, - * not a dereferenceable pointer. The user must map the handle using - * zbud_map() in order to get a usable pointer by which to access the - * allocation data and unmap the handle with zbud_unmap() when operations - * on the allocation data are complete. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/atomic.h> -#include <linux/list.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/preempt.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/zpool.h> - -/***************** - * Structures -*****************/ -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk - * in allocated page is occupied by zbud header, NCHUNKS will be calculated to - * 63 which shows the max number of free chunks in zbud page, also there will be - * 63 freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED CHUNK_SIZE -#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) - -struct zbud_pool; - -/** - * struct zbud_pool - stores metadata for each zbud pool - * @lock: protects all pool fields and first|last_chunk fields of any - * zbud page in the pool - * @unbuddied: array of lists tracking zbud pages that only contain one buddy; - * the lists each zbud page is added to depends on the size of - * its free region. - * @buddied: list tracking the zbud pages that contain two buddies; - * these zbud pages are full - * @pages_nr: number of zbud pages in the pool. - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular zbud pool. - */ -struct zbud_pool { - spinlock_t lock; - union { - /* - * Reuse unbuddied[0] as buddied on the ground that - * unbuddied[0] is unused. - */ - struct list_head buddied; - struct list_head unbuddied[NCHUNKS]; - }; - u64 pages_nr; -}; - -/* - * struct zbud_header - zbud page metadata occupying the first chunk of each - * zbud page. 
- * @buddy: links the zbud page into the unbuddied/buddied lists in the pool - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - */ -struct zbud_header { - struct list_head buddy; - unsigned int first_chunks; - unsigned int last_chunks; -}; - -/***************** - * Helpers -*****************/ -/* Just to make the code easier to read */ -enum buddy { - FIRST, - LAST -}; - -/* Converts an allocation size in bytes to size in zbud chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -/* Initializes the zbud header of a newly allocated zbud page */ -static struct zbud_header *init_zbud_page(struct page *page) -{ - struct zbud_header *zhdr = page_address(page); - zhdr->first_chunks = 0; - zhdr->last_chunks = 0; - INIT_LIST_HEAD(&zhdr->buddy); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_zbud_page(struct zbud_header *zhdr) -{ - __free_page(virt_to_page(zhdr)); -} - -/* - * Encodes the handle of a particular buddy within a zbud page - * Pool lock should be held as this function accesses first|last_chunks - */ -static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) -{ - unsigned long handle; - - /* - * For now, the encoded handle is actually just the pointer to the data - * but this might not always be the case. A little information hiding. - * Add CHUNK_SIZE to the handle if it is the first allocation to jump - * over the zbud header in the first chunk. - */ - handle = (unsigned long)zhdr; - if (bud == FIRST) - /* skip over zbud header */ - handle += ZHDR_SIZE_ALIGNED; - else /* bud == LAST */ - handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - return handle; -} - -/* Returns the zbud page where a given handle is stored */ -static struct zbud_header *handle_to_zbud_header(unsigned long handle) -{ - return (struct zbud_header *)(handle & PAGE_MASK); -} - -/* Returns the number of free chunks in a zbud page */ -static int num_free_chunks(struct zbud_header *zhdr) -{ - /* - * Rather than branch for different situations, just use the fact that - * free buddies have a length of zero to simplify everything. - */ - return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; -} - -/***************** - * API Functions -*****************/ -/** - * zbud_create_pool() - create a new zbud pool - * @gfp: gfp flags when allocating the zbud pool structure - * - * Return: pointer to the new zbud pool or NULL if the metadata allocation - * failed. - */ -static struct zbud_pool *zbud_create_pool(gfp_t gfp) -{ - struct zbud_pool *pool; - int i; - - pool = kzalloc(sizeof(struct zbud_pool), gfp); - if (!pool) - return NULL; - spin_lock_init(&pool->lock); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&pool->unbuddied[i]); - INIT_LIST_HEAD(&pool->buddied); - pool->pages_nr = 0; - return pool; -} - -/** - * zbud_destroy_pool() - destroys an existing zbud pool - * @pool: the zbud pool to be destroyed - * - * The pool should be emptied before this function is called. 
- */ -static void zbud_destroy_pool(struct zbud_pool *pool) -{ - kfree(pool); -} - -/** - * zbud_alloc() - allocates a region of a given size - * @pool: zbud pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used - * as zbud pool pages. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. - */ -static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks, i, freechunks; - struct zbud_header *zhdr = NULL; - enum buddy bud; - struct page *page; - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - return -ENOSPC; - chunks = size_to_chunks(size); - spin_lock(&pool->lock); - - /* First, try to find an unbuddied zbud page. */ - for_each_unbuddied_list(i, chunks) { - if (!list_empty(&pool->unbuddied[i])) { - zhdr = list_first_entry(&pool->unbuddied[i], - struct zbud_header, buddy); - list_del(&zhdr->buddy); - if (zhdr->first_chunks == 0) - bud = FIRST; - else - bud = LAST; - goto found; - } - } - - /* Couldn't find unbuddied zbud page, create new one */ - spin_unlock(&pool->lock); - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - spin_lock(&pool->lock); - pool->pages_nr++; - zhdr = init_zbud_page(page); - bud = FIRST; - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else - zhdr->last_chunks = chunks; - - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } else { - /* Add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } - - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - - return 0; -} - -/** - * zbud_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by zbud_alloc() - */ -static void zbud_free(struct zbud_pool *pool, unsigned long handle) -{ - struct zbud_header *zhdr; - int freechunks; - - spin_lock(&pool->lock); - zhdr = handle_to_zbud_header(handle); - - /* If first buddy, handle will be page aligned */ - if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) - zhdr->last_chunks = 0; - else - zhdr->first_chunks = 0; - - /* Remove from existing buddy list */ - list_del(&zhdr->buddy); - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* zbud page is empty, free */ - free_zbud_page(zhdr); - pool->pages_nr--; - } else { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } - - spin_unlock(&pool->lock); -} - -/** - * zbud_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * While trivial for zbud, the mapping functions for others allocators - * implementing this allocation API could have 
more complex information encoded - * in the handle and could create temporary mappings to make the data - * accessible to the user. - * - * Returns: a pointer to the mapped allocation - */ -static void *zbud_map(struct zbud_pool *pool, unsigned long handle) -{ - return (void *)(handle); -} - -/** - * zbud_unmap() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void zbud_unmap(struct zbud_pool *pool, unsigned long handle) -{ -} - -/** - * zbud_get_pool_pages() - gets the zbud pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. The pool lock need not be - * taken to access pages_nr. - */ -static u64 zbud_get_pool_pages(struct zbud_pool *pool) -{ - return pool->pages_nr; -} - -/***************** - * zpool - ****************/ - -static void *zbud_zpool_create(const char *name, gfp_t gfp) -{ - return zbud_create_pool(gfp); -} - -static void zbud_zpool_destroy(void *pool) -{ - zbud_destroy_pool(pool); -} - -static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - return zbud_alloc(pool, size, gfp, handle); -} -static void zbud_zpool_free(void *pool, unsigned long handle) -{ - zbud_free(pool, handle); -} - -static void *zbud_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - return zbud_map(pool, handle); -} -static void zbud_zpool_unmap(void *pool, unsigned long handle) -{ - zbud_unmap(pool, handle); -} - -static u64 zbud_zpool_total_pages(void *pool) -{ - return zbud_get_pool_pages(pool); -} - -static struct zpool_driver zbud_zpool_driver = { - .type = "zbud", - .sleep_mapped = true, - .owner = THIS_MODULE, - .create = zbud_zpool_create, - .destroy = zbud_zpool_destroy, - .malloc = zbud_zpool_malloc, - .free = zbud_zpool_free, - .map = zbud_zpool_map, - .unmap = zbud_zpool_unmap, - .total_pages = zbud_zpool_total_pages, -}; - -MODULE_ALIAS("zpool-zbud"); - -static int __init init_zbud(void) -{ - /* Make sure the zbud header will fit in one chunk */ - BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); - pr_info("loaded\n"); - - zpool_register_driver(&zbud_zpool_driver); - - return 0; -} - -static void __exit exit_zbud(void) -{ - zpool_unregister_driver(&zbud_zpool_driver); - pr_info("unloaded\n"); -} - -module_init(init_zbud); -module_exit(exit_zbud); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>"); -MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages"); diff --git a/mm/zpool.c b/mm/zpool.c index b9fda1fa857d..6d6d88930932 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -95,7 +95,7 @@ static void zpool_put_driver(struct zpool_driver *driver) /** * zpool_has_pool() - Check if the pool driver is available - * @type: The type of the zpool to check (e.g. zbud, zsmalloc) + * @type: The type of the zpool to check (e.g. zsmalloc) * * This checks if the @type pool driver is available. This will try to load * the requested module, if needed, but there is no guarantee the module will @@ -130,7 +130,7 @@ EXPORT_SYMBOL(zpool_has_pool); /** * zpool_create_pool() - Create a new zpool - * @type: The type of the zpool to create (e.g. zbud, zsmalloc) + * @type: The type of the zpool to create (e.g. zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. 
* @@ -221,22 +221,6 @@ const char *zpool_get_type(struct zpool *zpool) } /** - * zpool_malloc_support_movable() - Check if the zpool supports - * allocating movable memory - * @zpool: The zpool to check - * - * This returns if the zpool supports allocating movable memory. - * - * Implementations must guarantee this to be thread-safe. - * - * Returns: true if the zpool supports allocating movable memory, false if not - */ -bool zpool_malloc_support_movable(struct zpool *zpool) -{ - return zpool->driver->malloc_support_movable; -} - -/** * zpool_malloc() - Allocate memory * @zpool: The zpool to allocate from. * @size: The amount of memory to allocate. @@ -278,46 +262,51 @@ void zpool_free(struct zpool *zpool, unsigned long handle) } /** - * zpool_map_handle() - Map a previously allocated handle into memory + * zpool_obj_read_begin() - Start reading from a previously allocated handle. * @zpool: The zpool that the handle was allocated from - * @handle: The handle to map - * @mapmode: How the memory should be mapped + * @handle: The handle to read from + * @local_copy: A local buffer to use if needed. * - * This maps a previously allocated handle into memory. The @mapmode - * param indicates to the implementation how the memory will be - * used, i.e. read-only, write-only, read-write. If the - * implementation does not support it, the memory will be treated - * as read-write. + * This starts a read operation of a previously allocated handle. The passed + * @local_copy buffer may be used if needed by copying the memory into. + * zpool_obj_read_end() MUST be called after the read is completed to undo any + * actions taken (e.g. release locks). * - * This may hold locks, disable interrupts, and/or preemption, - * and the zpool_unmap_handle() must be called to undo those - * actions. The code that uses the mapped handle should complete - * its operations on the mapped handle memory quickly and unmap - * as soon as possible. As the implementation may use per-cpu - * data, multiple handles should not be mapped concurrently on - * any cpu. + * Returns: A pointer to the handle memory to be read, if @local_copy is used, + * the returned pointer is @local_copy. + */ +void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle, + void *local_copy) +{ + return zpool->driver->obj_read_begin(zpool->pool, handle, local_copy); +} + +/** + * zpool_obj_read_end() - Finish reading from a previously allocated handle. + * @zpool: The zpool that the handle was allocated from + * @handle: The handle to read from + * @handle_mem: The pointer returned by zpool_obj_read_begin() * - * Returns: A pointer to the handle's mapped memory area. + * Finishes a read operation previously started by zpool_obj_read_begin(). */ -void *zpool_map_handle(struct zpool *zpool, unsigned long handle, - enum zpool_mapmode mapmode) +void zpool_obj_read_end(struct zpool *zpool, unsigned long handle, + void *handle_mem) { - return zpool->driver->map(zpool->pool, handle, mapmode); + zpool->driver->obj_read_end(zpool->pool, handle, handle_mem); } /** - * zpool_unmap_handle() - Unmap a previously mapped handle + * zpool_obj_write() - Write to a previously allocated handle. * @zpool: The zpool that the handle was allocated from - * @handle: The handle to unmap + * @handle: The handle to read from + * @handle_mem: The memory to copy from into the handle. + * @mem_len: The length of memory to be written. * - * This unmaps a previously mapped handle. 
Any locks or other - * actions that the implementation took in zpool_map_handle() - * will be undone here. The memory area returned from - * zpool_map_handle() should no longer be used after this. */ -void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) +void zpool_obj_write(struct zpool *zpool, unsigned long handle, + void *handle_mem, size_t mem_len) { - zpool->driver->unmap(zpool->pool, handle); + zpool->driver->obj_write(zpool->pool, handle, handle_mem, mem_len); } /** @@ -333,23 +322,5 @@ u64 zpool_get_total_pages(struct zpool *zpool) return zpool->driver->total_pages(zpool->pool); } -/** - * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. - * @zpool: The zpool to test - * - * Some allocators enter non-preemptible context in ->map() callback (e.g. - * disable pagefaults) and exit that context in ->unmap(), which limits what - * we can do with the mapped object. For instance, we cannot wait for - * asynchronous crypto API to decompress such an object or take mutexes - * since those will call into the scheduler. This function tells us whether - * we use such an allocator. - * - * Returns: true if zpool can sleep; false otherwise. - */ -bool zpool_can_sleep_mapped(struct zpool *zpool) -{ - return zpool->driver->sleep_mapped; -} - MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); MODULE_DESCRIPTION("Common API for compressed memory storage"); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 6d0e47f7ae33..961b270f023c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -18,7 +18,7 @@ /* * lock ordering: * page_lock - * pool->migrate_lock + * pool->lock * class->lock * zspage->lock */ @@ -223,8 +223,8 @@ struct zs_pool { #ifdef CONFIG_COMPACTION struct work_struct free_work; #endif - /* protect page/zspage migration */ - rwlock_t migrate_lock; + /* protect zspage migration/compaction */ + rwlock_t lock; atomic_t compaction_in_progress; }; @@ -257,6 +257,15 @@ static inline void free_zpdesc(struct zpdesc *zpdesc) __free_page(page); } +#define ZS_PAGE_UNLOCKED 0 +#define ZS_PAGE_WRLOCKED -1 + +struct zspage_lock { + spinlock_t lock; + int cnt; + struct lockdep_map dep_map; +}; + struct zspage { struct { unsigned int huge:HUGE_BITS; @@ -269,15 +278,86 @@ struct zspage { struct zpdesc *first_zpdesc; struct list_head list; /* fullness list */ struct zs_pool *pool; - rwlock_t lock; + struct zspage_lock zsl; }; -struct mapping_area { - local_lock_t lock; - char *vm_buf; /* copy buffer for objects that span pages */ - char *vm_addr; /* address of kmap_local_page()'ed pages */ - enum zs_mapmode vm_mm; /* mapping mode */ -}; +static void zspage_lock_init(struct zspage *zspage) +{ + static struct lock_class_key __key; + struct zspage_lock *zsl = &zspage->zsl; + + lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0); + spin_lock_init(&zsl->lock); + zsl->cnt = ZS_PAGE_UNLOCKED; +} + +/* + * The zspage lock can be held from atomic contexts, but it needs to remain + * preemptible when held for reading because it remains held outside of those + * atomic contexts, otherwise we unnecessarily lose preemptibility. + * + * To achieve this, the following rules are enforced on readers and writers: + * + * - Writers are blocked by both writers and readers, while readers are only + * blocked by writers (i.e. normal rwlock semantics). + * + * - Writers are always atomic (to allow readers to spin waiting for them). + * + * - Writers always use trylock (as the lock may be held be sleeping readers). + * + * - Readers may spin on the lock (as they can only wait for atomic writers). 
+ * + * - Readers may sleep while holding the lock (as writes only use trylock). + */ +static void zspage_read_lock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt++; + spin_unlock(&zsl->lock); + + lock_acquired(&zsl->dep_map, _RET_IP_); +} + +static void zspage_read_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt--; + spin_unlock(&zsl->lock); +} + +static __must_check bool zspage_write_trylock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + spin_lock(&zsl->lock); + if (zsl->cnt == ZS_PAGE_UNLOCKED) { + zsl->cnt = ZS_PAGE_WRLOCKED; + rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_); + lock_acquired(&zsl->dep_map, _RET_IP_); + return true; + } + + spin_unlock(&zsl->lock); + return false; +} + +static void zspage_write_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + zsl->cnt = ZS_PAGE_UNLOCKED; + spin_unlock(&zsl->lock); +} /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ static void SetZsHugePage(struct zspage *zspage) @@ -290,12 +370,6 @@ static bool ZsHugePage(struct zspage *zspage) return zspage->huge; } -static void migrate_lock_init(struct zspage *zspage); -static void migrate_read_lock(struct zspage *zspage); -static void migrate_read_unlock(struct zspage *zspage); -static void migrate_write_lock(struct zspage *zspage); -static void migrate_write_unlock(struct zspage *zspage); - #ifdef CONFIG_COMPACTION static void kick_deferred_free(struct zs_pool *pool); static void init_deferred_free(struct zs_pool *pool); @@ -401,29 +475,22 @@ static void zs_zpool_free(void *pool, unsigned long handle) zs_free(pool, handle); } -static void *zs_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) +static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle, + void *local_copy) { - enum zs_mapmode zs_mm; - - switch (mm) { - case ZPOOL_MM_RO: - zs_mm = ZS_MM_RO; - break; - case ZPOOL_MM_WO: - zs_mm = ZS_MM_WO; - break; - case ZPOOL_MM_RW: - default: - zs_mm = ZS_MM_RW; - break; - } + return zs_obj_read_begin(pool, handle, local_copy); +} - return zs_map_object(pool, handle, zs_mm); +static void zs_zpool_obj_read_end(void *pool, unsigned long handle, + void *handle_mem) +{ + zs_obj_read_end(pool, handle, handle_mem); } -static void zs_zpool_unmap(void *pool, unsigned long handle) + +static void zs_zpool_obj_write(void *pool, unsigned long handle, + void *handle_mem, size_t mem_len) { - zs_unmap_object(pool, handle); + zs_obj_write(pool, handle, handle_mem, mem_len); } static u64 zs_zpool_total_pages(void *pool) @@ -436,22 +503,17 @@ static struct zpool_driver zs_zpool_driver = { .owner = THIS_MODULE, .create = zs_zpool_create, .destroy = zs_zpool_destroy, - .malloc_support_movable = true, .malloc = zs_zpool_malloc, .free = zs_zpool_free, - .map = zs_zpool_map, - .unmap = zs_zpool_unmap, + .obj_read_begin = zs_zpool_obj_read_begin, + .obj_read_end = zs_zpool_obj_read_end, + .obj_write = zs_zpool_obj_write, .total_pages = zs_zpool_total_pages, }; MODULE_ALIAS("zpool-zsmalloc"); #endif /* CONFIG_ZPOOL */ -/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ -static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = { - .lock = INIT_LOCAL_LOCK(lock), -}; - static inline bool __maybe_unused is_first_zpdesc(struct zpdesc 
*zpdesc) { return PagePrivate(zpdesc_page(zpdesc)); @@ -992,7 +1054,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, return NULL; zspage->magic = ZSPAGE_MAGIC; - migrate_lock_init(zspage); + zspage->pool = pool; + zspage->class = class->index; + zspage_lock_init(zspage); for (i = 0; i < class->pages_per_zspage; i++) { struct zpdesc *zpdesc; @@ -1015,8 +1079,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, create_page_chain(class, zspage, zpdescs); init_zspage(class, zspage); - zspage->pool = pool; - zspage->class = class->index; return zspage; } @@ -1036,93 +1098,6 @@ static struct zspage *find_get_zspage(struct size_class *class) return zspage; } -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm_buf) - return 0; - area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); - if (!area->vm_buf) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - kfree(area->vm_buf); - area->vm_buf = NULL; -} - -static void *__zs_map_object(struct mapping_area *area, - struct zpdesc *zpdescs[2], int off, int size) -{ - size_t sizes[2]; - char *buf = area->vm_buf; - - /* disable page faults to match kmap_local_page() return conditions */ - pagefault_disable(); - - /* no read fastpath */ - if (area->vm_mm == ZS_MM_WO) - goto out; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy object to per-cpu buffer */ - memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]); - memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]); -out: - return area->vm_buf; -} - -static void __zs_unmap_object(struct mapping_area *area, - struct zpdesc *zpdescs[2], int off, int size) -{ - size_t sizes[2]; - char *buf; - - /* no write fastpath */ - if (area->vm_mm == ZS_MM_RO) - goto out; - - buf = area->vm_buf; - buf = buf + ZS_HANDLE_SIZE; - size -= ZS_HANDLE_SIZE; - off += ZS_HANDLE_SIZE; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy per-cpu buffer to object */ - memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]); - memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]); - -out: - /* enable page faults to match kunmap_local() return conditions */ - pagefault_enable(); -} - -static int zs_cpu_prepare(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - return __zs_cpu_up(area); -} - -static int zs_cpu_dead(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - __zs_cpu_down(area); - return 0; -} - static bool can_merge(struct size_class *prev, int pages_per_zspage, int objs_per_zspage) { @@ -1170,92 +1145,64 @@ unsigned long zs_get_total_pages(struct zs_pool *pool) } EXPORT_SYMBOL_GPL(zs_get_total_pages); -/** - * zs_map_object - get address of allocated object from handle. - * @pool: pool from which the object was allocated - * @handle: handle returned from zs_malloc - * @mm: mapping mode to use - * - * Before using an object allocated from zs_malloc, it must be mapped using - * this function. When done with the object, it must be unmapped using - * zs_unmap_object. - * - * Only one object can be mapped per cpu at a time. There is no protection - * against nested mappings. - * - * This function returns with preemption and page faults disabled. 
- */ -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm) +void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, + void *local_copy) { struct zspage *zspage; struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; - struct size_class *class; - struct mapping_area *area; - struct zpdesc *zpdescs[2]; - void *ret; + void *addr; - /* - * Because we use per-cpu mapping areas shared among the - * pools/users, we can't allow mapping in interrupt context - * because it can corrupt another users mappings. - */ - BUG_ON(in_interrupt()); - - /* It guarantees it can get zspage from handle safely */ - read_lock(&pool->migrate_lock); + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_location(obj, &zpdesc, &obj_idx); zspage = get_zspage(zpdesc); - /* - * migration cannot move any zpages in this zspage. Here, class->lock - * is too heavy since callers would take some time until they calls - * zs_unmap_object API so delegate the locking from class to zspage - * which is smaller granularity. - */ - migrate_read_lock(zspage); - read_unlock(&pool->migrate_lock); + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); class = zspage_class(pool, zspage); off = offset_in_page(class->size * obj_idx); - local_lock(&zs_map_area.lock); - area = this_cpu_ptr(&zs_map_area); - area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ - area->vm_addr = kmap_local_zpdesc(zpdesc); - ret = area->vm_addr + off; - goto out; + addr = kmap_local_zpdesc(zpdesc); + addr += off; + } else { + size_t sizes[2]; + + /* this object spans two pages */ + sizes[0] = PAGE_SIZE - off; + sizes[1] = class->size - sizes[0]; + addr = local_copy; + + memcpy_from_page(addr, zpdesc_page(zpdesc), + off, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_from_page(addr + sizes[0], + zpdesc_page(zpdesc), + 0, sizes[1]); } - /* this object spans two pages */ - zpdescs[0] = zpdesc; - zpdescs[1] = get_next_zpdesc(zpdesc); - BUG_ON(!zpdescs[1]); - - ret = __zs_map_object(area, zpdescs, off, class->size); -out: - if (likely(!ZsHugePage(zspage))) - ret += ZS_HANDLE_SIZE; + if (!ZsHugePage(zspage)) + addr += ZS_HANDLE_SIZE; - return ret; + return addr; } -EXPORT_SYMBOL_GPL(zs_map_object); +EXPORT_SYMBOL_GPL(zs_obj_read_begin); -void zs_unmap_object(struct zs_pool *pool, unsigned long handle) +void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, + void *handle_mem) { struct zspage *zspage; struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; - struct size_class *class; - struct mapping_area *area; obj = handle_to_obj(handle); obj_to_location(obj, &zpdesc, &obj_idx); @@ -1263,23 +1210,65 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) class = zspage_class(pool, zspage); off = offset_in_page(class->size * obj_idx); - area = this_cpu_ptr(&zs_map_area); - if (off + class->size <= PAGE_SIZE) - kunmap_local(area->vm_addr); - else { - struct zpdesc *zpdescs[2]; + if (off + class->size <= PAGE_SIZE) { + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + handle_mem -= off; + kunmap_local(handle_mem); + } + + zspage_read_unlock(zspage); +} +EXPORT_SYMBOL_GPL(zs_obj_read_end); + +void zs_obj_write(struct zs_pool *pool, unsigned long handle, + void *handle_mem, size_t mem_len) +{ + struct zspage *zspage; + struct zpdesc *zpdesc; + unsigned long obj, off; + unsigned int 
obj_idx; + struct size_class *class; - zpdescs[0] = zpdesc; - zpdescs[1] = get_next_zpdesc(zpdesc); - BUG_ON(!zpdescs[1]); + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); + obj = handle_to_obj(handle); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); - __zs_unmap_object(area, zpdescs, off, class->size); + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); + + class = zspage_class(pool, zspage); + off = offset_in_page(class->size * obj_idx); + + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ + void *dst = kmap_local_zpdesc(zpdesc); + + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + memcpy(dst + off, handle_mem, mem_len); + kunmap_local(dst); + } else { + /* this object spans two pages */ + size_t sizes[2]; + + off += ZS_HANDLE_SIZE; + sizes[0] = PAGE_SIZE - off; + sizes[1] = mem_len - sizes[0]; + + memcpy_to_page(zpdesc_page(zpdesc), off, + handle_mem, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_to_page(zpdesc_page(zpdesc), 0, + handle_mem + sizes[0], sizes[1]); } - local_unlock(&zs_map_area.lock); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); } -EXPORT_SYMBOL_GPL(zs_unmap_object); +EXPORT_SYMBOL_GPL(zs_obj_write); /** * zs_huge_class_size() - Returns the size (in bytes) of the first huge @@ -1450,16 +1439,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle) return; /* - * The pool->migrate_lock protects the race with zpage's migration + * The pool->lock protects the race with zpage's migration * so it's safe to get the page from handle. */ - read_lock(&pool->migrate_lock); + read_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_zpdesc(obj, &f_zpdesc); zspage = get_zspage(f_zpdesc); class = zspage_class(pool, zspage); spin_lock(&class->lock); - read_unlock(&pool->migrate_lock); + read_unlock(&pool->lock); class_stat_sub(class, ZS_OBJS_INUSE, 1); obj_free(class->size, obj); @@ -1671,18 +1660,18 @@ static void lock_zspage(struct zspage *zspage) /* * Pages we haven't locked yet can be migrated off the list while we're * trying to lock them, so we need to be careful and only attempt to - * lock each page under migrate_read_lock(). Otherwise, the page we lock + * lock each page under zspage_read_lock(). Otherwise, the page we lock * may no longer belong to the zspage. This means that we may wait for * the wrong page to unlock, so we must take a reference to the page - * prior to waiting for it to unlock outside migrate_read_lock(). + * prior to waiting for it to unlock outside zspage_read_lock(). 
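
As a usage illustration of the zs_obj_read_begin()/zs_obj_read_end()/zs_obj_write() interface added above: the caller passes a scratch buffer that is only used when an object straddles a page boundary; otherwise the returned pointer is a kmap of the containing page plus the in-page offset. The helpers below are a hypothetical caller written against the signatures introduced by this patch, not code from the patch itself.

#include <linux/zsmalloc.h>
#include <linux/string.h>

/*
 * Hypothetical caller of the new zsmalloc object API; only the three
 * zs_obj_*() functions and their signatures come from this patch.
 */
static void example_copy_out(struct zs_pool *pool, unsigned long handle,
			     void *dst, size_t len, void *scratch)
{
	void *obj;

	/*
	 * @scratch must be at least as large as the object's size class.
	 * It is only written when the object spans two pages; in that case
	 * the pointer returned below is @scratch itself.
	 */
	obj = zs_obj_read_begin(pool, handle, scratch);
	memcpy(dst, obj, len);
	zs_obj_read_end(pool, handle, obj);
}

static void example_copy_in(struct zs_pool *pool, unsigned long handle,
			    void *src, size_t len)
{
	/* The allocator performs any page-crossing copy internally. */
	zs_obj_write(pool, handle, src, len);
}
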
*/ while (1) { - migrate_read_lock(zspage); + zspage_read_lock(zspage); zpdesc = get_first_zpdesc(zspage); if (zpdesc_trylock(zpdesc)) break; zpdesc_get(zpdesc); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); zpdesc_wait_locked(zpdesc); zpdesc_put(zpdesc); } @@ -1693,41 +1682,16 @@ static void lock_zspage(struct zspage *zspage) curr_zpdesc = zpdesc; } else { zpdesc_get(zpdesc); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); zpdesc_wait_locked(zpdesc); zpdesc_put(zpdesc); - migrate_read_lock(zspage); + zspage_read_lock(zspage); } } - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); } #endif /* CONFIG_COMPACTION */ -static void migrate_lock_init(struct zspage *zspage) -{ - rwlock_init(&zspage->lock); -} - -static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock) -{ - read_lock(&zspage->lock); -} - -static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock) -{ - read_unlock(&zspage->lock); -} - -static void migrate_write_lock(struct zspage *zspage) -{ - write_lock(&zspage->lock); -} - -static void migrate_write_unlock(struct zspage *zspage) -{ - write_unlock(&zspage->lock); -} - #ifdef CONFIG_COMPACTION static const struct movable_operations zsmalloc_mops; @@ -1785,9 +1749,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page, VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc)); - /* We're committed, tell the world that this is a Zsmalloc page. */ - __zpdesc_set_zsmalloc(newzpdesc); - /* The page is locked, so this pointer must remain valid */ zspage = get_zspage(zpdesc); pool = zspage->pool; @@ -1796,15 +1757,22 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * The pool migrate_lock protects the race between zpage migration * and zs_free. */ - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); class = zspage_class(pool, zspage); /* * the class lock protects zpage alloc/free in the zspage. */ spin_lock(&class->lock); - /* the migrate_write_lock protects zpage access via zs_map_object */ - migrate_write_lock(zspage); + /* the zspage write_lock protects zpage access via zs_obj_read/write() */ + if (!zspage_write_trylock(zspage)) { + spin_unlock(&class->lock); + write_unlock(&pool->lock); + return -EINVAL; + } + + /* We're committed, tell the world that this is a Zsmalloc page. */ + __zpdesc_set_zsmalloc(newzpdesc); offset = get_first_obj_offset(zpdesc); s_addr = kmap_local_zpdesc(zpdesc); @@ -1833,9 +1801,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * Since we complete the data copy and set up new zspage structure, * it's okay to release migration_lock. 
*/ - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); spin_unlock(&class->lock); - migrate_write_unlock(zspage); + zspage_write_unlock(zspage); zpdesc_get(newzpdesc); if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) { @@ -1956,7 +1924,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, * protect the race between zpage migration and zs_free * as well as zpage allocation/free */ - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); spin_lock(&class->lock); while (zs_can_compact(class)) { int fg; @@ -1971,9 +1939,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, if (!src_zspage) break; - migrate_write_lock(src_zspage); + if (!zspage_write_trylock(src_zspage)) + break; + migrate_zspage(pool, src_zspage, dst_zspage); - migrate_write_unlock(src_zspage); + zspage_write_unlock(src_zspage); fg = putback_zspage(class, src_zspage); if (fg == ZS_INUSE_RATIO_0) { @@ -1983,14 +1953,14 @@ static unsigned long __zs_compact(struct zs_pool *pool, src_zspage = NULL; if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 - || rwlock_is_contended(&pool->migrate_lock)) { + || rwlock_is_contended(&pool->lock)) { putback_zspage(class, dst_zspage); dst_zspage = NULL; spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); cond_resched(); - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); spin_lock(&class->lock); } } @@ -2002,7 +1972,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(class, dst_zspage); spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); return pages_freed; } @@ -2014,10 +1984,10 @@ unsigned long zs_compact(struct zs_pool *pool) unsigned long pages_freed = 0; /* - * Pool compaction is performed under pool->migrate_lock so it is basically + * Pool compaction is performed under pool->lock so it is basically * single-threaded. Having more than one thread in __zs_compact() - * will increase pool->migrate_lock contention, which will impact other - * zsmalloc operations that need pool->migrate_lock. + * will increase pool->lock contention, which will impact other + * zsmalloc operations that need pool->lock. 
*/ if (atomic_xchg(&pool->compaction_in_progress, 1)) return 0; @@ -2139,7 +2109,7 @@ struct zs_pool *zs_create_pool(const char *name) return NULL; init_deferred_free(pool); - rwlock_init(&pool->migrate_lock); + rwlock_init(&pool->lock); atomic_set(&pool->compaction_in_progress, 0); pool->name = kstrdup(name, GFP_KERNEL); @@ -2278,23 +2248,11 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool); static int __init zs_init(void) { - int ret; - - ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare", - zs_cpu_prepare, zs_cpu_dead); - if (ret) - goto out; - #ifdef CONFIG_ZPOOL zpool_register_driver(&zs_zpool_driver); #endif - zs_stat_init(); - return 0; - -out: - return ret; } static void __exit zs_exit(void) @@ -2302,8 +2260,6 @@ static void __exit zs_exit(void) #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zs_zpool_driver); #endif - cpuhp_remove_state(CPUHP_MM_ZS_PREPARE); - zs_stat_exit(); } diff --git a/mm/zswap.c b/mm/zswap.c index 23365e76a3ce..0dcc54eab58b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail; static u64 zswap_reject_compress_fail; /* Compressed page was too big for the allocator to (optimally) store */ static u64 zswap_reject_compress_poor; +/* Load or writeback failed due to decompression failure */ +static u64 zswap_decompress_fail; /* Store failed because underlying allocator could not get memory */ static u64 zswap_reject_alloc_fail; /* Store failed because the entry metadata could not be allocated (rare) */ @@ -930,7 +932,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, unsigned int dlen = PAGE_SIZE; unsigned long handle; struct zpool *zpool; - char *buf; gfp_t gfp; u8 *dst; @@ -965,17 +966,12 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, goto unlock; zpool = pool->zpool; - gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; - if (zpool_malloc_support_movable(zpool)) - gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; + gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE; alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle); if (alloc_ret) goto unlock; - buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO); - memcpy(buf, dst, dlen); - zpool_unmap_handle(zpool, handle); - + zpool_obj_write(zpool, handle, dst, dlen); entry->handle = handle; entry->length = dlen; @@ -991,41 +987,49 @@ unlock: return comp_ret == 0 && alloc_ret == 0; } -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio) +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio) { struct zpool *zpool = entry->pool->zpool; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; - u8 *src; + int decomp_ret, dlen; + u8 *src, *obj; acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool); - src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO); + obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer); + /* - * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer - * to do crypto_acomp_decompress() which might sleep. In such cases, we must - * resort to copying the buffer to a temporary one. - * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer, - * such as a kmap address of high memory or even ever a vmap address. - * However, sg_init_one is only equipped to handle linearly mapped low memory. - * In such cases, we also must copy the buffer to a temporary and lowmem one. + * zpool_obj_read_begin() might return a kmap address of highmem when + * acomp_ctx->buffer is not used. 
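As an aside on the API change visible in the zswap_compress()/zswap_decompress() hunks above: the zpool_map_handle()/memcpy()/zpool_unmap_handle() sequence is replaced by zpool_obj_write() and the zpool_obj_read_begin()/zpool_obj_read_end() pair. A minimal sketch of how a hypothetical zpool client could use these calls, based only on the signatures visible in this patch (zpool_store_and_verify() is an invented helper, not kernel code):

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/zpool.h>

	static int zpool_store_and_verify(struct zpool *zpool, void *src,
					  size_t len, u8 *local_buf)
	{
		unsigned long handle;
		void *obj;
		int err;

		err = zpool_malloc(zpool, len, GFP_NOWAIT | __GFP_NORETRY, &handle);
		if (err)
			return err;

		/* One call replaces map-for-write + memcpy + unmap. */
		zpool_obj_write(zpool, handle, src, len);

		/*
		 * obj points either directly at the stored object or at
		 * local_buf, if the backend had to copy (e.g. the object
		 * spans two pages).
		 */
		obj = zpool_obj_read_begin(zpool, handle, local_buf);
		err = memcmp(obj, src, len) ? -EIO : 0;
		zpool_obj_read_end(zpool, handle, obj);

		zpool_free(zpool, handle);
		return err;
	}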
However, sg_init_one() does not + * handle highmem addresses, so copy the object to acomp_ctx->buffer. */ - if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) || - !virt_addr_valid(src)) { - memcpy(acomp_ctx->buffer, src, entry->length); + if (virt_addr_valid(obj)) { + src = obj; + } else { + WARN_ON_ONCE(obj == acomp_ctx->buffer); + memcpy(acomp_ctx->buffer, obj, entry->length); src = acomp_ctx->buffer; - zpool_unmap_handle(zpool, entry->handle); } sg_init_one(&input, src, entry->length); sg_init_table(&output, 1); sg_set_folio(&output, folio, PAGE_SIZE, 0); acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE); - BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait)); - BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE); + decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); + dlen = acomp_ctx->req->dlen; - if (src != acomp_ctx->buffer) - zpool_unmap_handle(zpool, entry->handle); + zpool_obj_read_end(zpool, entry->handle, obj); acomp_ctx_put_unlock(acomp_ctx); + + if (!decomp_ret && dlen == PAGE_SIZE) + return true; + + zswap_decompress_fail++; + pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n", + swp_type(entry->swpentry), + swp_offset(entry->swpentry), + entry->pool->tfm_name, entry->length, dlen); + return false; } /********************************* @@ -1051,14 +1055,21 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct folio *folio; struct mempolicy *mpol; bool folio_was_allocated; + struct swap_info_struct *si; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, }; + int ret = 0; /* try to allocate swap cache folio */ + si = get_swap_device(swpentry); + if (!si) + return -EEXIST; + mpol = get_task_policy(current); folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol, - NO_INTERLEAVE_INDEX, &folio_was_allocated, true); + NO_INTERLEAVE_INDEX, &folio_was_allocated, true); + put_swap_device(si); if (!folio) return -ENOMEM; @@ -1070,8 +1081,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry, * and freed when invalidated by the concurrent shrinker anyway. */ if (!folio_was_allocated) { - folio_put(folio); - return -EEXIST; + ret = -EEXIST; + goto out; } /* @@ -1084,14 +1095,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry, * be dereferenced. */ tree = swap_zswap_tree(swpentry); - if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) { - delete_from_swap_cache(folio); - folio_unlock(folio); - folio_put(folio); - return -ENOMEM; + if (entry != xa_load(tree, offset)) { + ret = -ENOMEM; + goto out; } - zswap_decompress(entry, folio); + if (!zswap_decompress(entry, folio)) { + ret = -EIO; + goto out; + } + + xa_erase(tree, offset); count_vm_event(ZSWPWB); if (entry->objcg) @@ -1107,9 +1121,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry, /* start writeback */ __swap_writepage(folio, &wbc); - folio_put(folio); - return 0; +out: + if (ret && ret != -EEXIST) { + delete_from_swap_cache(folio); + folio_unlock(folio); + } + folio_put(folio); + return ret; } /********************************* @@ -1609,7 +1628,27 @@ check_old: return ret; } -bool zswap_load(struct folio *folio) +/** + * zswap_load() - load a folio from zswap + * @folio: folio to load + * + * Return: 0 on success, with the folio unlocked and marked up-to-date, or one + * of the following error codes: + * + * -EIO: if the swapped out content was in zswap, but could not be loaded + * into the page due to a decompression failure. 
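The error contract spelled out in this kerneldoc (the -EIO case continues just below) is easiest to see from the caller's side. A hypothetical caller sketch, not the actual swap_read_folio() code; submit_real_swap_read() is an invented placeholder:

	#include <linux/errno.h>
	#include <linux/zswap.h>

	static void submit_real_swap_read(struct folio *folio);	/* placeholder */

	static void swap_read_sketch(struct folio *folio)
	{
		int err = zswap_load(folio);

		if (err == -ENOENT) {
			/* Not in zswap: the folio is still locked, do real swap I/O. */
			submit_real_swap_read(folio);
			return;
		}

		/*
		 * 0, -EIO or -EINVAL: zswap owned the entry and has already
		 * unlocked the folio. Only on 0 is it marked up-to-date; on
		 * error the missing up-to-date flag surfaces as an I/O error
		 * (e.g. do_swap_page() delivers SIGBUS).
		 */
	}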
The folio is unlocked, but + * NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page() + * will SIGBUS). + * + * -EINVAL: if the swapped out content was in zswap, but the page belongs + * to a large folio, which is not supported by zswap. The folio is unlocked, + * but NOT marked up-to-date, so that an IO error is emitted (e.g. + * do_swap_page() will SIGBUS). + * + * -ENOENT: if the swapped out content was not in zswap. The folio remains + * locked on return. + */ +int zswap_load(struct folio *folio) { swp_entry_t swp = folio->swap; pgoff_t offset = swp_offset(swp); @@ -1620,18 +1659,32 @@ bool zswap_load(struct folio *folio) VM_WARN_ON_ONCE(!folio_test_locked(folio)); if (zswap_never_enabled()) - return false; + return -ENOENT; /* * Large folios should not be swapped in while zswap is being used, as * they are not properly handled. Zswap does not properly load large * folios, and a large folio may only be partially in zswap. - * - * Return true without marking the folio uptodate so that an IO error is - * emitted (e.g. do_swap_page() will sigbus). */ - if (WARN_ON_ONCE(folio_test_large(folio))) - return true; + if (WARN_ON_ONCE(folio_test_large(folio))) { + folio_unlock(folio); + return -EINVAL; + } + + entry = xa_load(tree, offset); + if (!entry) + return -ENOENT; + + if (!zswap_decompress(entry, folio)) { + folio_unlock(folio); + return -EIO; + } + + folio_mark_uptodate(folio); + + count_vm_event(ZSWPIN); + if (entry->objcg) + count_objcg_events(entry->objcg, ZSWPIN, 1); /* * When reading into the swapcache, invalidate our entry. The @@ -1645,27 +1698,14 @@ bool zswap_load(struct folio *folio) * files, which reads into a private page and may free it if * the fault fails. We remain the primary owner of the entry.) */ - if (swapcache) - entry = xa_erase(tree, offset); - else - entry = xa_load(tree, offset); - - if (!entry) - return false; - - zswap_decompress(entry, folio); - - count_vm_event(ZSWPIN); - if (entry->objcg) - count_objcg_events(entry->objcg, ZSWPIN, 1); - if (swapcache) { - zswap_entry_free(entry); folio_mark_dirty(folio); + xa_erase(tree, offset); + zswap_entry_free(entry); } - folio_mark_uptodate(folio); - return true; + folio_unlock(folio); + return 0; } void zswap_invalidate(swp_entry_t swp) @@ -1760,6 +1800,8 @@ static int zswap_debugfs_init(void) zswap_debugfs_root, &zswap_reject_compress_fail); debugfs_create_u64("reject_compress_poor", 0444, zswap_debugfs_root, &zswap_reject_compress_poor); + debugfs_create_u64("decompress_fail", 0444, + zswap_debugfs_root, &zswap_decompress_fail); debugfs_create_u64("written_back_pages", 0444, zswap_debugfs_root, &zswap_written_back_pages); debugfs_create_file("pool_total_size", 0444, diff --git a/net/mpls/internal.h b/net/mpls/internal.h index b9f492ddf93b..83c629529b57 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -33,7 +33,7 @@ struct mpls_dev { #define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ @@ -45,7 +45,7 @@ struct mpls_dev { #define MPLS_INC_STATS(mdev, field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig index 63f6dcd71daa..564c49ed69a2 100644 --- a/samples/damon/Kconfig +++ b/samples/damon/Kconfig @@ -3,7 +3,7 
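On the MPLS hunks above: __typeof__ copies the source expression's type including its qualifiers, while TYPEOF_UNQUAL drops them, which is what allows a plain local pointer to be declared from the per-CPU annotated stats expression. A simplified analogue using const, assuming a toolchain that provides __typeof_unqual__ (which the kernel's TYPEOF_UNQUAL() macro is expected to use where available):

	static void typeof_unqual_demo(void)
	{
		const int src = 0;

		__typeof__(src) a = src;        /* const int: keeps the qualifier */
		__typeof_unqual__(src) b = src; /* plain int: qualifier stripped */

		b = 1;          /* fine */
		/* a = 1; */    /* would not compile: a is const */
		(void)a;
		(void)b;
	}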
@@ menu "DAMON Samples" config SAMPLE_DAMON_WSSE - bool "DAMON sameple module for working set size estimation" + bool "DAMON sample module for working set size estimation" depends on DAMON && DAMON_VADDR help This builds DAMON sample module for working set size estimation. @@ -15,7 +15,7 @@ config SAMPLE_DAMON_WSSE If unsure, say N. config SAMPLE_DAMON_PRCL - bool "DAMON sameple module for access-aware proactive reclamation" + bool "DAMON sample module for access-aware proactive reclamation" depends on DAMON && DAMON_VADDR help This builds DAMON sample module for access-aware proactive diff --git a/samples/kmemleak/kmemleak-test.c b/samples/kmemleak/kmemleak-test.c index 544c36d51d56..8609812a37eb 100644 --- a/samples/kmemleak/kmemleak-test.c +++ b/samples/kmemleak/kmemleak-test.c @@ -40,25 +40,25 @@ static int kmemleak_test_init(void) pr_info("Kmemleak testing\n"); /* make some orphan objects */ - pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); - pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); - pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); - pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); - pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); - pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); - pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); - pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); + pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL)); + pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL)); #ifndef CONFIG_MODULES - pr_info("kmem_cache_alloc(files_cachep) = %p\n", + pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); - pr_info("kmem_cache_alloc(files_cachep) = %p\n", + pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); #endif - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); /* * Add elements to a list. 
They should only appear as orphan @@ -66,7 +66,7 @@ static int kmemleak_test_init(void) */ for (i = 0; i < 10; i++) { elem = kzalloc(sizeof(*elem), GFP_KERNEL); - pr_info("kzalloc(sizeof(*elem)) = %p\n", elem); + pr_info("kzalloc(sizeof(*elem)) = 0x%px\n", elem); if (!elem) return -ENOMEM; INIT_LIST_HEAD(&elem->list); @@ -75,11 +75,11 @@ static int kmemleak_test_init(void) for_each_possible_cpu(i) { per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); - pr_info("kmalloc(129) = %p\n", + pr_info("kmalloc(129) = 0x%px\n", per_cpu(kmemleak_test_pointer, i)); } - pr_info("__alloc_percpu(64, 4) = %p\n", __alloc_percpu(64, 4)); + pr_info("__alloc_percpu(64, 4) = 0x%px\n", __alloc_percpu(64, 4)); return 0; } diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h index 36cb29bc57c2..1f30956e070d 100644 --- a/tools/include/linux/refcount.h +++ b/tools/include/linux/refcount.h @@ -60,6 +60,11 @@ static inline void refcount_set(refcount_t *r, unsigned int n) atomic_set(&r->refs, n); } +static inline void refcount_set_release(refcount_t *r, unsigned int n) +{ + atomic_set(&r->refs, n); +} + static inline unsigned int refcount_read(const refcount_t *r) { return atomic_read(&r->refs); diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 8b3591a51e1f..b2a6660bbd92 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -14,6 +14,7 @@ include ../shared/shared.mk main: $(OFILES) +xarray.o: ../../../lib/test_xarray.c idr-test.o: ../../../lib/test_ida.c idr-test: idr-test.o $(CORE_OFILES) diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 121000c28c10..c5241b193db8 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -57,4 +57,4 @@ droppable hugetlb_dio pkey_sighandler_tests_32 pkey_sighandler_tests_64 -guard-pages +guard-regions diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 63ce39d024bb..8270895039d1 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -97,7 +97,7 @@ TEST_GEN_FILES += hugetlb_fault_after_madv TEST_GEN_FILES += hugetlb_madv_vs_map TEST_GEN_FILES += hugetlb_dio TEST_GEN_FILES += droppable -TEST_GEN_FILES += guard-pages +TEST_GEN_FILES += guard-regions ifneq ($(ARCH),arm64) TEST_GEN_FILES += soft-dirty diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 9446673645eb..f0cb14ea8608 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) mremap_size = thpsize / 2; mremap_mem = mmap(NULL, mremap_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mem == MAP_FAILED) { + if (mremap_mem == MAP_FAILED) { ksft_test_result_fail("mmap() failed\n"); goto munmap; } diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-regions.c index 525c50d3ec23..b3d0e2771096 100644 --- a/tools/testing/selftests/mm/guard-pages.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -6,6 +6,7 @@ #include <assert.h> #include <errno.h> #include <fcntl.h> +#include <linux/limits.h> #include <linux/userfaultfd.h> #include <setjmp.h> #include <signal.h> @@ -18,6 +19,7 @@ #include <sys/syscall.h> #include <sys/uio.h> #include <unistd.h> +#include "vm_util.h" #include "../pidfd/pidfd.h" @@ -39,6 +41,79 @@ static sigjmp_buf signal_jmp_buf; */ #define 
FORCE_READ(x) (*(volatile typeof(x) *)x) +/* + * How is the test backing the mapping being tested? + */ +enum backing_type { + ANON_BACKED, + SHMEM_BACKED, + LOCAL_FILE_BACKED, +}; + +FIXTURE(guard_regions) +{ + unsigned long page_size; + char path[PATH_MAX]; + int fd; +}; + +FIXTURE_VARIANT(guard_regions) +{ + enum backing_type backing; +}; + +FIXTURE_VARIANT_ADD(guard_regions, anon) +{ + .backing = ANON_BACKED, +}; + +FIXTURE_VARIANT_ADD(guard_regions, shmem) +{ + .backing = SHMEM_BACKED, +}; + +FIXTURE_VARIANT_ADD(guard_regions, file) +{ + .backing = LOCAL_FILE_BACKED, +}; + +static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant) +{ + switch (variant->backing) { + case ANON_BACKED: + case SHMEM_BACKED: + return true; + default: + return false; + } +} + +static void *mmap_(FIXTURE_DATA(guard_regions) * self, + const FIXTURE_VARIANT(guard_regions) * variant, + void *addr, size_t length, int prot, int extra_flags, + off_t offset) +{ + int fd; + int flags = extra_flags; + + switch (variant->backing) { + case ANON_BACKED: + flags |= MAP_PRIVATE | MAP_ANON; + fd = -1; + break; + case SHMEM_BACKED: + case LOCAL_FILE_BACKED: + flags |= MAP_SHARED; + fd = self->fd; + break; + default: + ksft_exit_fail(); + break; + } + + return mmap(addr, length, prot, flags, fd, offset); +} + static int userfaultfd(int flags) { return syscall(SYS_userfaultfd, flags); @@ -104,12 +179,7 @@ static bool try_read_write_buf(char *ptr) return try_read_buf(ptr) && try_write_buf(ptr); } -FIXTURE(guard_pages) -{ - unsigned long page_size; -}; - -FIXTURE_SETUP(guard_pages) +static void setup_sighandler(void) { struct sigaction act = { .sa_handler = &handle_fatal, @@ -119,11 +189,9 @@ FIXTURE_SETUP(guard_pages) sigemptyset(&act.sa_mask); if (sigaction(SIGSEGV, &act, NULL)) ksft_exit_fail_perror("sigaction"); +} - self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); -}; - -FIXTURE_TEARDOWN(guard_pages) +static void teardown_sighandler(void) { struct sigaction act = { .sa_handler = SIG_DFL, @@ -134,15 +202,109 @@ FIXTURE_TEARDOWN(guard_pages) sigaction(SIGSEGV, &act, NULL); } -TEST_F(guard_pages, basic) +static int open_file(const char *prefix, char *path) +{ + int fd; + + snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix); + fd = mkstemp(path); + if (fd < 0) + ksft_exit_fail_perror("mkstemp"); + + return fd; +} + +/* Establish a varying pattern in a buffer. */ +static void set_pattern(char *ptr, size_t num_pages, size_t page_size) +{ + size_t i; + + for (i = 0; i < num_pages; i++) { + char *ptr2 = &ptr[i * page_size]; + + memset(ptr2, 'a' + (i % 26), page_size); + } +} + +/* + * Check that a buffer contains the pattern set by set_pattern(), starting at a + * page offset of pgoff within the buffer. + */ +static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size, + size_t pgoff) +{ + size_t i; + + for (i = 0; i < num_pages * page_size; i++) { + size_t offset = pgoff * page_size + i; + char actual = ptr[offset]; + char expected = 'a' + ((offset / page_size) % 26); + + if (actual != expected) + return false; + } + + return true; +} + +/* Check that a buffer contains the pattern set by set_pattern(). */ +static bool check_pattern(char *ptr, size_t num_pages, size_t page_size) +{ + return check_pattern_offset(ptr, num_pages, page_size, 0); +} + +/* Determine if a buffer contains only repetitions of a specified char. 
*/ +static bool is_buf_eq(char *buf, size_t size, char chr) +{ + size_t i; + + for (i = 0; i < size; i++) { + if (buf[i] != chr) + return false; + } + + return true; +} + +FIXTURE_SETUP(guard_regions) +{ + self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); + setup_sighandler(); + + if (variant->backing == ANON_BACKED) + return; + + self->fd = open_file( + variant->backing == SHMEM_BACKED ? "/tmp/" : "", + self->path); + + /* We truncate file to at least 100 pages, tests can modify as needed. */ + ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0); +}; + +FIXTURE_TEARDOWN_PARENT(guard_regions) +{ + teardown_sighandler(); + + if (variant->backing == ANON_BACKED) + return; + + if (self->fd >= 0) + close(self->fd); + + if (self->path[0] != '\0') + unlink(self->path); +} + +TEST_F(guard_regions, basic) { const unsigned long NUM_PAGES = 10; const unsigned long page_size = self->page_size; char *ptr; int i; - ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON, -1, 0); + ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Trivially assert we can touch the first page. */ @@ -228,32 +390,30 @@ TEST_F(guard_pages, basic) } /* Assert that operations applied across multiple VMAs work as expected. */ -TEST_F(guard_pages, multi_vma) +TEST_F(guard_regions, multi_vma) { const unsigned long page_size = self->page_size; char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3; int i; /* Reserve a 100 page region over which we can install VMAs. */ - ptr_region = mmap(NULL, 100 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_region = mmap_(self, variant, NULL, 100 * page_size, + PROT_NONE, 0, 0); ASSERT_NE(ptr_region, MAP_FAILED); /* Place a VMA of 10 pages size at the start of the region. */ - ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr1 = mmap_(self, variant, ptr_region, 10 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr1, MAP_FAILED); /* Place a VMA of 5 pages size 50 pages into the region. */ - ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr2, MAP_FAILED); /* Place a VMA of 20 pages size at the end of the region. */ - ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr3, MAP_FAILED); /* Unmap gaps. */ @@ -323,13 +483,11 @@ TEST_F(guard_pages, multi_vma) } /* Now map incompatible VMAs in the gaps. 
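The basic guard_regions test above reduces to a short userspace recipe. A stand-alone sketch; the MADV_GUARD_* fallback values are an assumption taken from the uapi headers of kernels that support guard regions, in case the libc headers in use do not define them yet:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#ifndef MADV_GUARD_INSTALL
	#define MADV_GUARD_INSTALL 102	/* assumed uapi value */
	#define MADV_GUARD_REMOVE  103	/* assumed uapi value */
	#endif

	int main(void)
	{
		size_t page = sysconf(_SC_PAGESIZE);
		char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;
		memset(buf, 'x', 4 * page);

		/* Guard page 1: any touch of it now raises SIGSEGV. */
		if (madvise(buf + page, page, MADV_GUARD_INSTALL))
			return 1;

		/* Remove the guard; the anon page was zapped, so it reads back as 0. */
		if (madvise(buf + page, page, MADV_GUARD_REMOVE))
			return 1;

		printf("byte after remove: %d, untouched page 0: %c\n",
		       buf[page], buf[0]);
		return 0;
	}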
*/ - ptr = mmap(&ptr_region[10 * page_size], 40 * page_size, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size, + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0); ASSERT_NE(ptr, MAP_FAILED); - ptr = mmap(&ptr_region[55 * page_size], 25 * page_size, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size, + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0); ASSERT_NE(ptr, MAP_FAILED); /* @@ -364,7 +522,7 @@ TEST_F(guard_pages, multi_vma) * Assert that batched operations performed using process_madvise() work as * expected. */ -TEST_F(guard_pages, process_madvise) +TEST_F(guard_regions, process_madvise) { const unsigned long page_size = self->page_size; char *ptr_region, *ptr1, *ptr2, *ptr3; @@ -372,8 +530,8 @@ TEST_F(guard_pages, process_madvise) struct iovec vec[6]; /* Reserve region to map over. */ - ptr_region = mmap(NULL, 100 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_region = mmap_(self, variant, NULL, 100 * page_size, + PROT_NONE, 0, 0); ASSERT_NE(ptr_region, MAP_FAILED); /* @@ -381,9 +539,8 @@ TEST_F(guard_pages, process_madvise) * overwrite existing entries and test this code path against * overwriting existing entries. */ - ptr1 = mmap(&ptr_region[page_size], 10 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0); + ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0); ASSERT_NE(ptr1, MAP_FAILED); /* We want guard markers at start/end of each VMA. */ vec[0].iov_base = ptr1; @@ -392,9 +549,8 @@ TEST_F(guard_pages, process_madvise) vec[1].iov_len = page_size; /* 5 pages offset 50 pages into reserve region. */ - ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr2, MAP_FAILED); vec[2].iov_base = ptr2; vec[2].iov_len = page_size; @@ -402,9 +558,8 @@ TEST_F(guard_pages, process_madvise) vec[3].iov_len = page_size; /* 20 pages offset 79 pages into reserve region. */ - ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr3, MAP_FAILED); vec[4].iov_base = ptr3; vec[4].iov_len = page_size; @@ -459,13 +614,13 @@ TEST_F(guard_pages, process_madvise) } /* Assert that unmapping ranges does not leave guard markers behind. */ -TEST_F(guard_pages, munmap) +TEST_F(guard_regions, munmap) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new1, *ptr_new2; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard first and last pages. 
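The process_madvise() test earlier in this file batches guard installation over several discontiguous ranges in a single call. A hedged stand-alone sketch of that batching; the availability of SYS_process_madvise/SYS_pidfd_open and the MADV_GUARD_INSTALL fallback value from the previous sketch are assumptions about the toolchain and uapi headers:

	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef MADV_GUARD_INSTALL
	#define MADV_GUARD_INSTALL 102	/* assumed uapi value */
	#endif

	/* Returns bytes advised on success, -1 on failure. */
	static long guard_ranges(const struct iovec *vec, size_t n)
	{
		int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
		long ret;

		if (pidfd < 0)
			return -1;
		ret = syscall(SYS_process_madvise, pidfd, vec, n,
			      MADV_GUARD_INSTALL, 0);
		close(pidfd);
		return ret;
	}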
*/ @@ -481,11 +636,11 @@ TEST_F(guard_pages, munmap) ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0); /* Map over them.*/ - ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE, + MAP_FIXED, 0); ASSERT_NE(ptr_new1, MAP_FAILED); - ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new2, MAP_FAILED); /* Assert that they are now not guarded. */ @@ -497,14 +652,14 @@ TEST_F(guard_pages, munmap) } /* Assert that mprotect() operations have no bearing on guard markers. */ -TEST_F(guard_pages, mprotect) +TEST_F(guard_regions, mprotect) { const unsigned long page_size = self->page_size; char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard the middle of the range. */ @@ -545,14 +700,14 @@ TEST_F(guard_pages, mprotect) } /* Split and merge VMAs and make sure guard pages still behave. */ -TEST_F(guard_pages, split_merge) +TEST_F(guard_regions, split_merge) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard the whole range. */ @@ -593,14 +748,14 @@ TEST_F(guard_pages, split_merge) } /* Now map them again - the unmap will have cleared the guards. */ - ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); - ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); - ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); /* Now make sure guard pages are established. */ @@ -676,14 +831,14 @@ TEST_F(guard_pages, split_merge) } /* Assert that MADV_DONTNEED does not remove guard markers. */ -TEST_F(guard_pages, dontneed) +TEST_F(guard_regions, dontneed) { const unsigned long page_size = self->page_size; char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Back the whole range. */ @@ -713,8 +868,16 @@ TEST_F(guard_pages, dontneed) ASSERT_FALSE(result); } else { ASSERT_TRUE(result); - /* Make sure we really did get reset to zero page. */ - ASSERT_EQ(*curr, '\0'); + switch (variant->backing) { + case ANON_BACKED: + /* If anon, then we get a zero page. */ + ASSERT_EQ(*curr, '\0'); + break; + default: + /* Otherwise, we get the file data. */ + ASSERT_EQ(*curr, 'y'); + break; + } } /* Now write... 
*/ @@ -729,14 +892,14 @@ TEST_F(guard_pages, dontneed) } /* Assert that mlock()'ed pages work correctly with guard markers. */ -TEST_F(guard_pages, mlock) +TEST_F(guard_regions, mlock) { const unsigned long page_size = self->page_size; char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Populate. */ @@ -802,14 +965,14 @@ TEST_F(guard_pages, mlock) * * - Moving a mapping alone should retain markers as they are. */ -TEST_F(guard_pages, mremap_move) +TEST_F(guard_regions, mremap_move) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; /* Map 5 pages. */ - ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 5 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Place guard markers at both ends of the 5 page span. */ @@ -823,8 +986,7 @@ TEST_F(guard_pages, mremap_move) /* Map a new region we will move this range into. Doing this ensures * that we have reserved a range to map into. */ - ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE, - -1, 0); + ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0); ASSERT_NE(ptr_new, MAP_FAILED); ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size, @@ -849,14 +1011,14 @@ TEST_F(guard_pages, mremap_move) * will have to remove guard pages manually to fix up (they'd have to do the * same if it were a PROT_NONE mapping). */ -TEST_F(guard_pages, mremap_expand) +TEST_F(guard_regions, mremap_expand) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; /* Map 10 pages... */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* ...But unmap the last 5 so we can ensure we can expand into them. */ ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0); @@ -880,8 +1042,7 @@ TEST_F(guard_pages, mremap_expand) ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size])); /* Reserve a region which we can move to and expand into. */ - ptr_new = mmap(NULL, 20 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0); ASSERT_NE(ptr_new, MAP_FAILED); /* Now move and expand into it. */ @@ -912,15 +1073,15 @@ TEST_F(guard_pages, mremap_expand) * if the user were using a PROT_NONE mapping they'd have to manually fix this * up also so this is OK. */ -TEST_F(guard_pages, mremap_shrink) +TEST_F(guard_regions, mremap_shrink) { const unsigned long page_size = self->page_size; char *ptr; int i; /* Map 5 pages. */ - ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 5 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Place guard markers at both ends of the 5 page span. */ @@ -976,7 +1137,7 @@ TEST_F(guard_pages, mremap_shrink) * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set * retain guard pages. */ -TEST_F(guard_pages, fork) +TEST_F(guard_regions, fork) { const unsigned long page_size = self->page_size; char *ptr; @@ -984,8 +1145,8 @@ TEST_F(guard_pages, fork) int i; /* Map 10 pages. 
*/ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Establish guard pages in the first 5 pages. */ @@ -1031,16 +1192,19 @@ TEST_F(guard_pages, fork) * Assert expected behaviour after we fork populated ranges of anonymous memory * and then guard and unguard the range. */ -TEST_F(guard_pages, fork_cow) +TEST_F(guard_regions, fork_cow) { const unsigned long page_size = self->page_size; char *ptr; pid_t pid; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "CoW only supported on anon mappings"); + /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Populate range. */ @@ -1102,16 +1266,19 @@ TEST_F(guard_pages, fork_cow) * Assert that forking a process with VMAs that do have VM_WIPEONFORK set * behave as expected. */ -TEST_F(guard_pages, fork_wipeonfork) +TEST_F(guard_regions, fork_wipeonfork) { const unsigned long page_size = self->page_size; char *ptr; pid_t pid; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "Wipe on fork only supported on anon mappings"); + /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Mark wipe on fork. */ @@ -1152,15 +1319,18 @@ TEST_F(guard_pages, fork_wipeonfork) } /* Ensure that MADV_FREE retains guard entries as expected. */ -TEST_F(guard_pages, lazyfree) +TEST_F(guard_regions, lazyfree) { const unsigned long page_size = self->page_size; char *ptr; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "MADV_FREE only supported on anon mappings"); + /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1188,14 +1358,14 @@ TEST_F(guard_pages, lazyfree) } /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */ -TEST_F(guard_pages, populate) +TEST_F(guard_regions, populate) { const unsigned long page_size = self->page_size; char *ptr; /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1214,15 +1384,15 @@ TEST_F(guard_pages, populate) } /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */ -TEST_F(guard_pages, cold_pageout) +TEST_F(guard_regions, cold_pageout) { const unsigned long page_size = self->page_size; char *ptr; int i; /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1260,7 +1430,7 @@ TEST_F(guard_pages, cold_pageout) } /* Ensure that guard pages do not break userfaultd. 
*/ -TEST_F(guard_pages, uffd) +TEST_F(guard_regions, uffd) { const unsigned long page_size = self->page_size; int uffd; @@ -1273,6 +1443,9 @@ TEST_F(guard_pages, uffd) struct uffdio_register reg; struct uffdio_range range; + if (!is_anon_backed(variant)) + SKIP(return, "uffd only works on anon backing"); + /* Set up uffd. */ uffd = userfaultfd(0); if (uffd == -1 && errno == EPERM) @@ -1282,8 +1455,8 @@ TEST_F(guard_pages, uffd) ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0); /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Register the range with uffd. */ @@ -1309,4 +1482,593 @@ TEST_F(guard_pages, uffd) ASSERT_EQ(munmap(ptr, 10 * page_size), 0); } +/* + * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we + * aggressively read-ahead, then install guard regions and assert that it + * behaves correctly. + * + * We page out using MADV_PAGEOUT before checking guard regions so we drop page + * cache folios, meaning we maximise the possibility of some broken readahead. + */ +TEST_F(guard_regions, madvise_sequential) +{ + char *ptr; + int i; + const unsigned long page_size = self->page_size; + + if (variant->backing == ANON_BACKED) + SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Establish a pattern of data in the file. */ + set_pattern(ptr, 10, page_size); + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Mark it as being accessed sequentially. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0); + + /* Mark every other page a guard page. */ + for (i = 0; i < 10; i += 2) { + char *ptr2 = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now page it out. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); + + /* Now make sure pages are as expected. */ + for (i = 0; i < 10; i++) { + char *chrp = &ptr[i * page_size]; + + if (i % 2 == 0) { + bool result = try_read_write_buf(chrp); + + ASSERT_FALSE(result); + } else { + ASSERT_EQ(*chrp, 'a' + i); + } + } + + /* Now remove guard pages. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Now make sure all data is as expected. */ + if (!check_pattern(ptr, 10, page_size)) + ASSERT_TRUE(false); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +/* + * Check that file-backed mappings implement guard regions with MAP_PRIVATE + * correctly. + */ +TEST_F(guard_regions, map_private) +{ + const unsigned long page_size = self->page_size; + char *ptr_shared, *ptr_private; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "MAP_PRIVATE test specific to file-backed"); + + ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr_shared, MAP_FAILED); + + /* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */ + ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0); + ASSERT_NE(ptr_private, MAP_FAILED); + + /* Set pattern in shared mapping. */ + set_pattern(ptr_shared, 10, page_size); + + /* Install guard regions in every other page in the shared mapping. 
*/ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_shared[i * page_size]; + + ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0); + } + + for (i = 0; i < 10; i++) { + /* Every even shared page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); + /* Private mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size])); + } + + /* Install guard regions in every other page in the private mapping. */ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_private[i * page_size]; + + ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0); + } + + for (i = 0; i < 10; i++) { + /* Every even shared page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); + /* Every odd private page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0); + } + + /* Remove guard regions from shared mapping. */ + ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0); + + for (i = 0; i < 10; i++) { + /* Shared mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + /* Every even private page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0); + } + + /* Remove guard regions from private mapping. */ + ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); + + for (i = 0; i < 10; i++) { + /* Shared mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + /* Private mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size])); + } + + /* Ensure patterns are intact. */ + ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size)); + ASSERT_TRUE(check_pattern(ptr_private, 10, page_size)); + + /* Now write out every other page to MAP_PRIVATE. */ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_private[i * page_size]; + + memset(ptr, 'a' + i, page_size); + } + + /* + * At this point the mapping is: + * + * 0123456789 + * SPSPSPSPSP + * + * Where S = shared, P = private mappings. + */ + + /* Now mark the beginning of the mapping guarded. */ + ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0); + + /* + * This renders the mapping: + * + * 0123456789 + * xxxxxPSPSP + */ + + for (i = 0; i < 10; i++) { + char *ptr = &ptr_private[i * page_size]; + + /* Ensure guard regions as expected. */ + ASSERT_EQ(try_read_buf(ptr), i >= 5); + /* The shared mapping should always succeed. */ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + } + + /* Remove the guard regions altogether. */ + ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* + * + * We now expect the mapping to be: + * + * 0123456789 + * SSSSSPSPSP + * + * As we removed guard regions, the private pages from the first 5 will + * have been zapped, so on fault will reestablish the shared mapping. + */ + + for (i = 0; i < 10; i++) { + char *ptr = &ptr_private[i * page_size]; + + /* + * Assert that shared mappings in the MAP_PRIVATE mapping match + * the shared mapping. + */ + if (i < 5 || i % 2 == 0) { + char *ptr_s = &ptr_shared[i * page_size]; + + ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0); + continue; + } + + /* Everything else is a private mapping. 
*/ + ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i)); + } + + ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0); + ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0); +} + +/* Test that guard regions established over a read-only mapping function correctly. */ +TEST_F(guard_regions, readonly_file) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Read-only test specific to file-backed"); + + /* Map shared so we can populate with pattern, populate it, unmap. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + /* Close the fd so we can re-open read-only. */ + ASSERT_EQ(close(self->fd), 0); + + /* Re-open read-only. */ + self->fd = open(self->path, O_RDONLY); + ASSERT_NE(self->fd, -1); + /* Re-map read-only. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Mark every other page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_pg = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Assert that the guard regions are in place.*/ + for (i = 0; i < 10; i++) { + char *ptr_pg = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0); + } + + /* Remove guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Ensure the data is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, fault_around) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Fault-around test specific to file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Establish a pattern in the backing file. */ + set_pattern(ptr, 10, page_size); + + /* + * Now drop it from the page cache so we get major faults when next we + * map it. + */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); + + /* Unmap and remap 'to be sure'. */ + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now fault in every odd page. This should trigger fault-around. */ + for (i = 1; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_TRUE(try_read_buf(ptr_p)); + } + + /* Finally, ensure that guard regions are intact as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, truncation) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Truncation test specific to file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* + * Establish a pattern in the backing file, just so there is data + * there. + */ + set_pattern(ptr, 10, page_size); + + /* Now make every even page guarded. 
*/ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + /* Now truncate to actually used size (initialised to 100). */ + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Here the guard regions will remain intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + /* Now truncate to half the size, then truncate again to the full size. */ + ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0); + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Again, guard pages will remain intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, hole_punch) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Truncation test specific to file-backed"); + + /* Establish pattern in mapping. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + + /* Install a guard region in the middle of the mapping. */ + ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size, + MADV_GUARD_INSTALL), 0); + + /* + * The buffer will now be: + * + * 0123456789 + * ***xxxx*** + * + * Where * is data and x is the guard region. + */ + + /* Ensure established. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7); + } + + /* Now hole punch the guarded region. */ + ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size, + MADV_REMOVE), 0); + + /* Ensure guard regions remain. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7); + } + + /* Now remove guard region throughout. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Check that the pattern exists in non-hole punched region. */ + ASSERT_TRUE(check_pattern(ptr, 3, page_size)); + /* Check that hole punched region is zeroed. */ + ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0')); + /* Check that the pattern exists in the remainder of the file. */ + ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7)); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +/* + * Ensure that a memfd works correctly with guard regions, that we can write + * seal it then open the mapping read-only and still establish guard regions + * within, remove those guard regions and have everything work correctly. + */ +TEST_F(guard_regions, memfd_write_seal) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing != SHMEM_BACKED) + SKIP(return, "memfd write seal test specific to shmem"); + + /* OK, we need a memfd, so close existing one. */ + ASSERT_EQ(close(self->fd), 0); + + /* Create and truncate memfd. */ + self->fd = memfd_create("guard_regions_memfd_seals_test", + MFD_ALLOW_SEALING); + ASSERT_NE(self->fd, -1); + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Map, set pattern, unmap. 
*/ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + + /* Write-seal the memfd. */ + ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0); + + /* Now map the memfd readonly. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Ensure pattern is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + /* Now remove guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Ensure pattern is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Ensure write seal intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_FALSE(try_write_buf(ptr_p)); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + + +/* + * Since we are now permitted to establish guard regions in read-only anonymous + * mappings, for the sake of thoroughness, though it probably has no practical + * use, test that guard regions function with a mapping to the anonymous zero + * page. + */ +TEST_F(guard_regions, anon_zeropage) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (!is_anon_backed(variant)) + SKIP(return, "anon zero page test specific to anon/shmem"); + + /* Obtain a read-only i.e. anon zero page mapping. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + /* Now remove all guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_TRUE(try_read_buf(ptr_p)); + } + + /* Ensure zero page...*/ + ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0')); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +/* + * Assert that /proc/$pid/pagemap correctly identifies guard region ranges. + */ +TEST_F(guard_regions, pagemap) +{ + const unsigned long page_size = self->page_size; + int proc_fd; + char *ptr; + int i; + + proc_fd = open("/proc/self/pagemap", O_RDONLY); + ASSERT_NE(proc_fd, -1); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Read from pagemap, and assert no guard regions are detected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + unsigned long entry = pagemap_get_entry(proc_fd, ptr_p); + unsigned long masked = entry & PM_GUARD_REGION; + + ASSERT_EQ(masked, 0); + } + + /* Install a guard region in every other page. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Re-read from pagemap, and assert guard regions are detected. 
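The pagemap test here checks the guard-region bit that /proc/<pid>/pagemap exports per page. A stand-alone sketch of the same lookup; the bit position (58) is an assumption about the pagemap ABI, and is_guard_region() is an invented helper rather than the selftests' pagemap_get_entry():

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/types.h>
	#include <unistd.h>

	#define PM_GUARD_REGION_BIT	58	/* assumed pagemap bit */

	/* Returns 1 if vaddr lies in a guard region, 0 if not, -1 on error. */
	static int is_guard_region(const void *vaddr, size_t page_size)
	{
		uint64_t entry;
		off_t off = (uintptr_t)vaddr / page_size * sizeof(entry);
		int fd = open("/proc/self/pagemap", O_RDONLY);
		int ret = -1;

		if (fd < 0)
			return -1;
		if (pread(fd, &entry, sizeof(entry), off) == sizeof(entry))
			ret = !!(entry & (UINT64_C(1) << PM_GUARD_REGION_BIT));
		close(fd);
		return ret;
	}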
*/ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + unsigned long entry = pagemap_get_entry(proc_fd, ptr_p); + unsigned long masked = entry & PM_GUARD_REGION; + + ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0); + } + + ASSERT_EQ(close(proc_fd), 0); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 9423ad439a61..21595b20bbc3 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -96,13 +96,17 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) int ret; if (ftruncate(fd, size)) { - ksft_test_result_fail("ftruncate() failed\n"); + if (errno == ENOENT) { + skip_test_dodgy_fs("ftruncate()"); + } else { + ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno)); + } return; } if (fallocate(fd, 0, 0, size)) { if (size == pagesize) - ksft_test_result_fail("fallocate() failed\n"); + ksft_test_result_fail("fallocate() failed (%s)\n", strerror(errno)); else ksft_test_result_skip("need more free huge pages\n"); return; @@ -112,7 +116,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) shared ? MAP_SHARED : MAP_PRIVATE, fd, 0); if (mem == MAP_FAILED) { if (size == pagesize || shared) - ksft_test_result_fail("mmap() failed\n"); + ksft_test_result_fail("mmap() failed (%s)\n", strerror(errno)); else ksft_test_result_skip("need more free huge pages\n"); return; @@ -130,7 +134,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) */ ret = mprotect(mem, size, PROT_READ); if (ret) { - ksft_test_result_fail("mprotect() failed\n"); + ksft_test_result_fail("mprotect() failed (%s)\n", strerror(errno)); goto munmap; } /* FALLTHROUGH */ @@ -165,18 +169,20 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) args.flags |= rw ? PIN_LONGTERM_TEST_FLAG_USE_WRITE : 0; ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args); if (ret && errno == EINVAL) { - ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n"); + ksft_test_result_skip("PIN_LONGTERM_TEST_START failed (EINVAL)n"); break; } else if (ret && errno == EFAULT) { ksft_test_result(!should_work, "Should have failed\n"); break; } else if (ret) { - ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n"); + ksft_test_result_fail("PIN_LONGTERM_TEST_START failed (%s)\n", + strerror(errno)); break; } if (ioctl(gup_fd, PIN_LONGTERM_TEST_STOP)) - ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n"); + ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed (%s)\n", + strerror(errno)); /* * TODO: if the kernel ever supports long-term R/W pinning on @@ -202,7 +208,8 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) /* Skip on errors, as we might just lack kernel support. */ ret = io_uring_queue_init(1, &ring, 0); if (ret < 0) { - ksft_test_result_skip("io_uring_queue_init() failed\n"); + ksft_test_result_skip("io_uring_queue_init() failed (%s)\n", + strerror(-ret)); break; } /* @@ -215,13 +222,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) /* Only new kernels return EFAULT. */ if (ret && (errno == ENOSPC || errno == EOPNOTSUPP || errno == EFAULT)) { - ksft_test_result(!should_work, "Should have failed\n"); + ksft_test_result(!should_work, "Should have failed (%s)\n", + strerror(errno)); } else if (ret) { /* * We might just lack support or have insufficient * MEMLOCK limits. 
*/ - ksft_test_result_skip("io_uring_register_buffers() failed\n"); + ksft_test_result_skip("io_uring_register_buffers() failed (%s)\n", + strerror(-ret)); } else { ksft_test_result(should_work, "Should have worked\n"); io_uring_unregister_buffers(&ring); @@ -249,7 +258,7 @@ static void run_with_memfd(test_fn fn, const char *desc) fd = memfd_create("test", 0); if (fd < 0) { - ksft_test_result_fail("memfd_create() failed\n"); + ksft_test_result_fail("memfd_create() failed (%s)\n", strerror(errno)); return; } @@ -266,13 +275,13 @@ static void run_with_tmpfile(test_fn fn, const char *desc) file = tmpfile(); if (!file) { - ksft_test_result_fail("tmpfile() failed\n"); + ksft_test_result_fail("tmpfile() failed (%s)\n", strerror(errno)); return; } fd = fileno(file); if (fd < 0) { - ksft_test_result_fail("fileno() failed\n"); + ksft_test_result_fail("fileno() failed (%s)\n", strerror(errno)); goto close; } @@ -290,12 +299,12 @@ static void run_with_local_tmpfile(test_fn fn, const char *desc) fd = mkstemp(filename); if (fd < 0) { - ksft_test_result_fail("mkstemp() failed\n"); + ksft_test_result_fail("mkstemp() failed (%s)\n", strerror(errno)); return; } if (unlink(filename)) { - ksft_test_result_fail("unlink() failed\n"); + ksft_test_result_fail("unlink() failed (%s)\n", strerror(errno)); goto close; } @@ -317,7 +326,7 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc, fd = memfd_create("test", flags); if (fd < 0) { - ksft_test_result_skip("memfd_create() failed\n"); + ksft_test_result_skip("memfd_create() failed (%s)\n", strerror(errno)); return; } diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c index 5c8a53869b1b..9df2636c829b 100644 --- a/tools/testing/selftests/mm/map_populate.c +++ b/tools/testing/selftests/mm/map_populate.c @@ -18,6 +18,8 @@ #include <unistd.h> #include "../kselftest.h" +#include "vm_util.h" + #define MMAP_SZ 4096 #define BUG_ON(condition, description) \ @@ -87,6 +89,9 @@ int main(int argc, char **argv) BUG_ON(!ftmp, "tmpfile()"); ret = ftruncate(fileno(ftmp), MMAP_SZ); + if (ret < 0 && errno == ENOENT) { + skip_test_dodgy_fs("ftruncate()"); + } BUG_ON(ret, "ftruncate()"); smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE, diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c index 1cd80b0f76c3..b8d7e966f44c 100644 --- a/tools/testing/selftests/mm/mlock-random-test.c +++ b/tools/testing/selftests/mm/mlock-random-test.c @@ -161,9 +161,9 @@ static void test_mlock_within_limit(char *p, int alloc_size) MLOCK_ONFAULT); if (ret) - ksft_exit_fail_msg("%s() failure at |%p(%d)| mlock:|%p(%d)|\n", + ksft_exit_fail_msg("%s() failure (%s) at |%p(%d)| mlock:|%p(%d)|\n", is_mlock ? 
"mlock" : "mlock2", - p, alloc_size, + strerror(errno), p, alloc_size, p + start_offset, lock_size); } diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h index 4417eaa5cfb7..81e77fa41901 100644 --- a/tools/testing/selftests/mm/mlock2.h +++ b/tools/testing/selftests/mm/mlock2.h @@ -6,7 +6,13 @@ static int mlock2_(void *start, size_t len, int flags) { - return syscall(__NR_mlock2, start, len, flags); + int ret = syscall(__NR_mlock2, start, len, flags); + + if (ret) { + errno = ret; + return -1; + } + return 0; } static FILE *seek_to_smaps_entry(unsigned long addr) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 7cc71d942f83..9aff33b10999 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -187,9 +187,10 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then printf "Not enough huge pages available (%d < %d)\n" \ "$freepgs" "$needpgs" fi + HAVE_HUGEPAGES=1 else echo "no hugetlbfs support in kernel?" - exit 1 + HAVE_HUGEPAGES=0 fi # filter 64bit architectures @@ -218,13 +219,20 @@ pretty_name() { # Usage: run_test [test binary] [arbitrary test arguments...] run_test() { if test_selected ${CATEGORY}; then + local skip=0 + # On memory constrainted systems some tests can fail to allocate hugepages. # perform some cleanup before the test for a higher success rate. if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then - echo 3 > /proc/sys/vm/drop_caches - sleep 2 - echo 1 > /proc/sys/vm/compact_memory - sleep 2 + if [ "${HAVE_HUGEPAGES}" = "1" ]; then + echo 3 > /proc/sys/vm/drop_caches + sleep 2 + echo 1 > /proc/sys/vm/compact_memory + sleep 2 + else + echo "hugepages not supported" | tap_prefix + skip=1 + fi fi local test=$(pretty_name "$*") @@ -232,8 +240,12 @@ run_test() { local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -) printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix - ("$@" 2>&1) | tap_prefix - local ret=${PIPESTATUS[0]} + if [ "${skip}" != "1" ]; then + ("$@" 2>&1) | tap_prefix + local ret=${PIPESTATUS[0]} + else + local ret=$ksft_skip + fi count_total=$(( count_total + 1 )) if [ $ret -eq 0 ]; then count_pass=$(( count_pass + 1 )) @@ -271,13 +283,15 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap CATEGORY="hugetlb" run_test ./hugetlb-madvise CATEGORY="hugetlb" run_test ./hugetlb_dio -nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages) -# For this test, we need one and just one huge page -echo 1 > /proc/sys/vm/nr_hugepages -CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv -CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map -# Restore the previous number of huge pages, since further tests rely on it -echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages +if [ "${HAVE_HUGEPAGES}" = "1" ]; then + nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages) + # For this test, we need one and just one huge page + echo 1 > /proc/sys/vm/nr_hugepages + CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv + CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map + # Restore the previous number of huge pages, since further tests rely on it + echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages +fi if test_selected "hugetlb"; then echo "NOTE: These hugetlb tests provide minimal coverage. 
Use" | tap_prefix @@ -311,14 +325,35 @@ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 3 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16 -CATEGORY="userfaultfd" run_test ./uffd-wp-mremap +# uffd-wp-mremap requires at least one page of each size. +have_all_size_hugepgs=true +declare -A nr_size_hugepgs +for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do + old=$(cat $f) + nr_size_hugepgs["$f"]="$old" + if [ "$old" == 0 ]; then + echo 1 > "$f" + fi + if [ $(cat "$f") == 0 ]; then + have_all_size_hugepgs=false + break + fi +done +if $have_all_size_hugepgs; then + CATEGORY="userfaultfd" run_test ./uffd-wp-mremap +else + echo "# SKIP ./uffd-wp-mremap" +fi #cleanup +for f in "${!nr_size_hugepgs[@]}"; do + echo "${nr_size_hugepgs["$f"]}" > "$f" +done echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages CATEGORY="compaction" run_test ./compaction_test -if command -v sudo &> /dev/null; +if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null; then CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit else @@ -381,19 +416,21 @@ CATEGORY="mremap" run_test ./mremap_dontunmap CATEGORY="hmm" run_test bash ./test_hmm.sh smoke # MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests -CATEGORY="madv_guard" run_test ./guard-pages +CATEGORY="madv_guard" run_test ./guard-regions # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests CATEGORY="madv_populate" run_test ./madv_populate if [ -x ./memfd_secret ] then -(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix +(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix CATEGORY="memfd_secret" run_test ./memfd_secret fi # KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 -CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +if [ "${HAVE_HUGEPAGES}" = "1" ]; then + CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +fi # KSM KSM_MERGE_TIME test with size of 100 CATEGORY="ksm" run_test ./ksm_tests -P -s 100 # KSM MADV_MERGEABLE test with 10 identical pages @@ -442,15 +479,17 @@ CATEGORY="thp" run_test ./transhuge-stress -d 20 # Try to create XFS if not provided if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then - if test_selected "thp"; then - if grep xfs /proc/filesystems &>/dev/null; then - XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX) - SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX) - truncate -s 314572800 ${XFS_IMG} - mkfs.xfs -q ${XFS_IMG} - mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH} - MOUNTED_XFS=1 - fi + if [ "${HAVE_HUGEPAGES}" = "1" ]; then + if test_selected "thp"; then + if grep xfs /proc/filesystems &>/dev/null; then + XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX) + SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX) + truncate -s 314572800 ${XFS_IMG} + mkfs.xfs -q ${XFS_IMG} + mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH} + MOUNTED_XFS=1 + fi + fi fi fi diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 3f353f3d070f..aa7400ed0e99 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -5,6 +5,7 @@ */ #define _GNU_SOURCE +#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> @@ -14,6 +15,7 @@ #include <fcntl.h> #include <sys/mman.h> #include <sys/mount.h> +#include <sys/param.h> #include <malloc.h> #include <stdbool.h> 
#include <time.h> @@ -261,18 +263,32 @@ void split_pte_mapped_thp(void) close(kpageflags_fd); } -void split_file_backed_thp(void) +void split_file_backed_thp(int order) { int status; int fd; - ssize_t num_written; char tmpfs_template[] = "/tmp/thp_split_XXXXXX"; const char *tmpfs_loc = mkdtemp(tmpfs_template); char testfile[INPUT_MAX]; + ssize_t num_written, num_read; + char *file_buf1, *file_buf2; uint64_t pgoff_start = 0, pgoff_end = 1024; + int i; ksft_print_msg("Please enable pr_debug in split_huge_pages_in_file() for more info.\n"); + file_buf1 = (char *)malloc(pmd_pagesize); + file_buf2 = (char *)malloc(pmd_pagesize); + + if (!file_buf1 || !file_buf2) { + ksft_print_msg("cannot allocate file buffers\n"); + goto out; + } + + for (i = 0; i < pmd_pagesize; i++) + file_buf1[i] = (char)i; + memset(file_buf2, 0, pmd_pagesize); + status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m"); if (status) @@ -281,26 +297,45 @@ void split_file_backed_thp(void) status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc); if (status >= INPUT_MAX) { ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n"); + goto cleanup; } - fd = open(testfile, O_CREAT|O_WRONLY, 0664); + fd = open(testfile, O_CREAT|O_RDWR, 0664); if (fd == -1) { ksft_perror("Cannot open testing file"); goto cleanup; } - /* write something to the file, so a file-backed THP can be allocated */ - num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1); - close(fd); + /* write pmd size data to the file, so a file-backed THP can be allocated */ + num_written = write(fd, file_buf1, pmd_pagesize); - if (num_written < 1) { - ksft_perror("Fail to write data to testing file"); - goto cleanup; + if (num_written == -1 || num_written != pmd_pagesize) { + ksft_perror("Failed to write data to testing file"); + goto close_file; } /* split the file-backed THP */ - write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0); + write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, order); + + /* check file content after split */ + status = lseek(fd, 0, SEEK_SET); + if (status == -1) { + ksft_perror("Cannot lseek file"); + goto close_file; + } + + num_read = read(fd, file_buf2, num_written); + if (num_read == -1 || num_read != num_written) { + ksft_perror("Cannot read file content back"); + goto close_file; + } + + if (strncmp(file_buf1, file_buf2, pmd_pagesize) != 0) { + ksft_print_msg("File content changed\n"); + goto close_file; + } + close(fd); status = unlink(testfile); if (status) { ksft_perror("Cannot remove testing file"); @@ -318,12 +353,15 @@ void split_file_backed_thp(void) ksft_exit_fail_msg("cannot remove tmp dir: %s\n", strerror(errno)); ksft_print_msg("Please check dmesg for more information\n"); - ksft_test_result_pass("File-backed THP split test done\n"); + ksft_test_result_pass("File-backed THP split to order %d test done\n", order); return; +close_file: + close(fd); cleanup: umount(tmpfs_loc); rmdir(tmpfs_loc); +out: ksft_exit_fail_msg("Error occurred\n"); } @@ -361,6 +399,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, { size_t i; int dummy = 0; + unsigned char buf[1024]; srand(time(NULL)); @@ -368,11 +407,12 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, if (*fd == -1) ksft_exit_fail_msg("Failed to create a file at %s\n", testfile); - for (i = 0; i < fd_size; i++) { - unsigned char byte = (unsigned char)i; + assert(fd_size % sizeof(buf) == 0); + for (i = 0; i < sizeof(buf); i++) + buf[i] = (unsigned char)i; + for (i = 0; 
i < fd_size; i += sizeof(buf)) + write(*fd, buf, sizeof(buf)); - write(*fd, &byte, sizeof(byte)); - } close(*fd); sync(); *fd = open("/proc/sys/vm/drop_caches", O_WRONLY); @@ -420,7 +460,8 @@ err_out_unlink: return -1; } -void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc) +void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc, + int order, int offset) { int fd; char *addr; @@ -438,7 +479,12 @@ void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_l return; err = 0; - write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order); + if (offset == -1) + write_debugfs(PID_FMT, getpid(), (uint64_t)addr, + (uint64_t)addr + fd_size, order); + else + write_debugfs(PID_FMT, getpid(), (uint64_t)addr, + (uint64_t)addr + fd_size, order, offset); for (i = 0; i < fd_size; i++) if (*(addr + i) != (char)i) { @@ -457,9 +503,15 @@ out: munmap(addr, fd_size); close(fd); unlink(testfile); - if (err) - ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order); - ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order); + if (offset == -1) { + if (err) + ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order); + ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order); + } else { + if (err) + ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d at in-folio offset %d failed\n", order, offset); + ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d at in-folio offset %d passed\n", order, offset); + } } int main(int argc, char **argv) @@ -470,6 +522,7 @@ int main(int argc, char **argv) char fs_loc_template[] = "/tmp/thp_fs_XXXXXX"; const char *fs_loc; bool created_tmp; + int offset; ksft_print_header(); @@ -481,7 +534,7 @@ int main(int argc, char **argv) if (argc > 1) optional_xfs_path = argv[1]; - ksft_set_plan(1+8+2+9); + ksft_set_plan(1+8+1+9+9+8*4+2); pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; @@ -498,12 +551,19 @@ int main(int argc, char **argv) split_pmd_thp_to_order(i); split_pte_mapped_thp(); - split_file_backed_thp(); + for (i = 0; i < 9; i++) + split_file_backed_thp(i); created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template, &fs_loc); for (i = 8; i >= 0; i--) - split_thp_in_pagecache_to_order(fd_size, i, fs_loc); + split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1); + + for (i = 0; i < 9; i++) + for (offset = 0; + offset < pmd_pagesize / pagesize; + offset += MAX(pmd_pagesize / pagesize / 4, 1 << i)) + split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset); cleanup_thp_fs(fs_loc, created_tmp); ksft_finished(); diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c index e4370b79b62f..cd5174d735be 100644 --- a/tools/testing/selftests/mm/thuge-gen.c +++ b/tools/testing/selftests/mm/thuge-gen.c @@ -127,7 +127,7 @@ void test_mmap(unsigned long size, unsigned flags) show(size); ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, - "%s mmap\n", __func__); + "%s mmap %lu\n", __func__, size); if (munmap(map, size * NUM_PAGES)) ksft_exit_fail_msg("%s: unmap %s\n", __func__, strerror(errno)); @@ -165,7 +165,7 @@ void test_shmget(unsigned long size, unsigned flags) show(size); ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, - "%s: mmap\n", __func__); + "%s: mmap %lu\n", __func__, size); if (shmdt(map)) ksft_exit_fail_msg("%s: shmdt: %s\n", __func__, 
strerror(errno)); } diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 7ad6ba660c7d..a37088a23ffe 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -10,7 +10,7 @@ #define BASE_PMD_ADDR ((void *)(1UL << 30)) volatile bool test_uffdio_copy_eexist = true; -unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; +unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size; char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap; int uffd = -1, uffd_flags, finished, *pipefd, test_type; bool map_shared; @@ -269,7 +269,7 @@ void uffd_test_ctx_clear(void) size_t i; if (pipefd) { - for (i = 0; i < nr_cpus * 2; ++i) { + for (i = 0; i < nr_parallel * 2; ++i) { if (close(pipefd[i])) err("close pipefd"); } @@ -323,7 +323,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) ret = userfaultfd_open(&features); if (ret) { if (errmsg) - *errmsg = "possible lack of priviledge"; + *errmsg = "possible lack of privilege"; return ret; } @@ -348,7 +348,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) /* * After initialization of area_src, we must explicitly release pages * for area_dst to make sure it's fully empty. Otherwise we could have - * some area_dst pages be errornously initialized with zero pages, + * some area_dst pages be erroneously initialized with zero pages, * hence we could hit memory corruption later in the test. * * One example is when THP is globally enabled, above allocate_area() @@ -365,10 +365,10 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) */ uffd_test_ops->release_pages(area_dst); - pipefd = malloc(sizeof(int) * nr_cpus * 2); + pipefd = malloc(sizeof(int) * nr_parallel * 2); if (!pipefd) err("pipefd"); - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; cpu < nr_parallel; cpu++) if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK)) err("pipe"); diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h index a70ae10b5f62..7700cbfa3975 100644 --- a/tools/testing/selftests/mm/uffd-common.h +++ b/tools/testing/selftests/mm/uffd-common.h @@ -98,7 +98,7 @@ struct uffd_test_case_ops { }; typedef struct uffd_test_case_ops uffd_test_case_ops_t; -extern unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; +extern unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size; extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap; extern int uffd, uffd_flags, finished, *pipefd, test_type; extern bool map_shared; diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 944d559ade21..40af7f67c407 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -180,12 +180,12 @@ static void *background_thread(void *arg) static int stress(struct uffd_args *args) { unsigned long cpu; - pthread_t locking_threads[nr_cpus]; - pthread_t uffd_threads[nr_cpus]; - pthread_t background_threads[nr_cpus]; + pthread_t locking_threads[nr_parallel]; + pthread_t uffd_threads[nr_parallel]; + pthread_t background_threads[nr_parallel]; finished = 0; - for (cpu = 0; cpu < nr_cpus; cpu++) { + for (cpu = 0; cpu < nr_parallel; cpu++) { if (pthread_create(&locking_threads[cpu], &attr, locking_thread, (void *)cpu)) return 1; @@ -203,7 +203,7 @@ static int stress(struct uffd_args *args) background_thread, (void *)cpu)) return 1; } - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; 
cpu < nr_parallel; cpu++) if (pthread_join(background_threads[cpu], NULL)) return 1; @@ -219,11 +219,11 @@ static int stress(struct uffd_args *args) uffd_test_ops->release_pages(area_src); finished = 1; - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; cpu < nr_parallel; cpu++) if (pthread_join(locking_threads[cpu], NULL)) return 1; - for (cpu = 0; cpu < nr_cpus; cpu++) { + for (cpu = 0; cpu < nr_parallel; cpu++) { char c; if (bounces & BOUNCE_POLL) { if (write(pipefd[cpu*2+1], &c, 1) != 1) @@ -246,11 +246,11 @@ static int userfaultfd_stress(void) { void *area; unsigned long nr; - struct uffd_args args[nr_cpus]; + struct uffd_args args[nr_parallel]; uint64_t mem_size = nr_pages * page_size; int flags = 0; - memset(args, 0, sizeof(struct uffd_args) * nr_cpus); + memset(args, 0, sizeof(struct uffd_args) * nr_parallel); if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON) flags = UFFD_FEATURE_WP_UNPOPULATED; @@ -325,7 +325,7 @@ static int userfaultfd_stress(void) */ uffd_test_ops->release_pages(area_dst); - uffd_stats_reset(args, nr_cpus); + uffd_stats_reset(args, nr_parallel); /* bounce pass */ if (stress(args)) { @@ -359,7 +359,7 @@ static int userfaultfd_stress(void) swap(area_src_alias, area_dst_alias); - uffd_stats_report(args, nr_cpus); + uffd_stats_report(args, nr_parallel); } uffd_test_ctx_clear(); @@ -412,8 +412,8 @@ static void parse_test_type_arg(const char *raw_type) * feature. */ - if (uffd_get_features(&features)) - err("failed to get available features"); + if (uffd_get_features(&features) && errno == ENOENT) + ksft_exit_skip("failed to get available features (%d)\n", errno); test_uffdio_wp = test_uffdio_wp && (features & UFFD_FEATURE_PAGEFAULT_FLAG_WP); @@ -435,6 +435,7 @@ static void sigalrm(int sig) int main(int argc, char **argv) { + unsigned long nr_cpus; size_t bytes; if (argc < 4) @@ -454,10 +455,19 @@ int main(int argc, char **argv) } nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (nr_cpus > 32) { + /* Don't let calculation below go to zero. */ + ksft_print_msg("_SC_NPROCESSORS_ONLN (%lu) too large, capping nr_threads to 32\n", + nr_cpus); + nr_parallel = 32; + } else { + nr_parallel = nr_cpus; + } - nr_pages_per_cpu = bytes / page_size / nr_cpus; + nr_pages_per_cpu = bytes / page_size / nr_parallel; if (!nr_pages_per_cpu) { - _err("invalid MiB"); + _err("pages_per_cpu = 0, cannot test (%lu / %lu / %lu)", + bytes, page_size, nr_parallel); usage(); } @@ -466,7 +476,7 @@ int main(int argc, char **argv) _err("invalid bounces"); usage(); } - nr_pages = nr_pages_per_cpu * nr_cpus; + nr_pages = nr_pages_per_cpu * nr_parallel; printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n", nr_pages, nr_pages_per_cpu); diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 74c8bc02b506..e8fd9011c2a3 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -26,6 +26,8 @@ #define ALIGN_UP(x, align_to) \ ((__typeof__(x))((((unsigned long)(x)) + ((align_to)-1)) & ~((align_to)-1))) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) + struct mem_type { const char *name; unsigned int mem_flag; @@ -196,9 +198,10 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test, else page_size = psize(); - nr_pages = UFFD_TEST_MEM_SIZE / page_size; + /* Ensure we have at least 2 pages */ + nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size; /* TODO: remove this global var.. 
it's so ugly */ - nr_cpus = 1; + nr_parallel = 1; /* Initialize test arguments */ args->mem_type = mem_type; diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c index 2c4f984bd73c..c2ba7d46c7b4 100644 --- a/tools/testing/selftests/mm/uffd-wp-mremap.c +++ b/tools/testing/selftests/mm/uffd-wp-mremap.c @@ -182,7 +182,10 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb /* Register range for uffd-wp. */ if (userfaultfd_open(&features)) { - ksft_test_result_fail("userfaultfd_open() failed\n"); + if (errno == ENOENT) + ksft_test_result_skip("userfaultfd not available\n"); + else + ksft_test_result_fail("userfaultfd_open() failed\n"); goto out; } if (uffd_register(uffd, mem, size, false, true, false)) { diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index b60ac68a9dc8..6effafdc4d8a 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -5,11 +5,13 @@ #include <err.h> #include <strings.h> /* ffsl() */ #include <unistd.h> /* _SC_PAGESIZE */ +#include "../kselftest.h" #define BIT_ULL(nr) (1ULL << (nr)) #define PM_SOFT_DIRTY BIT_ULL(55) #define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_UFFD_WP BIT_ULL(57) +#define PM_GUARD_REGION BIT_ULL(58) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) @@ -31,6 +33,23 @@ static inline unsigned int pshift(void) return __page_shift; } +/* + * Plan 9 FS has bugs (at least on QEMU) where certain operations fail with + * ENOENT on unlinked files. See + * https://gitlab.com/qemu-project/qemu/-/issues/103 for some info about such + * bugs. There are rumours of NFS implementations with similar bugs. + * + * Ideally, tests should just detect filesystems known to have such issues and + * bail early. But 9pfs has the additional "feature" that it causes fstatfs to + * pass through the f_type field from the host filesystem. To avoid having to + * scrape /proc/mounts or some other hackery, tests can call this function when + * it seems such a bug might have been encountered. + */ +static inline void skip_test_dodgy_fs(const char *op_name) +{ + ksft_test_result_skip("%s failed with ENOENT. Filesystem might be buggy (9pfs?)\n", op_name); +} + uint64_t pagemap_get_entry(int fd, char *start); bool pagemap_is_softdirty(int fd, char *start); bool pagemap_is_swapped(int fd, char *start); diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h index 3e1b6adc027b..788c597c4fde 100644 --- a/tools/testing/vma/linux/atomic.h +++ b/tools/testing/vma/linux/atomic.h @@ -9,4 +9,9 @@ #define atomic_set(x, y) uatomic_set(x, y) #define U8_MAX UCHAR_MAX +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed uatomic_cmpxchg +#define atomic_cmpxchg_release uatomic_cmpxchg +#endif /* atomic_cmpxchg_relaxed */ + #endif /* _LINUX_ATOMIC_H */ diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 04ab45e27fb8..11f761769b5b 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -74,11 +74,23 @@ static struct vm_area_struct *alloc_vma(struct mm_struct *mm, ret->vm_end = end; ret->vm_pgoff = pgoff; ret->__vm_flags = flags; + vma_assert_detached(ret); return ret; } /* Helper function to allocate a VMA and link it to the tree. 
*/ +static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) +{ + int res; + + res = vma_link(mm, vma); + if (!res) + vma_assert_attached(vma); + return res; +} + +/* Helper function to allocate a VMA and link it to the tree. */ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, unsigned long start, unsigned long end, @@ -90,7 +102,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, if (vma == NULL) return NULL; - if (vma_link(mm, vma)) { + if (attach_vma(mm, vma)) { vm_area_free(vma); return NULL; } @@ -108,6 +120,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, /* Helper function which provides a wrapper around a merge new VMA operation. */ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) { + struct vm_area_struct *vma; /* * For convenience, get prev and next VMAs. Which the new VMA operation * requires. @@ -116,7 +129,11 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) vmg->prev = vma_prev(vmg->vmi); vma_iter_next_range(vmg->vmi); - return vma_merge_new_range(vmg); + vma = vma_merge_new_range(vmg); + if (vma) + vma_assert_attached(vma); + + return vma; } /* @@ -125,7 +142,12 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) */ static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) { - return vma_merge_existing_range(vmg); + struct vm_area_struct *vma; + + vma = vma_merge_existing_range(vmg); + if (vma) + vma_assert_attached(vma); + return vma; } /* @@ -147,13 +169,20 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vma_iter_set(vmg->vmi, start); vmg->prev = NULL; + vmg->middle = NULL; vmg->next = NULL; - vmg->vma = NULL; + vmg->target = NULL; vmg->start = start; vmg->end = end; vmg->pgoff = pgoff; vmg->flags = flags; + + vmg->just_expand = false; + vmg->__remove_middle = false; + vmg->__remove_next = false; + vmg->__adjust_middle_start = false; + vmg->__adjust_next_start = false; } /* @@ -253,8 +282,8 @@ static bool test_simple_merge(void) .pgoff = 1, }; - ASSERT_FALSE(vma_link(&mm, vma_left)); - ASSERT_FALSE(vma_link(&mm, vma_right)); + ASSERT_FALSE(attach_vma(&mm, vma_left)); + ASSERT_FALSE(attach_vma(&mm, vma_right)); vma = merge_new(&vmg); ASSERT_NE(vma, NULL); @@ -278,7 +307,7 @@ static bool test_simple_modify(void) struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags); VMA_ITERATOR(vmi, &mm, 0x1000); - ASSERT_FALSE(vma_link(&mm, init_vma)); + ASSERT_FALSE(attach_vma(&mm, init_vma)); /* * The flags will not be changed, the vma_modify_flags() function @@ -338,13 +367,13 @@ static bool test_simple_expand(void) VMA_ITERATOR(vmi, &mm, 0); struct vma_merge_struct vmg = { .vmi = &vmi, - .vma = vma, + .middle = vma, .start = 0, .end = 0x3000, .pgoff = 0, }; - ASSERT_FALSE(vma_link(&mm, vma)); + ASSERT_FALSE(attach_vma(&mm, vma)); ASSERT_FALSE(expand_existing(&vmg)); @@ -365,7 +394,7 @@ static bool test_simple_shrink(void) struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags); VMA_ITERATOR(vmi, &mm, 0); - ASSERT_FALSE(vma_link(&mm, vma)); + ASSERT_FALSE(attach_vma(&mm, vma)); ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0)); @@ -631,7 +660,7 @@ static bool test_vma_merge_special_flags(void) */ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags); ASSERT_NE(vma, NULL); - vmg.vma = vma; + vmg.middle = vma; for (i = 0; i < ARRAY_SIZE(special_flags); i++) { vm_flags_t special_flag = special_flags[i]; @@ -760,7 +789,7 @@ static bool 
test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; /* * The VMA being modified in a way that would otherwise merge should @@ -787,7 +816,7 @@ static bool test_vma_merge_with_close(void) vma->vm_ops = &vm_ops; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); /* * Initially this is misapprehended as an out of memory report, as the @@ -817,7 +846,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -843,7 +872,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -940,7 +969,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; vmg.prev = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); @@ -973,7 +1002,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags); - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1003,7 +1032,7 @@ static bool test_merge_existing(void) vma->vm_ops = &vm_ops; /* This should have no impact. 
*/ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); @@ -1037,7 +1066,7 @@ static bool test_merge_existing(void) vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1067,7 +1096,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1102,37 +1131,37 @@ static bool test_merge_existing(void) vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -1197,7 +1226,7 @@ static bool test_anon_vma_non_mergeable(void) vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1277,7 +1306,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 0, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma_next; ASSERT_EQ(expand_existing(&vmg), 0); @@ -1309,7 +1338,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1338,7 +1367,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1366,7 +1395,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1394,7 +1423,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; 
ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1432,7 +1461,7 @@ static bool test_vmi_prealloc_fail(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; fail_prealloc = true; @@ -1458,7 +1487,7 @@ static bool test_vmi_prealloc_fail(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 3, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma; fail_prealloc = true; @@ -1515,11 +1544,11 @@ static bool test_copy_vma(void) vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); - ASSERT_NE(vma_new, vma); ASSERT_EQ(vma_new->vm_start, 0); ASSERT_EQ(vma_new->vm_end, 0x2000); ASSERT_EQ(vma_new->vm_pgoff, 0); + vma_assert_attached(vma_new); cleanup_mm(&mm, &vmi); @@ -1528,6 +1557,7 @@ static bool test_copy_vma(void) vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags); vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); + vma_assert_attached(vma_new); ASSERT_EQ(vma_new, vma_next); @@ -1546,7 +1576,7 @@ static bool test_expand_only_mode(void) /* * Place a VMA prior to the one we're expanding so we assert that we do * not erroneously try to traverse to the previous VMA even though we - * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not + * have, through the use of the just_expand flag, indicated we do not * need to do so. */ alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); @@ -1558,7 +1588,7 @@ static bool test_expand_only_mode(void) vma_iter_set(&vmi, 0x3000); vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; vma = vma_merge_new_range(&vmg); ASSERT_NE(vma, NULL); @@ -1569,6 +1599,7 @@ static bool test_expand_only_mode(void) ASSERT_EQ(vma->vm_pgoff, 3); ASSERT_TRUE(vma_write_started(vma)); ASSERT_EQ(vma_iter_addr(&vmi), 0x3000); + vma_assert_attached(vma); cleanup_mm(&mm, &vmi); return true; diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 1eae23039854..572ab2cea763 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -25,7 +25,7 @@ #include <linux/maple_tree.h> #include <linux/mm.h> #include <linux/rbtree.h> -#include <linux/rwsem.h> +#include <linux/refcount.h> extern unsigned long stack_guard_gap; #ifdef CONFIG_MMU @@ -135,10 +135,6 @@ typedef __bitwise unsigned int vm_fault_t; */ #define pr_warn_once pr_err -typedef struct refcount_struct { - atomic_t refs; -} refcount_t; - struct kref { refcount_t refcount; }; @@ -233,15 +229,12 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access */ }; -struct vma_lock { - struct rw_semaphore lock; -}; - - struct file { struct address_space *f_mapping; }; +#define VMA_LOCK_OFFSET 0x40000000 + struct vm_area_struct { /* The first cache line has the info for VMA tree walking. 
*/ @@ -269,16 +262,13 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* Flag to indicate areas detached from the mm->mm_mt tree */ - bool detached; - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -287,20 +277,9 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - struct vma_lock *vm_lock; #endif /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. - * - */ - struct { - struct rb_node rb; - unsigned long rb_subtree_last; - } shared; - - /* * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma * list, after a COW of one of the file pages. A MAP_SHARED vma * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack @@ -319,14 +298,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_ANON_VMA_NAME - /* - * For private and shared anonymous mappings, a pointer to a null - * terminated string containing the name given to the vma, or NULL if - * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. - */ - struct anon_vma_name *anon_name; -#endif #ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; #endif @@ -339,6 +310,27 @@ struct vm_area_struct { #ifdef CONFIG_NUMA_BALANCING struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. */ + refcount_t vm_refcnt; +#endif + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + * + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; +#ifdef CONFIG_ANON_VMA_NAME + /* + * For private and shared anonymous mappings, a pointer to a null + * terminated string containing the name given to the vma, or NULL if + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. + */ + struct anon_vma_name *anon_name; +#endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; @@ -464,26 +456,40 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) return mas_find(&vmi->mas, ULONG_MAX); } -static inline bool vma_lock_alloc(struct vm_area_struct *vma) +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. 
+ */ +static inline void vma_assert_attached(struct vm_area_struct *vma) { - vma->vm_lock = calloc(1, sizeof(struct vma_lock)); - - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = UINT_MAX; + WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); +} - return true; +static inline void vma_assert_detached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); } static inline void vma_assert_write_locked(struct vm_area_struct *); -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_mark_attached(struct vm_area_struct *vma) { - /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + vma_assert_write_locked(vma); + vma_assert_detached(vma); + refcount_set_release(&vma->vm_refcnt, 1); +} + +static inline void vma_mark_detached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_attached(vma); + /* We are the only writer, so no need to use vma_refcount_put(). */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* + * Reader must have temporarily raised vm_refcnt but it will + * drop it without using the vma since vma is write-locked. + */ + } } extern const struct vm_operations_struct vma_dummy_vm_ops; @@ -496,7 +502,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); + vma->vm_lock_seq = UINT_MAX; } static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) @@ -507,10 +513,6 @@ static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - free(vma); - return NULL; - } return vma; } @@ -523,10 +525,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return NULL; memcpy(new, orig, sizeof(*new)); - if (!vma_lock_alloc(new)) { - free(new); - return NULL; - } + refcount_set(&new->vm_refcnt, 0); + new->vm_lock_seq = UINT_MAX; INIT_LIST_HEAD(&new->anon_vma_chain); return new; @@ -696,20 +696,9 @@ static inline void mpol_put(struct mempolicy *) { } -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - free(vma->vm_lock); -} - -static inline void __vm_area_free(struct vm_area_struct *vma) -{ - vma_lock_free(vma); - free(vma); -} - static inline void vm_area_free(struct vm_area_struct *vma) { - __vm_area_free(vma); + free(vma); } static inline void lru_add_drain(void) @@ -796,12 +785,12 @@ static inline void vma_start_write(struct vm_area_struct *vma) static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { (void)vma; (void)start; (void)end; - (void)adjust_next; + (void)next; } static inline void vma_iter_free(struct vma_iterator *vmi) |
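The reworked vma_internal.h above models VMA attachment with vm_refcnt instead of a separate vma_lock: a count of zero means detached, one or more means attached, and vma_mark_detached() tolerates a reader that has transiently raised the count. A minimal userspace sketch of that convention, using C11 atomics purely for illustration; struct fake_vma, mark_attached() and mark_detached() are invented names, not kernel or test-harness API:

/* Illustration only: models the vm_refcnt attach/detach convention. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_vma {
	atomic_uint vm_refcnt;	/* 0 == detached, >= 1 == attached */
};

static void mark_attached(struct fake_vma *vma)
{
	assert(atomic_load(&vma->vm_refcnt) == 0);	/* like vma_assert_detached() */
	atomic_store(&vma->vm_refcnt, 1);
}

static void mark_detached(struct fake_vma *vma)
{
	assert(atomic_load(&vma->vm_refcnt) != 0);	/* like vma_assert_attached() */
	/*
	 * May not drop to zero immediately if a reader raised the count;
	 * such a reader is expected to put its reference without using the
	 * write-locked VMA.
	 */
	atomic_fetch_sub(&vma->vm_refcnt, 1);
}

int main(void)
{
	struct fake_vma vma = { 0 };

	mark_attached(&vma);
	printf("attached, refcnt=%u\n", atomic_load(&vma.vm_refcnt));
	mark_detached(&vma);
	printf("detached, refcnt=%u\n", atomic_load(&vma.vm_refcnt));
	return 0;
}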
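The guard_regions pagemap test earlier in this series relies on the new PM_GUARD_REGION bit (bit 58) exported through /proc/<pid>/pagemap and mirrored in vm_util.h. Roughly the same check can be sketched outside the kselftest harness as below; the MADV_GUARD_INSTALL fallback value of 102 is an assumption for older UAPI headers, and error handling is kept minimal:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102	/* assumed fallback for older headers */
#endif
#define PM_GUARD_REGION (1ULL << 58)

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t entry = 0;
	char *p;
	int fd;

	p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED || madvise(p, psize, MADV_GUARD_INSTALL))
		return 1;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;
	/* pagemap holds one 64-bit entry per virtual page. */
	if (pread(fd, &entry, sizeof(entry),
		  (uintptr_t)p / psize * sizeof(entry)) != (ssize_t)sizeof(entry))
		return 1;

	printf("PM_GUARD_REGION is %s\n",
	       (entry & PM_GUARD_REGION) ? "set" : "clear");
	close(fd);
	return 0;
}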