linux/mm/damon/paddr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"

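/*
 * rmap_walk() callback: clear the accessed (young) bit of each PTE or PMD
 * that maps @page, so that a later check can tell whether the page has been
 * accessed again.
 */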
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = addr,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte)
                        damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
                else
                        damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
        }
        return true;
}

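/*
 * Mark the page at @paddr as not accessed ("old") by walking its reverse
 * mappings.  Unmapped pages are simply set idle.  Non-anonymous and KSM
 * pages need the page lock for the rmap walk; if the lock cannot be taken
 * without blocking, the page is skipped.
 */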
static void damon_pa_mkold(unsigned long paddr)
{
        struct page *page = damon_get_page(PHYS_PFN(paddr));
        struct rmap_walk_control rwc = {
                .rmap_one = __damon_pa_mkold,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page)
                return;

        if (!page_mapped(page) || !page_rmapping(page)) {
                set_page_idle(page);
                goto out;
        }

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page))
                goto out;

        rmap_walk(page, &rwc);

        if (need_lock)
                unlock_page(page);

out:
        put_page(page);
}

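/* Pick a random sampling address in the region and make its page old. */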
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
                                            struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_pa_mkold(r->sampling_addr);
}

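/* Prepare the access checks for every region of every monitoring target. */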
void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t)
                        __damon_pa_prepare_access_check(ctx, r);
        }
}

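/*
 * Result of an access-check rmap walk: whether the page was accessed, and
 * the size of the mapping through which the access was found.
 */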
struct damon_pa_access_chk_result {
        unsigned long page_sz;
        bool accessed;
};

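/*
 * rmap_walk() callback: report the page as accessed if any PTE or PMD
 * mapping it has the young bit set, the page is not idle, or an MMU
 * notifier reports an access.  The walk stops at the first hit.
 */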
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
{
        struct damon_pa_access_chk_result *result = arg;
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = addr,
        };

        result->accessed = false;
        result->page_sz = PAGE_SIZE;
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
                        result->accessed = pte_young(*pvmw.pte) ||
                                !page_is_idle(page) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        result->accessed = pmd_young(*pvmw.pmd) ||
                                !page_is_idle(page) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                        result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
#else
                        WARN_ON_ONCE(1);
#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
                }
                if (result->accessed) {
                        page_vma_mapped_walk_done(&pvmw);
                        break;
                }
        }

        /* If accessed, stop walking */
        return !result->accessed;
}

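/*
 * Check whether the page at @paddr has been accessed since it was last made
 * old, and update *page_sz to the size of the mapping the result is based on.
 */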
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
        struct page *page = damon_get_page(PHYS_PFN(paddr));
        struct damon_pa_access_chk_result result = {
                .page_sz = PAGE_SIZE,
                .accessed = false,
        };
        struct rmap_walk_control rwc = {
                .arg = &result,
                .rmap_one = __damon_pa_young,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page)
                return false;

        if (!page_mapped(page) || !page_rmapping(page)) {
                if (page_is_idle(page))
                        result.accessed = false;
                else
                        result.accessed = true;
                put_page(page);
                goto out;
        }

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page)) {
                put_page(page);
                return false;
        }

        rmap_walk(page, &rwc);

        if (need_lock)
                unlock_page(page);
        put_page(page);

out:
        *page_sz = result.page_sz;
        return result.accessed;
}

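/*
 * Check access to the sampling address of @r and update r->nr_accesses.
 * The result for the last checked page is cached in static variables, so
 * regions whose sampling addresses fall in the same (possibly huge) page
 * reuse it instead of repeating the rmap walk.
 */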
static void __damon_pa_check_access(struct damon_ctx *ctx,
                                    struct damon_region *r)
{
        static unsigned long last_addr;
        static unsigned long last_page_sz = PAGE_SIZE;
        static bool last_accessed;

        /* If the region is in the last checked page, reuse the result */
        if (ALIGN_DOWN(last_addr, last_page_sz) ==
                                ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
                if (last_accessed)
                        r->nr_accesses++;
                return;
        }

        last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
        if (last_accessed)
                r->nr_accesses++;

        last_addr = r->sampling_addr;
}

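/*
 * Check accesses to every region of every target and return the maximum
 * nr_accesses that was observed.
 */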
unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t) {
                        __damon_pa_check_access(ctx, r);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                }
        }

        return max_nr_accesses;
}

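/* The physical address space is always a valid monitoring target. */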
bool damon_pa_target_valid(void *t)
{
        return true;
}

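/*
 * Apply @scheme to region @r.  Only DAMOS_PAGEOUT is supported: each page in
 * the region has its referenced/young hints cleared, gets isolated from the
 * LRU, and is handed to reclaim_pages(); unevictable pages are put back.
 */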
int damon_pa_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
                struct damon_region *r, struct damos *scheme)
{
        unsigned long addr;
        LIST_HEAD(page_list);

        if (scheme->action != DAMOS_PAGEOUT)
                return -EINVAL;

        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                struct page *page = damon_get_page(PHYS_PFN(addr));

                if (!page)
                        continue;

                ClearPageReferenced(page);
                test_and_clear_page_young(page);
                if (isolate_lru_page(page)) {
                        put_page(page);
                        continue;
                }
                if (PageUnevictable(page)) {
                        putback_lru_page(page);
                } else {
                        list_add(&page->lru, &page_list);
                        put_page(page);
                }
        }
        reclaim_pages(&page_list);
        cond_resched();
        return 0;
}

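/*
 * Return the prioritization score of @r for @scheme.  Only DAMOS_PAGEOUT has
 * a dedicated score function; every other action gets DAMOS_MAX_SCORE.
 */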
int damon_pa_scheme_score(struct damon_ctx *context, struct damon_target *t,
                struct damon_region *r, struct damos *scheme)
{
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_pageout_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}

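/* Register the physical address space monitoring primitives to @ctx. */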
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
        ctx->primitive.init = NULL;
        ctx->primitive.update = NULL;
        ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
        ctx->primitive.check_accesses = damon_pa_check_accesses;
        ctx->primitive.reset_aggregated = NULL;
        ctx->primitive.target_valid = damon_pa_target_valid;
        ctx->primitive.cleanup = NULL;
        ctx->primitive.apply_scheme = damon_pa_apply_scheme;
        ctx->primitive.get_scheme_score = damon_pa_scheme_score;
}