linux/mm/mmu_gather.c
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

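/*
 * Move to the next page batch, allocating a new one if needed.  The
 * allocation is best-effort (GFP_NOWAIT, no warning) and the number of
 * batches is capped at MAX_GATHER_BATCH_COUNT; on failure we return false
 * and the caller is expected to flush what has been gathered so far.
 */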
static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }

        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr   = 0;
        batch->max  = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}

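/*
 * Free all pages queued in the batches (dropping their swap cache entries as
 * well), but keep the batch structures themselves around for reuse: only the
 * page counts are reset and the active batch is rewound to the local one.
 */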
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

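/*
 * Free the dynamically allocated batch structures themselves (everything
 * chained after the embedded tlb->local batch).  Called once the gather is
 * finished, after the queued pages have already been freed.
 */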
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch, *next;

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}

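/*
 * Queue a page for deferred freeing.  Returns true when no more pages can be
 * queued (the current batch is full and no further batch could be obtained),
 * in which case the caller must flush via tlb_flush_mmu() before queueing
 * more pages; returns false if the page was queued successfully.
 */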
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
#endif

        batch = tlb->active;
        /*
         * Add the page and check if we are full. If so
         * force a flush.
         */
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        return false;
}

#endif /* CONFIG_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

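/*
 * Actually free a batch of page-table pages: hand each table back to the
 * architecture via __tlb_remove_table() and then free the batch page itself.
 */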
static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
        int i;

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically do the
 * right thing, since we unlink the page, flush TLBs, then free the page.
 * Because disabling IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 *
 * Architectures that do not have this property (e.g. PPC) need to delay the
 * freeing by some other means; this batching is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail under memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */

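/*
 * Illustrative sketch (not a real caller in this file) of the walker side
 * that the IPI / RCU-sched synchronization above protects.  A lockless
 * walker such as gup_fast() runs with IRQs disabled, which both blocks the
 * IPI broadcast issued below and holds off the RCU-sched grace period:
 *
 *      local_irq_save(flags);
 *      pgd = READ_ONCE(*pgdp);         // walk the lower levels...
 *      ...                             // table pages cannot be freed here
 *      local_irq_restore(flags);
 */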
static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_sync_one(void)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely on
         * IRQ disabling.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
        call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_sync_one(void) { }

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
        __tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
        if (tlb_needs_table_invalidate()) {
                /*
                 * Invalidate page-table caches used by hardware walkers. Then
                 * we still need to RCU-sched wait while freeing the pages
                 * because software walkers can still be in-flight.
                 */
                tlb_flush_mmu_tlbonly(tlb);
        }
}

static void tlb_remove_table_one(void *table)
{
        tlb_remove_table_sync_one();
        __tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                tlb_remove_table_free(*batch);
                *batch = NULL;
        }
}

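/*
 * Queue a page-table page for deferred freeing.  If no batch storage can be
 * allocated (GFP_NOWAIT) we invalidate and free the table synchronously via
 * tlb_remove_table_one(); otherwise the table is queued and the batch is
 * flushed once it reaches MAX_TABLE_BATCH entries.
 */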
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}

static inline void tlb_table_init(struct mmu_gather *tlb)
{
        tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb_batch_pages_flush(tlb);
#endif
}

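/*
 * Flush the TLB for the gathered range and only then free the queued pages
 * and page-table pages.  The ordering matters: freeing before the TLB flush
 * could let another CPU keep using a stale translation into a freed page.
 */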
void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                             bool fullmm)
{
        tlb->mm = mm;
        tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
        tlb->active     = &tlb->local;
        tlb->batch_count = 0;
#endif

        tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        tlb->page_size = 0;
#endif

        __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
        __tlb_gather_mmu(tlb, mm, false);
}

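/*
 * Typical usage, sketched from the pattern of callers such as the munmap()
 * path (illustrative only; the unmap and page-table freeing helpers live
 * outside this file):
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm);
 *      // unmap PTEs: pages get queued via __tlb_remove_page_size()
 *      // free page tables: tables get queued via tlb_remove_table()
 *      tlb_finish_mmu(&tlb);   // flush TLBs, then free everything queued
 */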
/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
        __tlb_gather_mmu(tlb, mm, true);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
        /*
         * If parallel threads are doing PTE changes on the same range under a
         * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
         * flush by batching, one thread may end up seeing inconsistent PTEs
         * and stale TLB entries.  So flush the TLB forcefully if we detect
         * parallel PTE batching threads.
         *
         * However, some syscalls, e.g. munmap(), may free page tables; this
         * needs a forced flush of everything in the given range. Otherwise we
         * may end up with stale TLB entries on architectures, e.g. aarch64,
         * that can specify which level of the TLB to flush.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
                /*
                 * aarch64 yields better performance with fullmm by avoiding
                 * multiple CPUs spamming TLBI messages at the same time.
                 *
                 * On x86, non-fullmm doesn't yield a significant difference
                 * compared to fullmm.
                 */
                tlb->fullmm = 1;
                __tlb_reset_range(tlb);
                tlb->freed_tables = 1;
        }

        tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb_batch_list_free(tlb);
#endif
        dec_tlb_flush_pending(tlb->mm);
}