linux/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>


#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
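
/*
 * Worked example of the scheme above (an illustrative sketch matching the
 * code below): for a slot that starts out free and flushed (count == 0),
 *
 *      kmap_high(page)           - map_new_virtual() sets the count to 1
 *                                  for the fresh mapping, then kmap_high()
 *                                  bumps it to 2 for the first user
 *      kunmap_high(page)         - the count drops back to 1: no users
 *                                  left, but the TLB may still cache the
 *                                  translation
 *      flush_all_zero_pkmaps()   - the count becomes 0 and the TLB is
 *                                  flushed: the slot is usable again
 */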
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following
 * family of helper functions in their asm/highmem.h to control the cache
 * color of the virtual addresses where physical memory pages are mapped
 * by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be
 * mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
        return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for mapping inside the PKMAP region for a page with
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
        static unsigned int last_pkmap_nr;

        last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
        return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the
 * given color has wrapped around the PKMAP region end. When this happens,
 * an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
        return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
        return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by the freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
        static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

        return &pkmap_map_wait;
}
#endif  /* get_pkmap_color */
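
/*
 * Illustrative sketch (hypothetical, not taken from any particular port):
 * an architecture whose virtually indexed data cache has two alias colors
 * might override the defaults above in its asm/highmem.h, deriving a
 * page's color from its physical frame number so the page is only ever
 * mapped at virtual addresses of the matching color:
 *
 *      static inline unsigned int get_pkmap_color(struct page *page)
 *      {
 *              return page_to_pfn(page) & 1;   - two colors: pfn parity
 *      }
 *      #define get_pkmap_color get_pkmap_color
 *
 * The remaining helpers would then partition the PKMAP region and its
 * wait queues per color in the same spirit.
 */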

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages(void)
{
        struct zone *zone;
        unsigned int pages = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        pages += zone_page_state(zone, NR_FREE_PAGES);
        }

        return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid a
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
                do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
                do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *kmap_to_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
                int i = PKMAP_NR(addr);
                return pte_page(pkmap_page_table[i]);
        }

        return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);
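
/*
 * Illustrative use of kmap_to_page() (a sketch, not a caller in this
 * file): code that only has a kmap'ed address in hand can recover the
 * struct page behind it, for both pkmap and lowmem addresses:
 *
 *      void *vaddr = kmap(page);
 *      ...
 *      BUG_ON(kmap_to_page(vaddr) != page);
 *      kunmap(page);
 */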

static void flush_all_zero_pkmaps(void)
{
        int i;
        int need_flush = 0;

        flush_cache_kmaps();

        for (i = 0; i < LAST_PKMAP; i++) {
                struct page *page;

                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use. Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped.
                 */
                if (pkmap_count[i] != 1)
                        continue;
                pkmap_count[i] = 0;

                /* sanity check */
                BUG_ON(pte_none(pkmap_page_table[i]));

                /*
                 * Don't need an atomic fetch-and-clear op here;
                 * no-one has the page mapped, and cannot get at
                 * its virtual address (and hence PTE) without first
                 * getting the kmap_lock (which is held here).
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
                pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

                set_page_address(page, NULL);
                need_flush = 1;
        }
        if (need_flush)
                flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
        lock_kmap();
        flush_all_zero_pkmaps();
        unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
        unsigned long vaddr;
        int count;
        unsigned int last_pkmap_nr;
        unsigned int color = get_pkmap_color(page);

start:
        count = get_pkmap_entries_count(color);
        /* Find an empty entry */
        for (;;) {
                last_pkmap_nr = get_next_pkmap_nr(color);
                if (no_more_pkmaps(last_pkmap_nr, color)) {
                        flush_all_zero_pkmaps();
                        count = get_pkmap_entries_count(color);
                }
                if (!pkmap_count[last_pkmap_nr])
                        break;  /* Found a usable entry */
                if (--count)
                        continue;

                /*
                 * Sleep for somebody else to unmap their entries
                 */
                {
                        DECLARE_WAITQUEUE(wait, current);
                        wait_queue_head_t *pkmap_map_wait =
                                get_pkmap_wait_queue_head(color);

                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(pkmap_map_wait, &wait);
                        unlock_kmap();
                        schedule();
                        remove_wait_queue(pkmap_map_wait, &wait);
                        lock_kmap();

                        /* Somebody else might have mapped it while we slept */
                        if (page_address(page))
                                return (unsigned long)page_address(page);

                        /* Re-start */
                        goto start;
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        set_pte_at(&init_mm, vaddr,
                   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

        pkmap_count[last_pkmap_nr] = 1;
        set_page_address(page, (void *)vaddr);

        return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
        unsigned long vaddr;

        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         */
        lock_kmap();
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
        unlock_kmap();
        return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
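
/*
 * Typical call pattern (an illustrative sketch): callers normally go
 * through the kmap()/kunmap() wrappers, which fall back to
 * kmap_high()/kunmap_high() for highmem pages and may therefore sleep:
 *
 *      void *vaddr = kmap(page);
 *      memcpy(vaddr, buf, PAGE_SIZE);  - "buf" is a hypothetical source
 *      kunmap(page);
 *
 * The mapping is only valid between the two calls, and every kmap() must
 * be paired with a kunmap().
 */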

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
        unsigned long vaddr, flags;

        lock_kmap_any(flags);
        vaddr = (unsigned long)page_address(page);
        if (vaddr) {
                BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
                pkmap_count[PKMAP_NR(vaddr)]++;
        }
        unlock_kmap_any(flags);
        return (void *)vaddr;
}
#endif
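
/*
 * Illustrative sketch of the use case behind ARCH_NEEDS_KMAP_HIGH_GET: a
 * path that must not sleep can pin an already existing mapping, do its
 * work and drop the pin; do_flush() is a hypothetical helper:
 *
 *      void *vaddr = kmap_high_get(page);
 *      if (vaddr) {
 *              do_flush(vaddr);
 *              kunmap_high(page);
 *      }
 */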

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
        unsigned long vaddr;
        unsigned long nr;
        unsigned long flags;
        int need_wakeup;
        unsigned int color = get_pkmap_color(page);
        wait_queue_head_t *pkmap_map_wait;

        lock_kmap_any(flags);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);

        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        need_wakeup = 0;
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();
        case 1:
                /*
                 * Avoid an unnecessary wake_up() function call.
                 * The common case is pkmap_count[] == 1, but
                 * no waiters.
                 * The tasks queued in the wait-queue are guarded
                 * by both the lock in the wait-queue-head and by
                 * the kmap_lock.  As the kmap_lock is held here,
                 * no need for the wait-queue-head's lock.  Simply
                 * test if the queue is empty.
                 */
                pkmap_map_wait = get_pkmap_wait_queue_head(color);
                need_wakeup = waitqueue_active(pkmap_map_wait);
        }
        unlock_kmap_any(flags);

        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
                wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif  /* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER   7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
        struct page *page;
        void *virtual;
        struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
        struct list_head lh;                    /* List of page_address_maps */
        spinlock_t lock;                        /* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
        return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
        unsigned long flags;
        void *ret;
        struct page_address_slot *pas;

        if (!PageHighMem(page))
                return lowmem_page_address(page);

        pas = page_slot(page);
        ret = NULL;
        spin_lock_irqsave(&pas->lock, flags);
        if (!list_empty(&pas->lh)) {
                struct page_address_map *pam;

                list_for_each_entry(pam, &pas->lh, list) {
                        if (pam->page == page) {
                                ret = pam->virtual;
                                goto done;
                        }
                }
        }
done:
        spin_unlock_irqrestore(&pas->lock, flags);
        return ret;
}

EXPORT_SYMBOL(page_address);
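
/*
 * Caller-side contract (an illustrative sketch): for lowmem pages
 * page_address() always returns the direct-mapped address; for a highmem
 * page it returns NULL unless a kmap mapping currently exists, which is
 * why kmap_high() above re-checks under kmap_lock:
 *
 *      void *vaddr = page_address(page);
 *      if (!vaddr)
 *              vaddr = kmap(page);     - may sleep; rechecks internally
 */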

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
        unsigned long flags;
        struct page_address_slot *pas;
        struct page_address_map *pam;

        BUG_ON(!PageHighMem(page));

        pas = page_slot(page);
        if (virtual) {          /* Add */
                pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
                pam->page = page;
                pam->virtual = virtual;

                spin_lock_irqsave(&pas->lock, flags);
                list_add_tail(&pam->list, &pas->lh);
                spin_unlock_irqrestore(&pas->lock, flags);
        } else {                /* Remove */
                spin_lock_irqsave(&pas->lock, flags);
                list_for_each_entry(pam, &pas->lh, list) {
                        if (pam->page == page) {
                                list_del(&pam->list);
                                spin_unlock_irqrestore(&pas->lock, flags);
                                goto done;
                        }
                }
                spin_unlock_irqrestore(&pas->lock, flags);
        }
done:
        return;
}

void __init page_address_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
                INIT_LIST_HEAD(&page_address_htable[i].lh);
                spin_lock_init(&page_address_htable[i].lock);
        }
}

#endif  /* defined(HASHED_PAGE_VIRTUAL) */