linux/include/linux/highmem-internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
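/*
 * Per-task kmap_local bookkeeping hooks: __kmap_local_sched_out() and
 * __kmap_local_sched_in() temporarily unmap and restore a task's local
 * mappings across a context switch, kmap_local_fork() initializes the
 * bookkeeping for a new task, and kmap_assert_nomap() warns if the current
 * task still holds local mappings where none are expected.
 */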
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
        DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                addr = page_address(page);
        else
                addr = kmap_high(page);
        kmap_flush_tlb((unsigned long)addr);
        return addr;
}

static inline void kunmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
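
/*
 * Illustrative usage sketch (the helper below is hypothetical): kmap() may
 * sleep and therefore cannot be used from atomic context, and kunmap()
 * takes the page rather than the returned address:
 *
 *	static void copy_from_page(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap(page);
 *
 *		memcpy(dst, src, len);
 *		kunmap(page);
 *	}
 */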

static inline struct page *kmap_to_page(void *addr)
{
        return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
        __kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
        return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
        struct page *page = folio_page(folio, offset / PAGE_SIZE);
        return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
        return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
        return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(void *vaddr)
{
        kunmap_local_indexed(vaddr);
}
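
/*
 * Illustrative usage sketch (the helper below is hypothetical):
 * kmap_local_page() mappings are thread local and CPU local; they are
 * released with kunmap_local(), which takes the returned address, in the
 * reverse order of mapping:
 *
 *	static void zero_page_range(struct page *page, size_t off, size_t len)
 *	{
 *		char *addr = kmap_local_page(page);
 *
 *		memset(addr + off, 0, len);
 *		kunmap_local(addr);
 *	}
 */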

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();

        pagefault_disable();
        return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
        return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();

        pagefault_disable();
        return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(void *addr)
{
        kunmap_local_indexed(addr);
        pagefault_enable();
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_enable();
        else
                preempt_enable();
}
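
/*
 * Illustrative usage sketch (the helper below is hypothetical):
 * kmap_atomic() additionally disables pagefaults and preemption (or
 * migration on PREEMPT_RT), so the mapped section must not sleep and is
 * ended by kunmap_atomic() on the returned address.  New code generally
 * prefers kmap_local_page():
 *
 *	static void fill_page(struct page *page, int val)
 *	{
 *		void *addr = kmap_atomic(page);
 *
 *		memset(addr, val, PAGE_SIZE);
 *		kunmap_atomic(addr);
 *	}
 */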

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
        return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
        return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
        atomic_long_add(count, &_totalhigh_pages);
}

#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
        return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
        return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
        return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
        return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(addr);
#endif
}

static inline void *kmap_atomic(struct page *page)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_disable();
        else
                preempt_disable();
        pagefault_disable();
        return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
        return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(addr);
#endif
        pagefault_enable();
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                migrate_enable();
        else
                preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

#endif /* CONFIG_HIGHMEM */

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(__addr)                                   \
do {                                                            \
        BUILD_BUG_ON(__same_type((__addr), struct page *));     \
        __kunmap_atomic(__addr);                                \
} while (0)

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page.  Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping.  See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)                                    \
do {                                                            \
        BUILD_BUG_ON(__same_type((__addr), struct page *));     \
        __kunmap_local(__addr);                                 \
} while (0)

#endif