linux/arch/arm/include/asm/memory.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
#include <asm/kasan_def.h>
/*
 * PAGE_OFFSET: the virtual address of the start of lowmem, i.e. memory
 *   above the virtual address range reserved for userspace.
 * KERNEL_OFFSET: the virtual address of the start of the kernel image.
 *   We may further offset this with TEXT_OFFSET in practice.
 */
#define PAGE_OFFSET             UL(CONFIG_PAGE_OFFSET)
#define KERNEL_OFFSET           (PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#ifndef CONFIG_KASAN
#define TASK_SIZE               (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#else
#define TASK_SIZE               (KASAN_SHADOW_START)
#endif
#define TASK_UNMAPPED_BASE      ALIGN(TASK_SIZE / 3, SZ_16M)
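
/*
 * Worked example (illustrative, not part of the original source): with
 * the common CONFIG_PAGE_OFFSET of 0xc0000000 and KASAN disabled:
 *
 *	TASK_SIZE          = 0xc0000000 - SZ_16M = 0xbf000000
 *	TASK_UNMAPPED_BASE = ALIGN(0xbf000000 / 3, SZ_16M)
 *	                   = ALIGN(0x3faaaaaa, 0x1000000) = 0x40000000
 *
 * so the legacy bottom-up mmap search starts at the 1 GiB mark.
 */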

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26            (UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR           (PAGE_OFFSET - SZ_16M)
#else
/* Smaller range for Thumb-2 symbol relocation (2^24) */
#define MODULES_VADDR           (PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END             (PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END             (PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
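
/*
 * Example (illustrative): an XIP kernel in NOR flash at physical
 * 0x08008000, with MODULES_VADDR at 0xbf000000 (PAGE_OFFSET 0xc0000000,
 * ARM mode), runs at
 *
 *	XIP_VIRT_ADDR(0x08008000) = 0xbf000000 + 0x8000 = 0xbf008000
 *
 * i.e. only the offset within the 1 MiB section survives the translation.
 */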

#define FDT_FIXED_BASE          UL(0xff800000)
#define FDT_FIXED_SIZE          (2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
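
/*
 * Example (illustrative): assuming the usual 1 MiB SECTION_SIZE
 * (non-LPAE), a DTB passed at physical 0x40004000 ends up at
 *
 *	FDT_VIRT_BASE(0x40004000) = 0xff800000 | 0x4000 = (void *)0xff804000
 *
 * since only the offset into the section is kept under the fixed mapping.
 */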

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER       24
#endif

#define VECTORS_BASE            UL(0xffff0000)

#else /* CONFIG_MMU */

#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE            vectors_base
#endif

/*
 * The user task size can grow up to the end of the free RAM region; a
 * fixed limit is hard to define here and would not carry the original
 * meaning of this define anyway. Fortunately, nothing references this
 * value in noMMU mode, for now.
 */
#define TASK_SIZE               UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE      UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM                 (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * Modules can be placed anywhere in RAM in noMMU mode.
 */
#define MODULES_END             (END_MEM)
#define MODULES_VADDR           PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)  (physaddr)
#define FDT_VIRT_BASE(physbase)  ((void *)(physbase))

#endif /* !CONFIG_MMU */

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START            _sdata
#else
#define KERNEL_START            _stext
#endif
#define KERNEL_END              _end

/*
 * We fix the TCM memories, at most 32 KiB of ITCM and 32 KiB of DTCM,
 * at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET     UL(0xfffe0000)
#define DTCM_OFFSET     UL(0xfffe8000)
#endif

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)      (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)      (pfn_to_page(__phys_to_pfn(phys)))
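
/*
 * Usage sketch (hypothetical, not part of this header): for a lowmem
 * pointer these compose with the direct-map helpers defined below, e.g.
 *
 *	struct page *pg = virt_to_page(buf);
 *	phys_addr_t pa  = page_to_phys(pg);	// == __pa(buf) rounded down
 *						// to a page boundary
 */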

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory.  This is used for XIP and NoMMU kernels, and on platforms that don't
 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET        UL(CONFIG_PHYS_OFFSET)

#ifndef __ASSEMBLY__

/*
 * Physical start and end address of the kernel sections. These addresses are
 * 2MB-aligned to match the section mappings placed over the kernel. We use
 * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
 */
extern u64 kernel_sec_start;
extern u64 kernel_sec_end;

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */

#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_23_16 0x810000
#define __PV_BITS_7_0   0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET     ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)

#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr)                        \
        __asm__("@ __pv_stub\n"                         \
        "1:     " instr "       %0, %1, %2\n"           \
        "2:     " instr "       %0, %0, %3\n"           \
        "       .pushsection .pv_table,\"a\"\n"         \
        "       .long   1b - ., 2b - .\n"               \
        "       .popsection\n"                          \
        : "=r" (to)                                     \
        : "r" (from), "I" (__PV_BITS_31_24),            \
          "I"(__PV_BITS_23_16))

#define __pv_add_carry_stub(x, y)                       \
        __asm__("@ __pv_add_carry_stub\n"               \
        "0:     movw    %R0, #0\n"                      \
        "       adds    %Q0, %1, %R0, lsl #20\n"        \
        "1:     mov     %R0, %2\n"                      \
        "       adc     %R0, %R0, #0\n"                 \
        "       .pushsection .pv_table,\"a\"\n"         \
        "       .long   0b - ., 1b - .\n"               \
        "       .popsection\n"                          \
        : "=&r" (y)                                     \
        : "r" (x), "I" (__PV_BITS_7_0)                  \
        : "cc")

#else
#define __pv_stub(from,to,instr)                        \
        __asm__("@ __pv_stub\n"                         \
        "0:     movw    %0, #0\n"                       \
        "       lsl     %0, #21\n"                      \
        "       " instr " %0, %1, %0\n"                 \
        "       .pushsection .pv_table,\"a\"\n"         \
        "       .long   0b - .\n"                       \
        "       .popsection\n"                          \
        : "=&r" (to)                                    \
        : "r" (from))

#define __pv_add_carry_stub(x, y)                       \
        __asm__("@ __pv_add_carry_stub\n"               \
        "0:     movw    %R0, #0\n"                      \
        "       lsls    %R0, #21\n"                     \
        "       adds    %Q0, %1, %R0\n"                 \
        "1:     mvn     %R0, #0\n"                      \
        "       adc     %R0, %R0, #0\n"                 \
        "       .pushsection .pv_table,\"a\"\n"         \
        "       .long   0b - ., 1b - .\n"               \
        "       .popsection\n"                          \
        : "=&r" (y)                                     \
        : "r" (x)                                       \
        : "cc")
#endif
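
/*
 * How the patching works (summary, not part of the original comments):
 * each stub is assembled with a dummy immediate (the __PV_BITS_* values
 * above), and the .pv_table section records the PC-relative location of
 * every such instruction. Early boot code (and fixup_pv_table() for
 * modules) walks that table and rewrites the immediates with the real
 * physical-to-virtual delta, so e.g. a stub built as
 *
 *	add	r0, r1, #0x81000000
 *
 * is patched in place so the immediate becomes the actual runtime
 * offset instead.
 */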

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
        phys_addr_t t;

        if (sizeof(phys_addr_t) == 4) {
                __pv_stub(x, t, "add");
        } else {
                __pv_add_carry_stub(x, t);
        }
        return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
        unsigned long t;

        /*
         * The 'unsigned long' cast discards the upper word when
         * phys_addr_t is 64 bit, and makes sure that the inline
         * assembler expression receives a 32 bit argument where
         * a 32 bit 'r' operand is expected.
         */
        __pv_stub((unsigned long) x, t, "sub");
        return t;
}

#else

#define PHYS_OFFSET     PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
        return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
        return x - PHYS_OFFSET + PAGE_OFFSET;
}
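
/*
 * Worked example (illustrative): with PHYS_OFFSET 0x80000000 and
 * PAGE_OFFSET 0xc0000000, the translation is a constant offset:
 *
 *	__virt_to_phys_nodebug(0xc0345678) = 0x80345678
 *	__phys_to_virt(0x80345678)         = 0xc0345678
 */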

#endif

#define virt_to_pfn(kaddr) \
        ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
         PHYS_PFN_OFFSET)
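
/*
 * Example (illustrative): with 4 KiB pages, PAGE_OFFSET 0xc0000000 and
 * PHYS_PFN_OFFSET 0x80000 (i.e. RAM starting at 0x80000000):
 *
 *	virt_to_pfn(0xc0100000) = (0x00100000 >> 12) + 0x80000 = 0x80100
 */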

#define __pa_symbol_nodebug(x)  __virt_to_phys_nodebug((x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)       __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)   __pa_symbol_nodebug(x)
#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for DMA addresses.  Use the driver DMA support -
 * see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
        return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
        return (void *)__phys_to_virt(x);
}
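
/*
 * Usage sketch (hypothetical, not part of this header): round-tripping
 * a lowmem kernel allocation through the direct map:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);
 *	WARN_ON(phys_to_virt(pa) != buf);	// holds for lowmem only
 *
 * For anything handed to a device, use the dma-mapping.h API instead.
 */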

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)                 __virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)          __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x)                 ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)       __va((phys_addr_t)(pfn) << PAGE_SHIFT)
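
/*
 * Note (summary, not part of the original comments): __pa()/__va() are
 * the generic spellings of the conversions above, and __pa_symbol() is
 * the variant to use on kernel symbols, e.g. __pa_symbol(_text). For a
 * valid lowmem pointer p, __va(__pa(p)) == p.
 */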

extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware-interconnect-supported
 * alias of physical memory for idmap purposes.  Most cases should leave
 * these untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
        return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)

static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
        if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
                addr += arch_phys_to_idmap_offset;
                if (addr > (u32)~0)
                        addr = IDMAP_INVALID_ADDR;
        }
        return addr;
}

static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
        phys_addr_t addr = idmap;

        if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
                addr -= arch_phys_to_idmap_offset;

        return addr;
}

static inline unsigned long __virt_to_idmap(unsigned long x)
{
        return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x)        __virt_to_idmap((unsigned long)(x))
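
/*
 * Usage sketch (hypothetical): code that turns the MMU off, such as CPU
 * reset or kexec paths, must branch to an identity-mapped alias of a
 * function rather than its lowmem virtual address:
 *
 *	typedef void (*phys_reset_t)(unsigned long);
 *	phys_reset_t phys_reset =
 *		(phys_reset_t)virt_to_idmap(cpu_reset_handler);
 *	phys_reset(some_arg);	// runs from the identity mapping
 *
 * cpu_reset_handler/some_arg are placeholders, not names from this file.
 */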

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus   __virt_to_phys
#define __bus_to_virt   __phys_to_virt
#define __pfn_to_bus(x) __pfn_to_phys(x)
#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)   convert a struct page * to a PFN number
 *  pfn_to_page(pfn)    convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)     convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)  indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET         PHYS_PFN_OFFSET

#define virt_to_page(kaddr)     pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)  (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
                                        && pfn_valid(virt_to_pfn(kaddr)))
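
/*
 * Usage sketch (hypothetical): validating an arbitrary pointer before
 * translating it, e.g. in debugging or introspection code:
 *
 *	if (virt_addr_valid(ptr)) {
 *		phys_addr_t pa = __pa(ptr);
 *		pr_debug("phys=%pa\n", &pa);
 *	}
 *
 * Pointers into vmalloc space or highmem fail this check and must not
 * be passed to __pa().
 */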

#endif

#include <asm-generic/memory_model.h>

#endif