linux/arch/m68k/mm/kmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *           used by other architectures                /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
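/*
 * In practice the allocation granularity (IO_SIZE below) is a single
 * page on 040/060, but a whole PMD_SIZE chunk on 020/030, where one
 * early termination descriptor at the pointer-table level maps the
 * entire chunk.
 */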

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

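/*
 * The 040/060 mapping came from get_vm_area(), so masking with
 * PAGE_MASK recovers the address that vfree() expects from the offset
 * pointer handed back by __ioremap().
 */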
static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

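/*
 * Regions allocated from the KMAP area, kept sorted by ascending
 * virtual address; get_io_area() relies on this ordering for its
 * first-fit scan.
 */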
static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * It currently doesn't free pointer/page tables; that wasn't used
 * anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				/*
				 * Early termination descriptor: one pmd
				 * entry maps the whole PMD_SIZE chunk, so
				 * clear it and restart the loop; falling
				 * through would touch the cleared pmd.
				 */
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

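/*
 * First-fit allocation between KMAP_START and KMAP_END. Each region is
 * padded with one extra IO_SIZE chunk (area->size = size + IO_SIZE),
 * leaving a hole between neighbouring mappings; free_io_area() strips
 * the pad again before unmapping.
 */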
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

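/*
 * Callers pass the offset address returned by __ioremap(); rounding
 * down to an IO_SIZE boundary recovers the start of the region so it
 * can be looked up in iolist and unlinked.
 */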
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

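/*
 * Typical use, sketched with made-up numbers (base address, length and
 * register offset are illustrative, not from a real driver):
 *
 *	void __iomem *regs = __ioremap(0x80000000, 0x1000, IOMAP_NOCACHE_SER);
 *
 *	if (regs) {
 *		writeb(1, regs + 0x10);
 *		iounmap(regs);
 *	}
 */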
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
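	/*
	 * Worked example (made-up numbers, IO_SIZE == PAGE_SIZE == 4096):
	 * physaddr = 0x40001234 and size = 0x100 give offset = 0x234,
	 * physaddr = 0x40001000 and size rounded up to 0x1000; the caller
	 * later gets back virtaddr + 0x234.
	 */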
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
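	/*
	 * The 040/060 MMU offers four page cache modes, so each IOMAP_*
	 * value gets its own bits; the 020/030 only distinguishes
	 * cacheable from cache-inhibited, so both NOCACHE modes collapse
	 * into _PAGE_NOCACHE030 and both cached modes use the default.
	 */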
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			/*
			 * 020/030: one early termination descriptor at the
			 * pmd level maps the whole PMD_SIZE chunk.
			 */
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	/*
	 * Mirror the special case in __ioremap(): Amiga addresses in the
	 * 0x40000000-0x60000000 range were handed out 1:1 and have no
	 * iolist entry, so there is nothing to free for them.
	 */
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
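/*
 * Sketch of a call with made-up names, after the caller has pushed any
 * dirty cache lines for the range:
 *
 *	kernel_set_cachemode(buf, PAGE_SIZE, IOMAP_NOCACHE_SER);
 */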
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				/*
				 * Early termination descriptor: switch the
				 * whole PMD_SIZE chunk in one go.
				 */
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);