/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *           used by other architectures                /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE     (256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
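
/*
 * The arithmetic behind PTRTREESIZE, assuming the usual 020/030 page layout
 * (4 KiB pages, 64-entry hardware page tables): one pointer-table entry
 * normally points at a table of 64 page descriptors, so replacing it with a
 * single early termination descriptor maps 64 * 4 KiB = 256 KiB in one go.
 * That is also why the 020/030 IO_SIZE below is 256 KiB.
 */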

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE         PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
        return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE         (256*1024)

static struct vm_struct *iolist;

static struct vm_struct *get_io_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = KMAP_START;
        for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long)tmp->addr)
                        break;
                if (addr > KMAP_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long)tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + IO_SIZE;
        area->next = *p;
        *p = area;
        return area;
}

static inline void free_io_area(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        addr = (void *)((unsigned long)addr & -IO_SIZE);
        for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        __iounmap(tmp->addr, tmp->size);
                        kfree(tmp);
                        return;
                }
        }
}

#endif
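
/*
 * A worked example of the first-fit search in get_io_area() above (addresses
 * assumed purely for illustration): if iolist already holds an area at
 * KMAP_START of stored size 0x80000 and another at KMAP_START + 0x100000, a
 * request for 0x40000 bytes fits in the 0x80000-byte hole between them and
 * is placed at KMAP_START + 0x80000.  Note that each area's stored size
 * includes an extra IO_SIZE of padding, leaving a gap between neighbouring
 * mappings.
 */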

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
        long offset;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        /*
         * Don't allow mappings that wrap..
         */
        if (!size || physaddr > (unsigned long)(-size))
                return NULL;

#ifdef CONFIG_AMIGA
        if (MACH_IS_AMIGA) {
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
                        return (void __iomem *)physaddr;
        }
#endif

#ifdef DEBUG
        printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
        /*
         * Mappings have to be aligned
         */
        offset = physaddr & (IO_SIZE - 1);
        physaddr &= -IO_SIZE;
        size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
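
        /*
         * Worked example with illustrative values, for the 020/030 case
         * where IO_SIZE is 256 KiB: physaddr = 0x00e90005 and size = 0x1000
         * become offset = 0x10005, physaddr = 0x00e80000 and size = 0x40000,
         * i.e. the request is widened to whole 256 KiB chunks, and the
         * caller's offset within the first chunk is added back into the
         * returned address below.
         */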

        /*
         * Ok, go for it..
         */
        area = get_io_area(size);
        if (!area)
                return NULL;

        virtaddr = (unsigned long)area->addr;
        retaddr = virtaddr + offset;
#ifdef DEBUG
        printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

        /*
         * add cache and table flags to physical address
         */
        if (CPU_IS_040_OR_060) {
                physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
                             _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_FULL_CACHING:
                        physaddr |= _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        physaddr |= _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        physaddr |= _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        physaddr |= _PAGE_CACHE040W;
                        break;
                }
        } else {
                physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
                             _PAGE_DIRTY | _PAGE_READWRITE);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        physaddr |= _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        break;
                }
        }

        while ((long)size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PTRTREESIZE-1)))
                        printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        /* Note: the I/O area reserved above is not released
                         * on this (unlikely) failure path. */
                        return NULL;
                }

                if (CPU_IS_020_OR_030) {
                        /* Install one early termination descriptor covering
                         * a whole 256 KiB chunk. */
                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
                        physaddr += PTRTREESIZE;
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
                        pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                /* See the note on the pmd_alloc() failure
                                 * path above. */
                                return NULL;
                        }

                        pte_val(*pte_dir) = physaddr;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
        flush_tlb_all();

        return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
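
/*
 * A minimal usage sketch.  The device address, length and register offset
 * are assumed values for illustration only; on m68k, ioremap() is normally
 * a thin wrapper that calls __ioremap() with IOMAP_NOCACHE_SER, so drivers
 * usually go through ioremap()/iounmap() rather than calling this directly.
 */
#if 0   /* usage sketch, not compiled */
static void kmap_example(void)
{
        /* Map 4 KiB of (hypothetical) device registers, uncached. */
        void __iomem *regs = __ioremap(0x00e90000, 0x1000, IOMAP_NOCACHE_SER);

        if (regs) {
                u8 status = in_8(regs + 0x01);  /* read one device register */

                (void)status;
                iounmap(regs);
        }
}
#endif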

/*
 * Unmap an ioremap()ed region
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
                free_io_area((__force void *)addr);
#else
        free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap unmaps nearly everything, so be careful.
 * It currently doesn't free pointer/page tables, but that facility
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
                        int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

                        if (pmd_type == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = 0;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        } else if (pmd_type == 0) {
                                /* Nothing mapped in this 256 KiB chunk; skip
                                 * it.  We must still advance here, or this
                                 * loop would never terminate. */
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = 0;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        if (CPU_IS_040_OR_060) {
                switch (cmode) {
                case IOMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else {
                switch (cmode) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        cmode = _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        cmode = 0;
                        break;
                }
        }

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
                                                         _CACHEMASK040) | cmode;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
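
/*
 * A minimal usage sketch (buffer and length are assumed to come from an
 * earlier kernel mapping; as the comment above says, the caller must first
 * push any cached data for the range out of the caches):
 */
#if 0   /* usage sketch, not compiled */
static void dma_buffer_cache_off(void *buf, unsigned long len)
{
        /* Make an existing kernel mapping non-cacheable, e.g. for DMA... */
        kernel_set_cachemode(buf, len, IOMAP_NOCACHE_SER);
}

static void dma_buffer_cache_on(void *buf, unsigned long len)
{
        /* ...and restore full caching when the DMA is done. */
        kernel_set_cachemode(buf, len, IOMAP_FULL_CACHING);
}
#endif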