// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

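/*
 * Register a pointer table that was set up during early boot with the
 * lists above: mark its own slot as used and make the remaining slots
 * in its page available to get_pointer_table().
 */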
void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables (PGD/PMD) or 16 page tables
	 * (PTE).  The page is remapped in virtual address space to be
	 * noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't have SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

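	/* Scan the mark bits for the first free table slot in this page. */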
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

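/*
 * Boot-time page table allocator: hand out PTRS_PER_PTE-entry slices of
 * the most recently allocated (uncached) page, and grab a fresh page
 * from memblock only once last_pte_table has advanced to a page
 * boundary, i.e. the current page has been used up.
 */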
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
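	/*
	 * If that moved us onto a page boundary, the current page has
	 * been used up: allocate and initialize a fresh uncached page
	 * for further pointer tables.
	 */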
	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							     PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

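/*
 * Map one memory chunk (m68k_memory[node]) into the kernel page tables.
 * On 020/030, pgdir- and pmd-sized regions are mapped with (very) early
 * termination descriptors where possible; 040/060 always get full
 * three-level tables.  The region at virtual address 0, if any, gets a
 * real page table with an invalid first entry so page 0 stays unmapped.
 */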
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
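	/*
	 * physaddr doubles as the descriptor value stored in the tables
	 * below, so fold the cache mode and protection bits into it up
	 * front.
	 */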
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

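	/*
	 * Register the memory chunks with memblock, one node per chunk.
	 * Chunks that lie below the first one cannot be reached through
	 * the linear mapping (which is based at m68k_memory[0].addr),
	 * so they are dropped.
	 */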
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * Allocate the page that empty_zero_page will point to: a
	 * permanently zeroed page used for read-only zero mappings.
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}