linux/arch/m68k/mm/motorola.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mknocache(*ptep);
        }
}

static inline void cache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mkcache(*ptep);
        }
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
        __flush_page_to_ram(page);
        flush_tlb_kernel_page(page);
        nocache_page(page);
}

void mmu_page_dtor(void *page)
{
        cache_page(page);
}

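/*
 * Typical pairing (illustrative sketch only): a page that is about to hold
 * translation tables goes through mmu_page_ctor() right after it has been
 * allocated, and through mmu_page_dtor() right before it is freed again:
 *
 *      page = (void *)get_zeroed_page(GFP_KERNEL);
 *      mmu_page_ctor(page);                    flush it, switch to nocache
 *      ... use the page as pointer/page tables ...
 *      mmu_page_dtor(page);                    back to normal caching
 *      free_page((unsigned long)page);
 *
 * get_pointer_table() and free_pointer_table() below follow exactly this
 * pattern.
 */
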
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */
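
/*
 * Each page in this pool is carved up into several pointer/page tables of
 * ptable_size(type) bytes.  The otherwise unused page->lru field links the
 * page into ptable_list[type], and page->index is reused as a bitmap of
 * free slots (PD_MARKBITS): a set bit means the corresponding table is
 * free, a clear bit means it is in use.
 */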

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
        LIST_HEAD_INIT(ptable_list[0]),
        LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
        7+2, /* PGD, PMD */
        6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

void __init init_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (!(PD_MARKBITS(dp) & mask)) {
                PD_MARKBITS(dp) = ptable_mask(type);
                list_add(dp, &ptable_list[type]);
        }

        PD_MARKBITS(dp) &= ~mask;
        pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

        /* unreserve the page so it's possible to free that page */
        __ClearPageReserved(PD_PAGE(dp));
        init_page_count(PD_PAGE(dp));

        return;
}

void *get_pointer_table(int type)
{
        ptable_desc *dp = ptable_list[type].next;
        unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
        unsigned int tmp, off;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables.  The page is remapped in
         * virtual address space to be noncacheable.
         */
        if (mask == 0) {
                void *page;
                ptable_desc *new;

                if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
                        return NULL;

                if (type == TABLE_PTE) {
                        /*
                         * m68k doesn't have SPLIT_PTE_PTLOCKS because it
                         * does not support SMP.
                         */
                        pgtable_pte_page_ctor(virt_to_page(page));
                }

                mmu_page_ctor(page);

                new = PD_PTABLE(page);
                PD_MARKBITS(new) = ptable_mask(type) - 1;
                list_add_tail(new, dp);

                return (pmd_t *)page;
        }

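        /* Find the first free slot: the lowest set mark bit and its byte offset. */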
        for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
                ;
        PD_MARKBITS(dp) = mask & ~tmp;
        if (!PD_MARKBITS(dp)) {
                /* move to end of list */
                list_move_tail(dp, &ptable_list[type]);
        }
        return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (PD_MARKBITS (dp) & mask)
                panic ("table already free!");

        PD_MARKBITS (dp) |= mask;

        if (PD_MARKBITS(dp) == ptable_mask(type)) {
                /* all tables in page are free, free page */
                list_del(dp);
                mmu_page_dtor((void *)page);
                if (type == TABLE_PTE)
                        pgtable_pte_page_dtor(virt_to_page(page));
                free_page (page);
                return 1;
        } else if (ptable_list[type].next != dp) {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                list_move(dp, &ptable_list[type]);
        }
        return 0;
}

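/*
 * Minimal usage sketch (illustrative only): callers obtain and release
 * individual tables like this, with the backing pool page being allocated
 * and freed behind the scenes:
 *
 *      pte_t *pte = get_pointer_table(TABLE_PTE);
 *      if (!pte)
 *              return NULL;
 *      ...
 *      free_pointer_table(pte, TABLE_PTE);
 *
 * free_pointer_table() returns 1 once the last slot in a pool page has been
 * given back and the page itself has been freed.
 */
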
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

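/*
 * Hand out early kernel page tables in page-sized batches: a fresh uncached
 * page is taken from memblock only when the previous one has been fully
 * consumed (last_pte_table has advanced to a page boundary); otherwise the
 * next PTRS_PER_PTE-entry slice of the current page is returned.
 */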
static pte_t * __init kernel_page_table(void)
{
        pte_t *pte_table = last_pte_table;

        if (PAGE_ALIGNED(last_pte_table)) {
                pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!pte_table) {
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                        __func__, PAGE_SIZE, PAGE_SIZE);
                }

                clear_page(pte_table);
                mmu_page_ctor(pte_table);

                last_pte_table = pte_table;
        }

        last_pte_table += PTRS_PER_PTE;

        return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

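/*
 * Same batching idea for pointer (pmd) tables: the first call scans
 * kernel_pg_dir for the last pointer-table page set up by head.S and keeps
 * filling it; a new uncached page is allocated from memblock only once that
 * page is exhausted.
 */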
static pmd_t * __init kernel_ptr_table(void)
{
        if (!last_pmd_table) {
                unsigned long pmd, last;
                int i;

                /* Find the last ptr table that was used in head.S and
                 * reuse the remaining space in that page for further
                 * ptr tables.
                 */
                last = (unsigned long)kernel_pg_dir;
                for (i = 0; i < PTRS_PER_PGD; i++) {
                        pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

                        if (!pud_present(*pud))
                                continue;
                        pmd = pgd_page_vaddr(kernel_pg_dir[i]);
                        if (pmd > last)
                                last = pmd;
                }

                last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
                printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
        }

        last_pmd_table += PTRS_PER_PMD;
        if (PAGE_ALIGNED(last_pmd_table)) {
                last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!last_pmd_table)
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);

                clear_page(last_pmd_table);
                mmu_page_ctor(last_pmd_table);
        }

        return last_pmd_table;
}

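/*
 * Build the kernel mapping for memory chunk 'node'.  On 020/030, suitably
 * sized and aligned regions are mapped with early termination descriptors
 * at the root or pointer level; everything else gets full page tables, with
 * kernel_ptr_table()/kernel_page_table() supplying the intermediate levels.
 * Virtual address 0 is deliberately left unmapped so that NULL dereferences
 * fault.
 */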
static void __init map_node(int node)
{
        unsigned long physaddr, virtaddr, size;
        pgd_t *pgd_dir;
        p4d_t *p4d_dir;
        pud_t *pud_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        size = m68k_memory[node].size;
        physaddr = m68k_memory[node].addr;
        virtaddr = (unsigned long)phys_to_virt(physaddr);
        physaddr |= m68k_supervisor_cachemode |
                    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
        if (CPU_IS_040_OR_060)
                physaddr |= _PAGE_GLOBAL040;

        while (size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PMD_SIZE-1)))
                        printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
                                virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                if (virtaddr && CPU_IS_020_OR_030) {
                        if (!(virtaddr & (PGDIR_SIZE-1)) &&
                            size >= PGDIR_SIZE) {
#ifdef DEBUG
                                printk ("[very early term]");
#endif
                                pgd_val(*pgd_dir) = physaddr;
                                size -= PGDIR_SIZE;
                                virtaddr += PGDIR_SIZE;
                                physaddr += PGDIR_SIZE;
                                continue;
                        }
                }
                p4d_dir = p4d_offset(pgd_dir, virtaddr);
                pud_dir = pud_offset(p4d_dir, virtaddr);
                if (!pud_present(*pud_dir)) {
                        pmd_dir = kernel_ptr_table();
#ifdef DEBUG
                        printk ("[new pointer %p]", pmd_dir);
#endif
                        pud_set(pud_dir, pmd_dir);
                } else
                        pmd_dir = pmd_offset(pud_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        if (virtaddr) {
#ifdef DEBUG
                                printk ("[early term]");
#endif
                                pmd_val(*pmd_dir) = physaddr;
                                physaddr += PMD_SIZE;
                        } else {
                                int i;
#ifdef DEBUG
                                printk ("[zero map]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);

                                pte_val(*pte_dir++) = 0;
                                physaddr += PAGE_SIZE;
                                for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
                                        pte_val(*pte_dir++) = physaddr;
                        }
                        size -= PMD_SIZE;
                        virtaddr += PMD_SIZE;
                } else {
                        if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
                                printk ("[new table]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);
                        }
                        pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                        if (virtaddr) {
                                if (!pte_present(*pte_dir))
                                        pte_val(*pte_dir) = physaddr;
                        } else
                                pte_val(*pte_dir) = 0;
                        size -= PAGE_SIZE;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                }

        }
#ifdef DEBUG
        printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
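/*
 * paging_init() registers the memory chunks with memblock, reserves the
 * kernel image and the memory already handed out in head.S, maps each chunk
 * with map_node(), allocates empty_zero_page, points SFC/DFC at user data
 * space and finally calls free_area_init() with the zone limits.
 */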
void __init paging_init(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
        unsigned long addr;
        int i;

#ifdef DEBUG
        printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

        /* Fix the cache mode in the page descriptors for the 680[46]0.  */
        if (CPU_IS_040_OR_060) {
                int i;
#ifndef mm_cachebits
                mm_cachebits = _PAGE_CACHE040;
#endif
                for (i = 0; i < 16; i++)
                        pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
        }

        min_addr = m68k_memory[0].addr;
        max_addr = min_addr + m68k_memory[0].size;
        memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
        for (i = 1; i < m68k_num_memory;) {
                if (m68k_memory[i].addr < min_addr) {
                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
                                m68k_memory[i].addr, m68k_memory[i].size);
                        printk("Fix your bootloader or use a memfile to make use of this area!\n");
                        m68k_num_memory--;
                        memmove(m68k_memory + i, m68k_memory + i + 1,
                                (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
                        continue;
                }
                memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
                addr = m68k_memory[i].addr + m68k_memory[i].size;
                if (addr > max_addr)
                        max_addr = addr;
                i++;
        }
        m68k_memoffset = min_addr - PAGE_OFFSET;
        m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

        module_fixup(NULL, __start_fixup, __stop_fixup);
        flush_icache();

        high_memory = phys_to_virt(max_addr);

        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

        /* Reserve kernel text/data/bss and the memory allocated in head.S */
        memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

        /*
         * Map the physical memory available into the kernel virtual
         * address space. Make sure memblock will not try to allocate
         * pages beyond the memory we already mapped in head.S
         */
        memblock_set_bottom_up(true);

        for (i = 0; i < m68k_num_memory; i++) {
                m68k_setup_node(i);
                map_node(i);
        }

        flush_tlb_all();

        /*
         * Allocate the page that will back empty_zero_page, the shared
         * zero-filled page.
         */
        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        /*
         * Set up SFC/DFC registers
         */
        set_fc(USER_DATA);

#ifdef DEBUG
        printk ("before free_area_init\n");
#endif
        for (i = 0; i < m68k_num_memory; i++)
                if (node_present_pages(i))
                        node_set_state(i, N_NORMAL_MEMORY);

        max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
        free_area_init(max_zone_pfn);
}