linux/arch/sparc/mm/generic_64.c
/*
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t *pte,
                                      unsigned long address,
                                      unsigned long size,
                                      unsigned long offset, pgprot_t prot,
                                      int space)
{
        unsigned long end;

        /* clear hack bit that was used as a write_combine side-effect flag */
        offset &= ~0x1UL;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

                /* Pick the largest IO page size (4MB, 512K, then 64K) for
                 * which both the virtual address and the IO offset are
                 * suitably aligned and enough of the range remains;
                 * otherwise fall back to one base page per PTE.
                 */
                entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
                if (!(address & 0xffff)) {
                        if (PAGE_SIZE < (4 * 1024 * 1024) &&
                            !(address & 0x3fffff) &&
                            !(offset & 0x3fffff) &&
                            end >= address + 0x400000) {
                                entry = mk_pte_io(offset, prot, space,
                                                  4 * 1024 * 1024);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (PAGE_SIZE < (512 * 1024) &&
                                   !(address & 0x7ffff) &&
                                   !(offset & 0x7ffff) &&
                                   end >= address + 0x80000) {
                                entry = mk_pte_io(offset, prot, space,
                                                  512 * 1024);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (PAGE_SIZE < (64 * 1024) &&
                                   !(offset & 0xffff) &&
                                   end >= address + 0x10000) {
                                entry = mk_pte_io(offset, prot, space,
                                                  64 * 1024);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;

                if (pte_write(entry))
                        entry = pte_mkdirty(entry);
                do {
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, address, pte, entry);
                        address += PAGE_SIZE;
                        pte_val(entry) += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}
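
/*
 * Worked example of the size selection above, assuming sparc64's 8K base
 * PAGE_SIZE: with address == 0x400000, offset == 0x800000 and at least
 * 4MB left in the range, the 0x3fffff alignment checks pass, so the
 * inner loop installs 512 consecutive 8K PTEs that all carry the 4MB
 * size bits.  Shift the offset to 0x802000 instead and every alignment
 * check fails, leaving one ordinary base page per PTE.
 */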

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long address, unsigned long size,
                                     unsigned long offset, pgprot_t prot,
                                     int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        /* Bias offset so that (address + offset) below yields the IO
         * offset matching each PMD-sized chunk.
         */
        offset -= address;
        do {
                pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(mm, pte, address, end - address,
                                   address + offset, prot, space);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t *pud,
                                     unsigned long address, unsigned long size,
                                     unsigned long offset, pgprot_t prot,
                                     int space)
{
        unsigned long end;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        offset -= address;
        do {
                pmd_t *pmd = pmd_alloc(mm, pud, address);
                int error;

                if (!pmd)
                        return -ENOMEM;
                error = io_remap_pmd_range(mm, pmd, address, end - address,
                                           address + offset, prot, space);
                if (error)
                        return error;
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address < end);
        return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                unsigned long pfn, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t *dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;
        int space = GET_IOSPACE(pfn);
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
        unsigned long phys_base;

        phys_base = offset | (((unsigned long) space) << 32UL);

        /* See comment in mm/memory.c remap_pfn_range */
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
        vma->vm_pgoff = phys_base >> PAGE_SHIFT;

        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);

        while (from < end) {
                pud_t *pud = pud_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pud)
                        break;
                error = io_remap_pud_range(mm, pud, from, end - from,
                                           offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }

        flush_tlb_range(vma, beg, end);
        return error;
}
EXPORT_SYMBOL(io_remap_pfn_range);
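
/*
 * A minimal usage sketch (not part of the original file): a driver's mmap
 * handler would encode the target I/O space and physical page number into
 * the pfn argument, typically with MK_IOSPACE_PFN() (the inverse of the
 * GET_IOSPACE()/GET_PFN() decoding above), and call io_remap_pfn_range().
 * The mydev_* identifiers below are hypothetical placeholders for a real
 * device's resources.
 */
static unsigned long mydev_phys;        /* hypothetical: device physical base */
static int mydev_iospace;               /* hypothetical: bus space identifier */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = MK_IOSPACE_PFN(mydev_iospace,
                                           mydev_phys >> PAGE_SHIFT);

        return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                                  vma->vm_page_prot);
}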