linux/arch/sparc/mm/generic_32.c
/*
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

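/*
 * A minimal sketch of the pfn encoding these routines expect, assuming
 * the MK_IOSPACE_PFN()/GET_IOSPACE()/GET_PFN() helpers from
 * <asm/pgtable_32.h>: the obio space selector occupies the top four
 * bits of the pfn argument, matching the 28-bit shift used in
 * io_remap_pfn_range() below.
 *
 *	pfn   = MK_IOSPACE_PFN(space, phys >> PAGE_SHIFT);
 *	space = GET_IOSPACE(pfn);	pfn >> (BITS_PER_LONG - 4)
 *	page  = GET_PFN(pfn);		pfn & 0x0fffffffUL
 */
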
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and does not check the
 * mem_map table, as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t *pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Work within a single pmd: clamp the end to the pmd boundary. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/* Install one I/O pte per page in the range. */
		set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Work within a single pgd: clamp the end to the pgd boundary. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias offset so that "address + offset" yields the obio offset. */
	offset -= address;
	do {
		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		       unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t *dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	vma->vm_pgoff = (offset >> PAGE_SHIFT) |
		((unsigned long)space << 28UL);

	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	/* Walk the range one pgd at a time, allocating pmds as needed. */
	while (from < end) {
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}

	flush_tlb_range(vma, beg, end);
	return error;
}
EXPORT_SYMBOL(io_remap_pfn_range);
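
/*
 * Usage sketch (an illustration under assumed names, not a definitive
 * driver): an mmap handler would pack the obio space and physical page
 * number with MK_IOSPACE_PFN() and hand the result to
 * io_remap_pfn_range(). "example_phys_base" and "example_iospace" are
 * hypothetical values for a made-up device.
 */
#if 0	/* example only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	const unsigned long example_phys_base = 0x10000000UL;	/* hypothetical */
	const int example_iospace = 1;				/* hypothetical */
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = MK_IOSPACE_PFN(example_iospace,
					   example_phys_base >> PAGE_SHIFT);

	/* I/O mappings must not be cached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
				  vma->vm_page_prot);
}
#endif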