linux/arch/m68k/sun3x/dvma.c
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  the old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK            0x03ffe000
#define IOMMU_CACHE_INHIBIT        0x00000040
#define IOMMU_FULL_BLOCK           0x00000020
#define IOMMU_MODIFIED             0x00000010
#define IOMMU_USED                 0x00000008
#define IOMMU_WRITE_PROTECT        0x00000004
#define IOMMU_DT_MASK              0x00000003
#define IOMMU_DT_INVALID           0x00000000
#define IOMMU_DT_VALID             0x00000001
#define IOMMU_DT_BAD               0x00000002

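/*
 * Judging purely from the masks above, each IOMMU entry appears to
 * hold the physical page address in bits 25..13 (IOMMU_ADDR_MASK),
 * a two-bit descriptor type in bits 1..0 (IOMMU_DT_*), and the
 * cache-inhibit/full-block/modified/used/write-protect flags in
 * between.
 */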

static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;


#define dvma_entry_paddr(index)         (iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)   ((index << DVMA_PAGE_SHIFT) |  \
                                         (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                            (addr & IOMMU_ADDR_MASK) | \
                                             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                            (addr & IOMMU_ADDR_MASK) | \
                                             IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)           (iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)           ((addr >> DVMA_PAGE_SHIFT) ^ \
                                         ((addr & 0x03c00000) >>     \
                                                (DVMA_PAGE_SHIFT+4)))
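/*
 * For illustration only (values chosen arbitrarily): with the active
 * dvma_entry_set() definition above, dvma_entry_set(3, 0x00246800)
 * stores 0x00246001 in iommu_pte[3] -- the sub-page offset 0x800 is
 * masked off and IOMMU_DT_VALID is set.  The disabled #if 0 variant
 * would additionally set IOMMU_CACHE_INHIBIT, giving 0x00246041.
 * dvma_entry_clr(3) later rewrites the slot as IOMMU_DT_INVALID (0).
 */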

#undef DEBUG

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{

        unsigned long index;

        index = dvma_addr >> DVMA_PAGE_SHIFT;

        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
               dvma_entry_paddr(index));


}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
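/*
 * The nested loops below walk the kernel page tables for the range
 * vaddr..vaddr+len, allocating intermediate tables as needed via
 * pmd_alloc()/pte_alloc_kernel(), splitting the work at PGDIR_SIZE
 * and PMD_SIZE boundaries, and pointing each pte at the matching
 * physical page of the buffer at kaddr before flushing the TLB.
 */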
inline int dvma_map_cpu(unsigned long kaddr,
                               unsigned long vaddr, int len)
{
        pgd_t *pgd;
        unsigned long end;
        int ret = 0;

        kaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;

        end = PAGE_ALIGN(vaddr + len);

#ifdef DEBUG
        printk("dvma: mapping kern %08lx to virt %08lx\n",
               kaddr, vaddr);
#endif
        pgd = pgd_offset_k(vaddr);

        do {
                pmd_t *pmd;
                unsigned long end2;

                if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
                        end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
                else
                        end2 = end;

                do {
                        pte_t *pte;
                        unsigned long end3;

                        if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
                                end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
                        else
                                end3 = end2;

                        do {
#ifdef DEBUG
                                printk("mapping %08lx phys to %08lx\n",
                                       __pa(kaddr), vaddr);
#endif
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
                                kaddr += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        } while(vaddr < end3);

                } while(vaddr < end2);

        } while(vaddr < end);

        flush_tlb_all();

 out:
        return ret;
}


inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
                                 int len)
{
        unsigned long end, index;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = ((baddr+len) >> DVMA_PAGE_SHIFT);

        if(len & ~DVMA_PAGE_MASK)
                end++;

        for(; index < end ; index++) {
//              if(dvma_entry_use(index))
//                      BUG();
//              printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);

                dvma_entry_set(index, __pa(kaddr));

                iommu_pte[index] |= IOMMU_FULL_BLOCK;
//              dvma_entry_inc(index);

                kaddr += DVMA_PAGE_SIZE;
        }

#ifdef DEBUG
        for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
                dvma_print(index << DVMA_PAGE_SHIFT);
#endif
        return 0;

}
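
/*
 * dvma_map_iommu() above fills one IOMMU entry per DVMA page of the
 * bus range baddr..baddr+len, each pointing at the physical address
 * of the corresponding page of the kernel buffer and carrying the
 * IOMMU_FULL_BLOCK flag; dvma_unmap_iommu() below simply marks the
 * covered entries invalid again.
 */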

void dvma_unmap_iommu(unsigned long baddr, int len)
{

        int index, end;


        index = baddr >> DVMA_PAGE_SHIFT;
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

        for(; index < end ; index++) {
#ifdef DEBUG
                printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
#endif
#if 0
                if(!dvma_entry_use(index))
                        printk("dvma_unmap freeing unused entry %04x\n",
                               index);
                else
                        dvma_entry_dec(index);
#endif
                dvma_entry_clr(index);
        }

}
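
/*
 * Illustrative sketch only -- the call sequence below is an assumption
 * about how a caller (e.g. the shared sun3 DVMA allocator, which is not
 * part of this file) might drive the routines above: map a kernel buffer
 * at a DVMA bus address for both the CPU and the device, then tear the
 * IOMMU side down once the transfer is finished.  Here "vaddr" stands
 * for the CPU-visible virtual address chosen for baddr inside the DVMA
 * window; how it is derived is outside the scope of this file.
 *
 *      unsigned long kaddr = (unsigned long)buf;       // kernel buffer
 *
 *      if (dvma_map_cpu(kaddr, vaddr, len) == 0 &&
 *          dvma_map_iommu(kaddr, baddr, len) == 0) {
 *              // program the device with baddr and run the DMA
 *              dvma_unmap_iommu(baddr, len);
 *      }
 */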