linux/arch/m68k/sun3x/dvma.c
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  The old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK            0x03ffe000
#define IOMMU_CACHE_INHIBIT        0x00000040
#define IOMMU_FULL_BLOCK           0x00000020
#define IOMMU_MODIFIED             0x00000010
#define IOMMU_USED                 0x00000008
#define IOMMU_WRITE_PROTECT        0x00000004
#define IOMMU_DT_MASK              0x00000003
#define IOMMU_DT_INVALID           0x00000000
#define IOMMU_DT_VALID             0x00000001
#define IOMMU_DT_BAD               0x00000002

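/*
 * iommu_pte points directly at the sun3x IOMMU page table at SUN3X_IOMMU.
 * Each 32-bit entry carries a physical page address (IOMMU_ADDR_MASK)
 * together with the status and descriptor-type bits defined above.
 */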
static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;

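/*
 * Helpers for manipulating a single IOMMU page table entry:
 * dvma_entry_set() installs a translation (physical address plus the
 * valid bit; the cache-inhibit variant is compiled out below) and
 * dvma_entry_clr() invalidates the slot again.  dvma_entry_paddr()
 * reads back the physical address, dvma_entry_vaddr() rebuilds a DVMA
 * address from an index and a page offset, and dvma_entry_hash() folds
 * a DVMA address into a small index (defined here but not used below).
 */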
#define dvma_entry_paddr(index)         (iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)   ((index << DVMA_PAGE_SHIFT) |  \
                                         (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                            (addr & IOMMU_ADDR_MASK) | \
                                             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)      (iommu_pte[index] =            \
                                            (addr & IOMMU_ADDR_MASK) | \
                                             IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)           (iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)           ((addr >> DVMA_PAGE_SHIFT) ^ \
                                         ((addr & 0x03c00000) >>     \
                                                (DVMA_PAGE_SHIFT+4)))

#undef DEBUG

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print(unsigned long dvma_addr)
{
        unsigned long index;

        index = dvma_addr >> DVMA_PAGE_SHIFT;

        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
               dvma_entry_paddr(index));
}
#endif

/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
                               unsigned long vaddr, int len)
{
        pgd_t *pgd;
        unsigned long end;
        int ret = 0;

        kaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;

        end = PAGE_ALIGN(vaddr + len);

#ifdef DEBUG
        printk("dvma: mapping kern %08lx to virt %08lx\n",
               kaddr, vaddr);
#endif
        pgd = pgd_offset_k(vaddr);

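        /*
         * Walk the kernel page tables pgd -> pmd -> pte, allocating
         * intermediate levels as needed, and point every pte covering
         * [vaddr, end) at the matching physical page behind kaddr.
         * end2 and end3 clamp the inner loops to the current pgd and
         * pmd entry respectively.
         */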
        do {
                pmd_t *pmd;
                unsigned long end2;

                if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
                        end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
                else
                        end2 = end;

                do {
                        pte_t *pte;
                        unsigned long end3;

                        if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
                                end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
                        else
                                end3 = end2;

                        do {
#ifdef DEBUG
                                printk("mapping %08lx phys to %08lx\n",
                                       __pa(kaddr), vaddr);
#endif
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
                                kaddr += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        } while(vaddr < end3);

                } while(vaddr < end2);

        } while(vaddr < end);

        flush_tlb_all();

 out:
        return ret;
}

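/* load the IOMMU page table slots covering [baddr, baddr+len) with the
   physical pages behind kaddr, so the device sees this buffer at baddr;
   the matching CPU-side mapping is set up separately by dvma_map_cpu() */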
inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
                                 int len)
{
        unsigned long end, index;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = ((baddr+len) >> DVMA_PAGE_SHIFT);

        if(len & ~DVMA_PAGE_MASK)
                end++;

        for(; index < end ; index++) {
//              if(dvma_entry_use(index))
//                      BUG();
//              printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);

                dvma_entry_set(index, __pa(kaddr));

                iommu_pte[index] |= IOMMU_FULL_BLOCK;
//              dvma_entry_inc(index);

                kaddr += DVMA_PAGE_SIZE;
        }

#ifdef DEBUG
        for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
                dvma_print(index << DVMA_PAGE_SHIFT);
#endif
        return 0;
}

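/* invalidate the IOMMU entries covering [baddr, baddr+len); this only
   drops the device-side view, any CPU-side mapping made by dvma_map_cpu()
   is left alone here */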
void dvma_unmap_iommu(unsigned long baddr, int len)
{
        int index, end;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

        for(; index < end ; index++) {
#ifdef DEBUG
                printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
#endif
#if 0
                if(!dvma_entry_use(index))
                        printk("dvma_unmap freeing unused entry %04x\n",
                               index);
                else
                        dvma_entry_dec(index);
#endif
                dvma_entry_clr(index);
        }
}
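
/*
 * Illustrative sketch (not part of this file): a hypothetical caller that
 * has already reserved a DVMA bus range at baddr, a DVMA virtual window at
 * vaddr and a kernel buffer at kaddr would be expected to pair these
 * helpers roughly like this -- the names and error handling below are
 * assumptions for illustration only, not code from this driver:
 *
 *      if (dvma_map_iommu(kaddr, baddr, len))
 *              return -ENOMEM;
 *      if (dvma_map_cpu(kaddr, vaddr, len)) {
 *              dvma_unmap_iommu(baddr, len);
 *              return -ENOMEM;
 *      }
 *
 *      ... run the DMA against baddr, touch the data through vaddr ...
 *
 *      dvma_unmap_iommu(baddr, len);
 */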