linux/arch/m68k/sun3/sun3dvma.c
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)           (iommu_use[dvma_index(baddr)])

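/*
 * Free DVMA space is tracked as a list of "holes": hole_list holds the
 * currently free bus-address ranges, hole_cache holds unused hole
 * descriptors, and initholes provides the initial pool of descriptors.
 * The length of each live allocation is recorded in iommu_use[], indexed
 * by DVMA page via dvma_index().
 */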
struct hole {
        unsigned long start;
        unsigned long end;
        unsigned long size;
        struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{

        int i;
        int j = 0;

        printk("dvma entry usage:\n");

        for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
                if(!iommu_use[i])
                        continue;

                j++;

                printk("dvma entry: %08lx len %08lx\n",
                       ( i << DVMA_PAGE_SHIFT) + DVMA_START,
                       iommu_use[i]);
        }

        printk("%d entries in use total\n", j);

        printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
        printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
               dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{

        struct list_head *cur;
        struct hole *hole;

        printk("listing dvma holes\n");
        list_for_each(cur, holes) {
                hole = list_entry(cur, struct hole, list);

                if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
                        continue;

                printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
        }

        printk("end of hole listing...\n");

}
#endif /* DVMA_DEBUG */

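/*
 * Replenish the hole descriptor cache by coalescing adjacent holes on
 * hole_list.  Returns the number of descriptors moved back to hole_cache.
 */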
static inline int refill(void)
{

        struct hole *hole;
        struct hole *prev = NULL;
        struct list_head *cur;
        int ret = 0;

        list_for_each(cur, &hole_list) {
                hole = list_entry(cur, struct hole, list);

                if(!prev) {
                        prev = hole;
                        continue;
                }

                if(hole->end == prev->start) {
                        hole->size += prev->size;
                        hole->end = prev->end;
                        list_move(&(prev->list), &hole_cache);
                        ret++;
                }

        }

        return ret;
}

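/*
 * Take a hole descriptor from hole_cache, refilling the cache from
 * hole_list if it is empty.  BUGs if no descriptor can be obtained.
 */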
static inline struct hole *rmcache(void)
{
        struct hole *ret;

        if(list_empty(&hole_cache)) {
                if(!refill()) {
                        printk("out of dvma hole cache!\n");
                        BUG();
                }
        }

        ret = list_entry(hole_cache.next, struct hole, list);
        list_del(&(ret->list));

        return ret;

}

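/*
 * Carve a bus-address range of 'len' bytes (optionally aligned to 'align')
 * from the high end of the first hole large enough to hold it, and record
 * the allocation length in iommu_use[] so free_baddr() knows how much to
 * give back.
 */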
static inline unsigned long get_baddr(int len, unsigned long align)
{

        struct list_head *cur;
        struct hole *hole;

        if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
                printk("out of dvma holes! (printing hole cache)\n");
                print_holes(&hole_cache);
                print_use();
#endif
                BUG();
        }

        list_for_each(cur, &hole_list) {
                unsigned long newlen;

                hole = list_entry(cur, struct hole, list);

                if(align > DVMA_PAGE_SIZE)
                        newlen = len + ((hole->end - len) & (align-1));
                else
                        newlen = len;

                if(hole->size > newlen) {
                        hole->end -= newlen;
                        hole->size -= newlen;
                        dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
                        dvma_allocs++;
                        dvma_alloc_bytes += newlen;
#endif
                        return hole->end;
                } else if(hole->size == newlen) {
                        list_move(&(hole->list), &hole_cache);
                        dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
                        dvma_allocs++;
                        dvma_alloc_bytes += newlen;
#endif
                        return hole->start;
                }

        }

        printk("unable to find dvma hole!\n");
        BUG();
        return 0;
}

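/*
 * Return a bus-address range to the allocator: look up its length in
 * iommu_use[], tear down the IOMMU mapping, then either extend an adjacent
 * hole or insert a new hole descriptor for the freed range.
 */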
static inline int free_baddr(unsigned long baddr)
{

        unsigned long len;
        struct hole *hole;
        struct list_head *cur;
        unsigned long orig_baddr;

        orig_baddr = baddr;
        len = dvma_entry_use(baddr);
        dvma_entry_use(baddr) = 0;
        baddr &= DVMA_PAGE_MASK;
        dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
        dvma_frees++;
        dvma_free_bytes += len;
#endif

        list_for_each(cur, &hole_list) {
                hole = list_entry(cur, struct hole, list);

                if(hole->end == baddr) {
                        hole->end += len;
                        hole->size += len;
                        return 0;
                } else if(hole->start == (baddr + len)) {
                        hole->start = baddr;
                        hole->size += len;
                        return 0;
                }

        }

        hole = rmcache();

        hole->start = baddr;
        hole->end = baddr + len;
        hole->size = len;

//      list_add_tail(&(hole->list), cur);
        list_add(&(hole->list), cur);

        return 0;

}

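/*
 * Set up the allocator: seed hole_cache with the static descriptors,
 * describe the whole DVMA_START..DVMA_END region as a single free hole,
 * clear the usage table and any stale IOMMU entries, then run the
 * sun3-specific initialization if configured.
 */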
void dvma_init(void)
{

        struct hole *hole;
        int i;

        INIT_LIST_HEAD(&hole_list);
        INIT_LIST_HEAD(&hole_cache);

        /* prepare the hole cache */
        for(i = 0; i < 64; i++)
                list_add(&(initholes[i].list), &hole_cache);

        hole = rmcache();
        hole->start = DVMA_START;
        hole->end = DVMA_END;
        hole->size = DVMA_SIZE;

        list_add(&(hole->list), &hole_list);

        memset(iommu_use, 0, sizeof(iommu_use));

        dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
        sun3_dvma_init();
#endif

}

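/*
 * Map an existing kernel buffer into DVMA space and return the bus address
 * to hand to a device.  The length is rounded up to DVMA pages (a zero
 * length defaults to 0x800 bytes) and the offset within the first page is
 * preserved in the returned address.
 */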
inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

        unsigned long baddr;
        unsigned long off;

        if(!len)
                len = 0x800;

        if(!kaddr || !len) {
//              printk("error: kaddr %lx len %x\n", kaddr, len);
//              *(int *)4 = 0;
                return 0;
        }

#ifdef DEBUG
        printk("dvma_map request %08lx bytes from %08lx\n",
               len, kaddr);
#endif
        off = kaddr & ~DVMA_PAGE_MASK;
        kaddr &= PAGE_MASK;
        len += off;
        len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        if(align == 0)
                align = DVMA_PAGE_SIZE;
        else
                align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        baddr = get_baddr(len, align);
//      printk("using baddr %lx\n", baddr);

        if(!dvma_map_iommu(kaddr, baddr, len))
                return (baddr + off);

        printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
        BUG();
        return 0;
}
EXPORT_SYMBOL(dvma_map_align);

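/*
 * Release a mapping made with dvma_map_align(): restore the 0xf00000 bits
 * for VME-style bus addresses that lack them, then return the range with
 * free_baddr().
 */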
void dvma_unmap(void *baddr)
{
        unsigned long addr;

        addr = (unsigned long)baddr;
        /* check if this is a vme mapping */
        if(!(addr & 0x00f00000))
                addr |= 0xf00000;

        free_baddr(addr);

        return;

}
EXPORT_SYMBOL(dvma_unmap);

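/*
 * Allocate fresh pages and map them for DVMA: get pages from the kernel,
 * map them into the DVMA window with dvma_map_align(), then map the bus
 * range into the CPU's DVMA virtual area and return that virtual address.
 */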
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
        unsigned long kaddr;
        unsigned long baddr;
        unsigned long vaddr;

        if(!len)
                return NULL;

#ifdef DEBUG
        printk("dvma_malloc request %lx bytes\n", len);
#endif
        len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
                return NULL;

        if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
                free_pages(kaddr, get_order(len));
                return NULL;
        }

        vaddr = dvma_btov(baddr);

        if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
                dvma_unmap((void *)baddr);
                free_pages(kaddr, get_order(len));
                return NULL;
        }

#ifdef DEBUG
        printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
               len, kaddr, baddr);
#endif

        return (void *)vaddr;

}
EXPORT_SYMBOL(dvma_malloc_align);

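/*
 * dvma_free() is currently a no-op: memory handed out by
 * dvma_malloc_align() is never returned to the pool.
 */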
void dvma_free(void *vaddr)
{

        return;

}
EXPORT_SYMBOL(dvma_free);