linux/sound/pci/ctxfi/ctvmem.c
/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File    ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
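
/*
 * Worked numbers for the macros above (a sketch, assuming CT_PAGE_SIZE is
 * 4096 and sizeof(void *) is 8, i.e. a typical 64-bit build; both values
 * are build-dependent):
 *
 *   CT_PTES_PER_PAGE  = 4096 / 8   = 512 PTEs per page-table page
 *   CT_ADDRS_PER_PAGE = 512 * 4096 = 2 MiB of device logical address
 *                                    space per page-table page
 *
 * With CT_PTP_NUM page-table pages, the total device logical address
 * space managed here is CT_PTP_NUM * CT_ADDRS_PER_PAGE bytes.
 */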

/*
 * Find or create a vm block based on the requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
        struct ct_vm_block *block = NULL, *entry;
        struct list_head *pos;

        size = CT_PAGE_ALIGN(size);
        if (size > vm->size) {
                dev_err(atc->card->dev,
                        "Insufficient device virtual memory space available!\n");
                return NULL;
        }

        mutex_lock(&vm->lock);
        list_for_each(pos, &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                if (entry->size >= size)
                        break; /* found a block that is big enough */
        }
        if (pos == &vm->unused)
                goto out;

        if (entry->size == size) {
                /* Move the vm node from unused list to used list directly */
                list_move(&entry->list, &vm->used);
                vm->size -= size;
                block = entry;
                goto out;
        }

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                goto out;

        block->addr = entry->addr;
        block->size = size;
        list_add(&block->list, &vm->used);
        entry->addr += size;
        entry->size -= size;
        vm->size -= size;

 out:
        mutex_unlock(&vm->lock);
        return block;
}
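
/*
 * A first-fit split in get_vm_block(), sketched with a freshly created vm
 * whose whole space is one 2 MiB free block (sizes are illustrative):
 *
 *      unused: { addr 0x000000, size 0x200000 }
 *
 *      block = get_vm_block(vm, 3 * CT_PAGE_SIZE, atc);
 *
 *      used:   { addr 0x000000, size 0x003000 }   <- newly split block
 *      unused: { addr 0x003000, size 0x1FD000 }   <- shrunken remainder
 *
 * vm->size drops by 0x3000; an exact-size match would instead move the
 * whole free node to the used list without allocating a new one.
 */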

static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
        struct ct_vm_block *entry, *pre_ent;
        struct list_head *pos, *pre;

        block->size = CT_PAGE_ALIGN(block->size);

        mutex_lock(&vm->lock);
        list_del(&block->list);
        vm->size += block->size;

        list_for_each(pos, &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                if (entry->addr >= (block->addr + block->size))
                        break; /* found a position */
        }
        if (pos == &vm->unused) {
                list_add_tail(&block->list, &vm->unused);
                entry = block;
        } else {
                if ((block->addr + block->size) == entry->addr) {
                        /* merge with the immediately following free block */
                        entry->addr = block->addr;
                        entry->size += block->size;
                        kfree(block);
                } else {
                        __list_add(&block->list, pos->prev, pos);
                        entry = block;
                }
        }

        /* coalesce backwards with adjacent predecessors */
        pos = &entry->list;
        pre = pos->prev;
        while (pre != &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                pre_ent = list_entry(pre, struct ct_vm_block, list);
                /* merge only when exactly adjacent; a gap means the
                 * space in between is still allocated */
                if ((pre_ent->addr + pre_ent->size) != entry->addr)
                        break;

                pre_ent->size += entry->size;
                list_del(pos);
                kfree(entry);
                pos = pre;
                pre = pos->prev;
        }
        mutex_unlock(&vm->lock);
}
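
/*
 * A coalescing sketch for put_vm_block() (addresses illustrative): with
 * unused = { [0x0000, 0x3000), [0x4000, 0x8000) }, freeing the block
 * [0x3000, 0x4000) first merges it forward into [0x3000, 0x8000), then
 * the backward pass folds in [0x0000, 0x3000), leaving a single free
 * block [0x0000, 0x8000).
 */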

/* Map the host pages backing @substream's buffer to device logical addrs. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
        struct ct_vm_block *block;
        unsigned int pte_start;
        unsigned int i, pages;
        unsigned long *ptp;
        struct ct_atc *atc = snd_pcm_substream_chip(substream);

        block = get_vm_block(vm, size, atc);
        if (block == NULL) {
                dev_err(atc->card->dev,
                        "No virtual memory block big enough to allocate!\n");
                return NULL;
        }

        ptp = (unsigned long *)vm->ptp[0].area;
        pte_start = (block->addr >> CT_PAGE_SHIFT);
        pages = block->size >> CT_PAGE_SHIFT;
        for (i = 0; i < pages; i++) {
                unsigned long addr;
                addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
                ptp[pte_start + i] = addr;
        }

        block->size = size;
        return block;
}
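
/*
 * PTE arithmetic in ct_vm_map(), assuming CT_PAGE_SHIFT is 12: a block at
 * device logical address 0x5000 with size 0x3000 occupies PTEs 5..7
 * (pte_start = 0x5000 >> 12 = 5, pages = 0x3000 >> 12 = 3), and each PTE
 * receives the physical address of one host page of the buffer, so the
 * device can translate a logical address L through entry L >> CT_PAGE_SHIFT.
 */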

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
        /* no PTEs need clearing; just return the block to the free pool */
        put_vm_block(vm, block);
}

/*
 * Return the host physical addr of the @index-th device page table page
 * on success, or ~0UL on failure.
 * The first ~0UL returned marks the end of the page table list.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
        return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
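
/*
 * A sketch of how a caller might walk the page-table pages when setting up
 * the hardware (hypothetical loop; the actual register programming lives
 * in the ATC/hw layer, not in this file):
 *
 *      dma_addr_t phys;
 *      int i;
 *
 *      for (i = 0; ; i++) {
 *              phys = vm->get_ptp_phys(vm, i);
 *              if (phys == (dma_addr_t)~0UL)
 *                      break;  // end of the page-table list
 *              ... program phys as the i-th page-table base ...
 *      }
 */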

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
        struct ct_vm *vm;
        struct ct_vm_block *block;
        int i, err = 0;

        *rvm = NULL;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        mutex_init(&vm->lock);

        /* Allocate page table pages */
        for (i = 0; i < CT_PTP_NUM; i++) {
                err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                          snd_dma_pci_data(pci),
                                          PAGE_SIZE, &vm->ptp[i]);
                if (err < 0)
                        break;
        }
        if (err < 0) {
                /* no page table pages were allocated; clean up and bail */
                ct_vm_destroy(vm);
                return -ENOMEM;
        }
        vm->size = CT_ADDRS_PER_PAGE * i;
        vm->map = ct_vm_map;
        vm->unmap = ct_vm_unmap;
        vm->get_ptp_phys = ct_get_ptp_phys;
        INIT_LIST_HEAD(&vm->unused);
        INIT_LIST_HEAD(&vm->used);
        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (block) {
                /* the entire space starts out as one free block */
                block->addr = 0;
                block->size = vm->size;
                list_add(&block->list, &vm->unused);
        }

        *rvm = vm;
        return 0;
}

/* The caller must ensure that no mapped pages are still in use
 * by the hardware before calling this function. */
void ct_vm_destroy(struct ct_vm *vm)
{
        int i;
        struct list_head *pos;
        struct ct_vm_block *entry;

        /* free used and unused list nodes */
        while (!list_empty(&vm->used)) {
                pos = vm->used.next;
                list_del(pos);
                entry = list_entry(pos, struct ct_vm_block, list);
                kfree(entry);
        }
        while (!list_empty(&vm->unused)) {
                pos = vm->unused.next;
                list_del(pos);
                entry = list_entry(pos, struct ct_vm_block, list);
                kfree(entry);
        }

        /* free allocated page table pages */
        for (i = 0; i < CT_PTP_NUM; i++)
                snd_dma_free_pages(&vm->ptp[i]);

        vm->size = 0;

        kfree(vm);
}
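
/*
 * A minimal lifecycle sketch (illustrative only; 'pci', 'substream' and
 * 'buffer_bytes' stand in for the driver's real objects, and error
 * handling is trimmed). This mirrors how the ATC layer is expected to
 * drive this object:
 *
 *      struct ct_vm *vm;
 *      struct ct_vm_block *block;
 *
 *      if (ct_vm_create(&vm, pci) < 0)
 *              return -ENOMEM;
 *
 *      block = vm->map(vm, substream, buffer_bytes);   // fill PTEs
 *      if (!block)
 *              goto error;
 *
 *      // ... stream runs; the chip reaches the buffer through the PTEs ...
 *
 *      vm->unmap(vm, block);   // return the logical range to the pool
 *      ct_vm_destroy(vm);      // free list nodes and page-table pages
 */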