linux/sound/pci/emu10k1/memory.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* the "page" argument of this macro is an Emu page (4096 bytes),
 * not an aligned (kernel-sized) page as used elsewhere in this file
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))

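/*
 * Note: every address written into the PTB is EMUPAGESIZE-aligned
 * (is_valid_page() below enforces this), so its low bits are zero and
 * the page index can share the same 32-bit entry with the address.
 */
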
#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0        (MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1        (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

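/*
 * Illustration: with 16 KiB kernel pages, UNIT_PAGES is 4, i.e. one
 * aligned (kernel) page covers four consecutive PTB entries; with 4 KiB
 * kernel pages the two page sizes coincide and UNIT_PAGES is 1.
 */
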
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to the page with the silent-page pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment addr -- all entries point at the silent page */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */


static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}
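
/*
 * Example (illustrative numbers): with 4 KiB pages, a block at
 * mem.offset 0x1800 with mem.size 0x2000 ends at byte 0x37ff, so
 * first_page = 1, last_page = 3 and pages = 3 -- partial pages at
 * either end still count as whole pages.
 */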

/*
 * search for an empty region in the PTB with the given size
 *
 * if an empty region is found, return its start page and store the
 * following mapped block in nextp;
 * if none is found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each(pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
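
/*
 * Note on strategy: an exactly fitting hole is taken immediately;
 * otherwise the largest hole wins (worst-fit), with the trailing free
 * area after the last mapped block preferred on a tie.
 */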

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to a page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}
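
/*
 * Note: the memhdr block list is assumed to be sorted by offset, so the
 * scan above is a simple first-fit search over the gaps between
 * consecutive blocks.
 */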

/*
 * check if the given DMA address is valid as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                dev_err(emu->card->dev,
                        "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                        emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                dev_err(emu->card->dev, "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, only update its order in the LRU list.
 * if no empty region is found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages -- try to unmap some blocks,
                 * starting from the oldest one
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

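/*
 * Note: mapped_order_link_head works as an LRU list -- a block is moved
 * to the tail whenever it is (re)mapped, and the eviction loop above
 * scans from the head, so the least recently mapped, unlocked blocks
 * are unmapped first.
 */
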
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                                        (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill the buffer addresses, but don't store the page pointers,
         * so that these pages are not freed by synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (!is_valid_page(emu, addr)) {
                        dev_err(emu->card->dev,
                                "emu: failure page = %d\n", idx);
                        /* release the half-initialized block, or it would leak */
                        __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}
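
/*
 * Note: the release path is shared with snd_emu10k1_synth_free(); since
 * snd_emu10k1_alloc_pages() left the page_ptr_table[] entries NULL, the
 * free_page() calls in __synth_free_pages() are no-ops for DMA buffers.
 */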

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

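/*
 * Minimal usage sketch (illustrative only; "sample_bytes" and "buf" are
 * hypothetical caller variables):
 *
 *      struct snd_util_memblk *blk;
 *
 *      blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *      if (!blk)
 *              return -ENOMEM;
 *      if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes)) {
 *              snd_emu10k1_synth_free(emu, blk);
 *              return -EFAULT;
 *      }
 *      ...
 *      snd_emu10k1_synth_free(emu, blk);
 */
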
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/*
 * compute the page range exclusively owned by this block; boundary
 * pages shared with a neighboring block are excluded from the range
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;

        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}
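
/*
 * Example: if the previous block ends in the middle of page 5 and this
 * block starts in the same page, page 5 already has a backing kernel
 * page, so this block's exclusive range starts at page 6.
 */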

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        int page;

        for (page = first_page; page <= last_page; page++) {
                free_page((unsigned long)emu->page_ptr_table[page]);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                /* first try to allocate from <4GB zone */
                struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                            __GFP_NOWARN);
                if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
                        if (p)
                                __free_page(p);
                        /* try to allocate from <16MB zone */
                        p = alloc_page(GFP_ATOMIC | GFP_DMA |
                                       __GFP_NORETRY | /* no OOM-killer */
                                       __GFP_NOWARN);
                }
                if (!p) {
                        __synth_free_pages(emu, first_page, page - 1);
                        return -ENOMEM;
                }
                emu->page_addr_table[page] = page_to_phys(p);
                emu->page_ptr_table[page] = page_address(p);
        }
        return 0;
}
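
/*
 * Note: the two-step allocation above handles chips whose DMA mask is
 * narrower than the installed memory: a page from the <4GB zone whose
 * PFN falls outside emu->dma_mask is given back, and a page from the
 * <16MB DMA zone is tried instead before failing with -ENOMEM.
 */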

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;

        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (!ptr) {
                dev_err(emu->card->dev,
                        "access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;        /* bytes up to the next page boundary */
                temp1 = end_offset - offset;    /* bytes remaining in the request */
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;        /* bytes up to the next page boundary */
                temp1 = end_offset - offset;    /* bytes remaining in the request */
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
