linux/sound/pci/emu10k1/memory.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page arguments of these two macros are Emu pages (4096 bytes each),
 * not the PAGE_SIZE-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((__le32 *)(emu)->ptb_pages.area)[page] = \
         cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
        (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))

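/* Each PTB entry combines the DMA address of an Emu page (shifted left by
 * address_mode) with the entry's own index in the low bits.  For
 * illustration (assuming address_mode == 1): an Emu page at DMA address
 * 0x12345000 mapped at entry 5 yields
 *   cpu_to_le32((0x12345000 << 1) | 5) == cpu_to_le32(0x2468a005).
 */
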
#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0                (MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1                (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

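/* When PAGE_SIZE is larger than EMUPAGESIZE (e.g. 8 kB or 64 kB kernel
 * pages), one aligned page covers UNIT_PAGES hardware pages, so each
 * aligned page has to be expanded into UNIT_PAGES consecutive PTB entries;
 * the helpers below take care of that expansion.
 */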
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
                        (unsigned int)__get_ptb_entry(emu, page));
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                /* do not increment addr; every entry points at the silent page */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
                dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
                        page, (unsigned int)__get_ptb_entry(emu, page));
        }
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of the memblk */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size (in pages)
 *
 * if an empty region is found, return its first page and store the following
 * mapped block in nextp;
 * if none is found, return a negative error code.
 */
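/*
 * The scan starts at page 1 (entry 0 is reserved; see map_memblk()).  A hole
 * that fits exactly is taken immediately; otherwise the largest hole found,
 * including the free space after the last mapped block, is used.
 */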
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 1, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
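/*
 * Mapped blocks are kept on two lists: mapped_link orders them by PTB
 * position (used above to find holes), while mapped_order_link keeps them
 * in mapping order, which snd_emu10k1_memblk_map() uses to pick the oldest
 * blocks for eviction.
 */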
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        if (page == 0) {
                dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
                return -EINVAL;
        }
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
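/*
 * The returned size spans from the end of the previous mapped block to the
 * start of the next one, so the caller can tell whether the hole is now
 * large enough for a pending mapping.
 */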
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        p = blk->mapped_link.prev;
        if (p != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else {
                start_page = 1;
        }
        p = blk->mapped_link.next;
        if (p != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else {
                end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
        }

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages of the given size and create a memory block
 *
 * unlike snd_emu10k1_synth_alloc(), the memory block is aligned to a page
 * boundary
 */
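/*
 * The memhdr block list is kept ordered by offset; the new block is created
 * just before the first block that lies beyond the gap, so the ordering is
 * preserved.
 */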
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check whether the given DMA address is valid as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                dev_err_ratelimited(emu->card->dev,
                        "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                        emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, just update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
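/* Blocks with map_locked set (PCM buffers mapped in snd_emu10k1_alloc_pages())
 * are never evicted by the retry path.
 */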
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        err = map_memblk(emu, blk);
        if (err < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest block
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* OK, the empty region is large enough now */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                                        (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses, but do not store the kernel pointers, so that
         * these pages are skipped (not freed) by __synth_free_pages() when the
         * block is released
         */
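        /* pages beyond runtime->dma_bytes (the idx padding above) are pointed
         * at the silent page
         */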
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (!is_valid_page(emu, addr)) {
                        dev_err_ratelimited(emu->card->dev,
                                "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for why
 * this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
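/* The widening below adds at most one extra page per allocation;
 * __synth_free_pages() relies on this when it frees with
 * dmab.bytes = 2 * PAGE_SIZE under the IOMMU workaround.
 */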
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
                                        struct snd_dma_buffer *dmab)
{
        if (emu->iommu_workaround) {
                size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
                size_t size_real = npages * PAGE_SIZE;

                /*
                 * The device has been observed to access up to 256 extra
                 * bytes, but use 1k to be safe.
                 */
                if (size_real < size + 1024)
                        size += PAGE_SIZE;
        }

        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                   &emu->pci->dev, size, dmab);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */
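/* Each synth page is a separate single-page DMA allocation; its DMA address
 * and kernel pointer are recorded in page_addr_table[] / page_ptr_table[],
 * and the PTB makes the pages appear contiguous to the chip.
 */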

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* compute the page range that actually needs to be allocated (or freed) for this block */
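/* Synth blocks are not page-aligned, so the first/last kernel page of a
 * block may be shared with its neighbour on the memhdr list; such a page
 * was already allocated by the neighbour and must be skipped here (both on
 * allocation and on free, which use the same range).
 */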
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;
        first_page = blk->first_page;
        p = blk->mem.list.prev;
        if (p != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        p = blk->mem.list.next;
        if (p != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        struct snd_dma_buffer dmab;
        int page;

        dmab.dev.type = SNDRV_DMA_TYPE_DEV;
        dmab.dev.dev = &emu->pci->dev;

        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];

                /*
                 * please keep me in sync with logic in
                 * snd_emu10k1_alloc_pages_maybe_wider()
                 */
                dmab.bytes = PAGE_SIZE;
                if (emu->iommu_workaround)
                        dmab.bytes *= 2;

                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
                                                        &dmab) < 0)
                        goto __fail;
                if (!is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release allocated pages */
        last_page = page - 1;
        __synth_free_pages(emu, first_page, last_page);

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;
        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (!ptr) {
                dev_err(emu->card->dev,
                        "access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
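/*
 * The block is walked one aligned page at a time; each chunk is clamped to
 * the page boundary and to end_offset.  The incoming offset is first
 * adjusted by the block's starting offset within its first page.
 */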
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
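/*
 * Same page-walking scheme as snd_emu10k1_synth_bzero() above; a fault while
 * copying any chunk aborts with -EFAULT.
 */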
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);