linux/arch/s390/pci/pci_dma.c
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

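/*
 * The CPU-resident I/O translation table is a three-level structure:
 * a region table whose entries point to segment tables, whose entries
 * in turn point to page tables holding the per-page translations (see
 * dma_walk_cpu_trans() below).  dma_region_table_cache backs both the
 * region and the segment tables; dma_page_table_cache backs the page
 * tables.
 */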
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

/*
 * Return the segment-table origin stored in a region-table entry.  If
 * the entry is still invalid, allocate a fresh segment table, hook it
 * up and mark the entry valid and unprotected.
 */
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

/*
 * Same as above, one level down: return the page-table origin stored
 * in a segment-table entry, allocating the page table on first use.
 */
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

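/*
 * Worked example (a sketch; the index widths are those defined in
 * asm/pci_dma.h, i.e. 11-bit region/segment indices and an 8-bit page
 * index on 4K pages).  For dma_addr 0x80123456:
 *
 *	rtx = calc_rtx(dma_addr);	bits 63..31	-> 0x1
 *	sx  = calc_sx(dma_addr);	bits 30..20	-> 0x1
 *	px  = calc_px(dma_addr);	bits 19..12	-> 0x23
 *
 * so the walk descends rto[1] -> sto[1] -> pto[0x23] and returns a
 * pointer to that page-table entry.
 */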
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}

	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

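/*
 * The flags argument encodes what the callers below actually pass:
 * ZPCI_PTE_VALID establishes a translation (with ZPCI_TABLE_PROTECTED
 * or'ed in for directions where the device must not write), while
 * ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED tears one down again.
 */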
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when
	 * previously invalid translation-table entries are validated.
	 * It is required, however, when altering previously valid
	 * entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed INVALID
		 * and not only for one page but for the whole range...
		 * -> now we WARN_ON in that case but with lazy unmap that
		 * needs to be redone!
		 */
		goto no_refresh;

	rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				   nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				       int size)
{
	/*
	 * iommu_area_alloc() takes the boundary in pages; 0x1000000
	 * 4K pages means allocations never cross a 64G boundary.
	 */
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

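/*
 * Next-fit allocation in the IOMMU bitmap: start searching at the bit
 * just past the previous allocation and, failing that, retry once from
 * the start of the bitmap.
 */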
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

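/*
 * Streaming mapping: allocate an IOMMU-page range, establish the
 * translations and hand back a DMA address inside the device's window.
 * For DMA_NONE and DMA_TO_DEVICE the entries are additionally marked
 * protected, so device writes to the range would fault.
 *
 * A driver reaches this through the generic DMA API; a minimal sketch
 * ("buf" and "len" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 *	... let the device read via "handle" ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */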
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

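/*
 * Each scatterlist element gets its own, separately allocated DMA
 * range; elements are not merged.  On error everything mapped so far
 * is rolled back and 0 is returned.  Note the generic DMA API rule:
 * a driver must pass the original nents to dma_unmap_sg(), not the
 * count returned by dma_map_sg().
 */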
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);

		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (dma_mapping_error(dev, s->dma_address))
			goto unmap;
		s->dma_length = s->length;
		mapped_elements++;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

int zpci_dma_init_device(struct zpci_dev *zdev)
{
	unsigned int bitmap_order;
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	/* Size the IOMMU window so that every directly mapped RAM page
	 * can be translated.
	 */
	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	bitmap_order = get_order(zdev->iommu_pages / 8);
	pr_info("iommu_size: 0x%lx  iommu_pages: 0x%lx  bitmap_order: %i\n",
		zdev->iommu_size, zdev->iommu_pages, bitmap_order);

	zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       bitmap_order);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_free_table;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_free_bitmap;
	return 0;

out_free_bitmap:
	/* Don't leak the bitmap when registration fails. */
	free_pages((unsigned long) zdev->iommu_bitmap, bitmap_order);
	zdev->iommu_bitmap = NULL;
out_free_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	free_pages((unsigned long) zdev->iommu_bitmap,
		   get_order(zdev->iommu_pages / 8));
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

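/*
 * On s390, get_dma_ops() hands this table back unconditionally (see
 * asm/dma-mapping.h), so every PCI device on the machine goes through
 * the translation path above.
 */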
struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);