linux/arch/s390/pci/pci_dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

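/*
 * Flush the IOMMU translations for the device's entire aperture with a
 * single refresh-PCI-translations (RPCIT) operation.
 */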
static int zpci_refresh_global(struct zpci_dev *zdev)
{
        return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
                                  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}

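/*
 * Walk the region table -> segment table -> page table hierarchy for
 * dma_addr, allocating intermediate tables on demand, and return a
 * pointer to the page-table entry.
 */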
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}

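/*
 * Set a page-table entry to point at page_addr, or invalidate it, and
 * apply the protection bit according to flags.
 */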
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(entry);
        } else {
                set_pt_pfaa(entry, page_addr);
                validate_pt_entry(entry);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(entry);
        else
                entry_clr_protected(entry);
}

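/*
 * Update the page-table entries for a page range. If validation fails
 * part-way, the entries written so far are invalidated again. The TLB
 * is not flushed here; that is left to the caller.
 */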
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                              dma_addr_t dma_addr, size_t size, int flags)
{
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        u8 *page_addr = (u8 *) (pa & PAGE_MASK);
        unsigned long irq_flags;
        unsigned long *entry;
        int i, rc = 0;

        if (!nr_pages)
                return -EINVAL;

        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
        if (!zdev->dma_table) {
                rc = -EINVAL;
                goto out_unlock;
        }

        for (i = 0; i < nr_pages; i++) {
                entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
                if (!entry) {
                        rc = -ENOMEM;
                        goto undo_cpu_trans;
                }
                dma_update_cpu_trans(entry, page_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

undo_cpu_trans:
        if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
                flags = ZPCI_PTE_INVALID;
                while (i-- > 0) {
                        page_addr -= PAGE_SIZE;
                        dma_addr -= PAGE_SIZE;
                        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
                        if (!entry)
                                break;
                        dma_update_cpu_trans(entry, page_addr, flags);
                }
        }
out_unlock:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
        return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
                           size_t size, int flags)
{
        unsigned long irqflags;
        int ret;

        /*
         * With zdev->tlb_refresh == 0, rpcit is not required to establish new
         * translations when previously invalid translation-table entries are
         * validated. With lazy unmap, rpcit is skipped for previously valid
         * entries, but a global rpcit is then required before any address can
         * be re-used, i.e. after each iommu bitmap wrap-around.
         */
        if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
                if (!zdev->tlb_refresh)
                        return 0;
        } else {
                if (!s390_iommu_strict)
                        return 0;
        }

        ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
                                 PAGE_ALIGN(size));
        if (ret == -ENOMEM && !s390_iommu_strict) {
                /* enable the hypervisor to free some resources */
                if (zpci_refresh_global(zdev))
                        goto out;

                spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
                bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
                              zdev->lazy_bitmap, zdev->iommu_pages);
                bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
                spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
                ret = 0;
        }
out:
        return ret;
}

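/*
 * Update the translation-table entries and flush the IOMMU TLB. If the
 * flush fails for a mapping request, the new entries are invalidated
 * again so no translation is left half-established.
 */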
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                            dma_addr_t dma_addr, size_t size, int flags)
{
        int rc;

        rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
        if (rc)
                return rc;

        rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
        if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
                __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

        return rc;
}

void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
        int rtx;

        if (!table)
                return;

        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
}

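/*
 * Search the allocation bitmap for a free range of iommu pages,
 * respecting the device's segment boundary. Returns the page offset
 * into the aperture, or -1 if nothing suitable is free.
 */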
static unsigned long __dma_alloc_iommu(struct device *dev,
                                       unsigned long start, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

        return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                start, size, zdev->start_dma >> PAGE_SHIFT,
                                dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
                                0);
}

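/*
 * Allocate a DMA address range, searching next-fit from the previous
 * allocation. On wrap-around in lazy mode, a global TLB flush first
 * makes all lazily freed ranges reusable.
 */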
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long offset, flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
        if (offset == -1) {
                if (!s390_iommu_strict) {
                        /* global flush before DMA addresses are reused */
                        if (zpci_refresh_global(zdev))
                                goto out_error;

                        bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
                                      zdev->lazy_bitmap, zdev->iommu_pages);
                        bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
                }
                /* wrap-around */
                offset = __dma_alloc_iommu(dev, 0, size);
                if (offset == -1)
                        goto out_error;
        }
        zdev->next_bit = offset + size;
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

        return zdev->start_dma + offset * PAGE_SIZE;

out_error:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
        return DMA_MAPPING_ERROR;
}

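/*
 * Release a DMA address range: cleared immediately in strict mode, or
 * marked in the lazy bitmap for reuse after the next global flush.
 */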
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long flags, offset;

        offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        if (!zdev->iommu_bitmap)
                goto out;

        if (s390_iommu_strict)
                bitmap_clear(zdev->iommu_bitmap, offset, size);
        else
                bitmap_set(zdev->lazy_bitmap, offset, size);

out:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
        struct {
                unsigned long rc;
                unsigned long addr;
        } __packed data = {rc, addr};

        zpci_err_hex(&data, sizeof(data));
}

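/*
 * map_page callback: allocate a DMA address for the range, install the
 * translations, and protect the range against device writes for
 * DMA_TO_DEVICE and DMA_NONE transfers.
 */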
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long pa = page_to_phys(page) + offset;
        int flags = ZPCI_PTE_VALID;
        unsigned long nr_pages;
        dma_addr_t dma_addr;
        int ret;

        /* This rounds up number of pages based on size and offset */
        nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
        dma_addr = dma_alloc_address(dev, nr_pages);
        if (dma_addr == DMA_MAPPING_ERROR) {
                ret = -ENOSPC;
                goto out_err;
        }

        /* Use rounded up size */
        size = nr_pages * PAGE_SIZE;

        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
        if (ret)
                goto out_free;

        atomic64_add(nr_pages, &zdev->mapped_pages);
        return dma_addr + (offset & ~PAGE_MASK);

out_free:
        dma_free_address(dev, dma_addr, nr_pages);
out_err:
        zpci_err("map error:\n");
        zpci_err_dma(ret, pa);
        return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        int npages, ret;

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                               ZPCI_PTE_INVALID);
        if (ret) {
                zpci_err("unmap error:\n");
                zpci_err_dma(ret, dma_addr);
                return;
        }

        atomic64_add(npages, &zdev->unmapped_pages);
        dma_free_address(dev, dma_addr, npages);
}

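/*
 * Allocate coherent DMA memory: zeroed pages with a bidirectional
 * mapping. The physical address is returned as the CPU pointer, which
 * relies on the kernel's 1:1 mapping of physical memory on s390.
 */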
static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
                            unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        struct page *page;
        unsigned long pa;
        dma_addr_t map;

        size = PAGE_ALIGN(size);
        page = alloc_pages(flag | __GFP_ZERO, get_order(size));
        if (!page)
                return NULL;

        pa = page_to_phys(page);
        map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
        }

        atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
        if (dma_handle)
                *dma_handle = map;
        return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
                          unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

        size = PAGE_ALIGN(size);
        atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
        s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
        free_pages((unsigned long) pa, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                             size_t size, dma_addr_t *handle,
                             enum dma_data_direction dir)
{
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        dma_addr_t dma_addr_base, dma_addr;
        int flags = ZPCI_PTE_VALID;
        struct scatterlist *s;
        unsigned long pa = 0;
        int ret;

        dma_addr_base = dma_alloc_address(dev, nr_pages);
        if (dma_addr_base == DMA_MAPPING_ERROR)
                return -ENOMEM;

        dma_addr = dma_addr_base;
        if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
                pa = page_to_phys(sg_page(s));
                ret = __dma_update_trans(zdev, pa, dma_addr,
                                         s->offset + s->length, flags);
                if (ret)
                        goto unmap;

                dma_addr += s->offset + s->length;
        }
        ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
        if (ret)
                goto unmap;

        *handle = dma_addr_base;
        atomic64_add(nr_pages, &zdev->mapped_pages);

        return ret;

unmap:
        dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
                         ZPCI_PTE_INVALID);
        dma_free_address(dev, dma_addr_base, nr_pages);
        zpci_err("map error:\n");
        zpci_err_dma(ret, pa);
        return ret;
}

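/*
 * map_sg callback: coalesce scatterlist elements into as few contiguous
 * DMA ranges as possible. A new range is started whenever an element
 * does not line up page-aligned with the previous one or the combined
 * size would exceed the device's maximum segment size.
 */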
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
                           unsigned long attrs)
{
        struct scatterlist *s = sg, *start = sg, *dma = sg;
        unsigned int max = dma_get_max_seg_size(dev);
        unsigned int size = s->offset + s->length;
        unsigned int offset = s->offset;
        int count = 0, i, ret;

        for (i = 1; i < nr_elements; i++) {
                s = sg_next(s);

                s->dma_length = 0;

                if (s->offset || (size & ~PAGE_MASK) ||
                    size + s->length > max) {
                        ret = __s390_dma_map_sg(dev, start, size,
                                                &dma->dma_address, dir);
                        if (ret)
                                goto unmap;

                        dma->dma_address += offset;
                        dma->dma_length = size - offset;

                        size = offset = s->offset;
                        start = s;
                        dma = sg_next(dma);
                        count++;
                }
                size += s->length;
        }
        ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
        if (ret)
                goto unmap;

        dma->dma_address += offset;
        dma->dma_length = size - offset;

        return count + 1;
unmap:
        for_each_sg(sg, s, count, i)
                s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
                                     dir, attrs);

        return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
                              unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                if (s->dma_length)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
                                             dir, attrs);
                s->dma_address = 0;
                s->dma_length = 0;
        }
}

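/*
 * Set up DMA translation for a device: allocate the root translation
 * table and the allocation bitmaps, size the usable aperture, and
 * register the table with the hardware via zpci_register_ioat().
 */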
int zpci_dma_init_device(struct zpci_dev *zdev)
{
        int rc;

        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);

        spin_lock_init(&zdev->iommu_bitmap_lock);
        spin_lock_init(&zdev->dma_table_lock);

        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
                goto out;
        }

        /*
         * Restrict the iommu bitmap size to the minimum of the following:
         * - s390_iommu_aperture which defaults to high_memory
         * - 3-level pagetable address limit minus start_dma offset
         * - DMA address range allowed by the hardware (clp query pci fn)
         *
         * Also set zdev->end_dma to the actual end address of the usable
         * range, instead of the theoretical maximum as reported by hardware.
         *
         * This limits the number of concurrently usable DMA mappings since
         * for each DMA mapped memory address we need a DMA address including
         * extra DMA addresses for multiple mappings of the same memory address.
         */
        zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
        zdev->iommu_size = min3(s390_iommu_aperture,
                                ZPCI_TABLE_SIZE_RT - zdev->start_dma,
                                zdev->end_dma - zdev->start_dma + 1);
        zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
        zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
        zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
                goto free_dma_table;
        }
        if (!s390_iommu_strict) {
                zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
                if (!zdev->lazy_bitmap) {
                        rc = -ENOMEM;
                        goto free_bitmap;
                }
        }
        if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                               (u64)zdev->dma_table)) {
                rc = -EIO;
                goto free_bitmap;
        }

        return 0;
free_bitmap:
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
        vfree(zdev->lazy_bitmap);
        zdev->lazy_bitmap = NULL;
free_dma_table:
        dma_free_cpu_table(zdev->dma_table);
        zdev->dma_table = NULL;
out:
        return rc;
}

int zpci_dma_exit_device(struct zpci_dev *zdev)
{
        int cc = 0;

        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);
        if (zdev_enabled(zdev))
                cc = zpci_unregister_ioat(zdev, 0);
        /*
         * cc == 3 indicates the function is gone already. This can happen
         * if the function was deconfigured/disabled suddenly and we have not
         * received a new handle yet.
         */
        if (cc && cc != 3)
                return -EIO;

        dma_cleanup_tables(zdev->dma_table);
        zdev->dma_table = NULL;
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
        vfree(zdev->lazy_bitmap);
        zdev->lazy_bitmap = NULL;
        zdev->next_bit = 0;
        return 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                        ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
                                        0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                        ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
                                        0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}

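/*
 * The default aperture covers usable memory (high_memory) scaled by
 * s390_iommu_aperture_factor; a factor of 0 removes the limit.
 */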
int __init zpci_dma_init(void)
{
        s390_iommu_aperture = (u64)high_memory;
        if (!s390_iommu_aperture_factor)
                s390_iommu_aperture = ULONG_MAX;
        else
                s390_iommu_aperture *= s390_iommu_aperture_factor;

        return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
        kmem_cache_destroy(dma_page_table_cache);
        kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
        .alloc          = s390_dma_alloc,
        .free           = s390_dma_free,
        .map_sg         = s390_dma_map_sg,
        .unmap_sg       = s390_dma_unmap_sg,
        .map_page       = s390_dma_map_pages,
        .unmap_page     = s390_dma_unmap_pages,
        .mmap           = dma_common_mmap,
        .get_sgtable    = dma_common_get_sgtable,
        .alloc_pages    = dma_common_alloc_pages,
        .free_pages     = dma_common_free_pages,
        /* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

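/*
 * s390_iommu=strict selects strict TLB flushing: DMA addresses are
 * invalidated and freed on every unmap instead of lazily at bitmap
 * wrap-around.
 */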
static int __init s390_iommu_setup(char *str)
{
        if (!strcmp(str, "strict"))
                s390_iommu_strict = 1;
        return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

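/*
 * s390_iommu_aperture=<factor> scales the default aperture size;
 * unparsable values fall back to a factor of 1.
 */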
static int __init s390_iommu_aperture_setup(char *str)
{
        if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
                s390_iommu_aperture_factor = 1;
        return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);