linux/arch/mips/cavium-octeon/dma-octeon.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

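/*
 * Translate physical addresses that fall inside the PCIe BAR1 window
 * to the bus address at the root-complex BAR1 base, and back again.
 * Addresses outside that window pass through unchanged.
 */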
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}

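/*
 * OCTEON_DMA_BAR_TYPE_PCIE: memory in the 0x410000000-0x420000000
 * range appears to PCI at 0x10000000-0x20000000, on top of the BAR1
 * window translation above.
 */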
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}

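/*
 * OCTEON_DMA_BAR_TYPE_PCIE2: DMA addresses match physical addresses
 * apart from the BAR1 window translation.
 */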
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return octeon_hole_dma_to_phys(daddr);
}

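/*
 * OCTEON_DMA_BAR_TYPE_BIG: memory below 0xf0000000 (with the
 * 0x410000000 alias folded down first) is addressed directly;
 * everything else is offset by OCTEON_BAR2_PCI_ADDRESS and reached
 * through BAR2.
 */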
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

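/*
 * OCTEON_DMA_BAR_TYPE_SMALL: only the 128MB window starting at
 * octeon_bar1_pci_phys is reachable directly; everything else is
 * offset by OCTEON_BAR2_PCI_ADDRESS and goes through BAR2.
 */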
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

#endif /* CONFIG_PCI */

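/*
 * The map and sync-for-device wrappers call the swiotlb
 * implementations and add an mb() so that CPU writes (including any
 * bounce-buffer copies) reach memory before the device starts DMA.
 */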
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
					    direction, attrs);
	mb();

	return daddr;
}

static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
	mb();
	return r;
}

static void octeon_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
	mb();
}

static void octeon_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
	mb();
}

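/*
 * Pick an allocation zone from the device's coherent DMA mask, then
 * let swiotlb do the actual allocation.
 */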
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}

static void octeon_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

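/*
 * Identity translation, used when DMA addresses are the same as
 * physical addresses.
 */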
static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

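/*
 * Bundle the generic dma_map_ops with the board-specific address
 * translation callbacks; phys_to_dma()/dma_to_phys() recover the
 * wrapper from a device's dma_map_ops with container_of().
 */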
struct octeon_dma_map_ops {
	struct dma_map_ops dma_map_ops;
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->phys_to_dma(dev, paddr);
}
EXPORT_SYMBOL(phys_to_dma);

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->dma_to_phys(dev, daddr);
}
EXPORT_SYMBOL(dma_to_phys);

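/* Default ops: swiotlb with identity address translation. */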
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
	.phys_to_dma = octeon_unity_phys_to_dma,
	.dma_to_phys = octeon_unity_dma_to_phys
};

char *octeon_swiotlb;

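/*
 * Size the swiotlb bounce buffer from the boot memory map, allocate
 * it from low bootmem and install the default (linear) DMA ops.
 */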
void __init plat_swiotlb_setup(void)
{
	int i;
	phys_addr_t max_addr;
	phys_addr_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;

	max_addr = 0;
	addr_size = 0;

	for (i = 0 ; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
		if (e->type != BOOT_MEM_RAM && e->type != BOOT_MEM_INIT_RAM)
			continue;

		/* These addresses map low for PCI. */
		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
			continue;

		addr_size += e->size;

		if (max_addr < e->addr + e->size)
			max_addr = e->addr + e->size;

	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1<<20))
			swiotlbsize = 64 * (1<<20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1<<20);
	}
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
	/* OCTEON II ohci is only 32-bit. */
	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
		swiotlbsize = 64 * (1<<20);
#endif
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);

	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
		panic("Cannot allocate SWIOTLB buffer");

	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}

#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
};

struct dma_map_ops *octeon_pci_dma_map_ops;

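/*
 * Fill in the phys<->dma translation callbacks that match
 * octeon_dma_bar_type and publish the resulting ops for the PCI code.
 */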
void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
		break;
	default:
		BUG();
	}
	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
#endif /* CONFIG_PCI */