linux/arch/mips/cavium-octeon/dma-octeon.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/bootmem.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

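/*
 * Addresses that fall inside the PCIe BAR1 physical window are rebased
 * onto CVMX_PCIE_BAR1_RC_BASE for the device's view; everything else
 * passes through unchanged.  The two helpers below are each other's
 * inverse for that window.
 */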
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
        if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
                return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
        else
                return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
        if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
                return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
        else
                return daddr;
}

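/*
 * OCTEON_DMA_BAR_TYPE_PCIE: the RAM aliased at 0x410000000 maps low
 * (to 0x10000000) for PCI, see plat_swiotlb_setup(); after that fold,
 * apply the BAR1 hole translation above.
 */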
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
                paddr -= 0x400000000ull;
        return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        daddr = octeon_hole_dma_to_phys(daddr);

        if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
                daddr += 0x400000000ull;

        return daddr;
}

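/*
 * OCTEON_DMA_BAR_TYPE_BIG: addresses below 0xf0000000 are reachable
 * directly through BAR1; anything in the BAR1 hole or above is offset
 * into the BAR2 window instead.
 */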
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
                paddr -= 0x400000000ull;

        /* Anything in the BAR1 hole or above goes via BAR2 */
        if (paddr >= 0xf0000000ull)
                paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

        return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
                daddr -= OCTEON_BAR2_PCI_ADDRESS;

        if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
                daddr += 0x400000000ull;
        return daddr;
}

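/*
 * OCTEON_DMA_BAR_TYPE_SMALL: only the 128 MB window starting at
 * octeon_bar1_pci_phys is visible through BAR1; everything outside
 * that window is offset into BAR2.
 */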
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
                                           phys_addr_t paddr)
{
        if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
                paddr -= 0x400000000ull;

        /* Anything not in the BAR1 range goes via BAR2 */
        if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
                paddr = paddr - octeon_bar1_pci_phys;
        else
                paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

        return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
                                            dma_addr_t daddr)
{
        if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
                daddr -= OCTEON_BAR2_PCI_ADDRESS;
        else
                daddr += octeon_bar1_pci_phys;

        if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
                daddr += 0x400000000ull;
        return daddr;
}

#endif /* CONFIG_PCI */

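/*
 * The map and sync-for-device operations wrap their swiotlb
 * counterparts and add a write barrier, so that CPU stores (including
 * any bounce-buffer copy done by swiotlb) are committed to memory
 * before the device is told it may start DMA.
 */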
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        struct dma_attrs *attrs)
{
        dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
                                            direction, attrs);
        mb();

        return daddr;
}

static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
        int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
        int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
        mb();
        return r;
}

static void octeon_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
        mb();
}

static void octeon_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
        swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
        mb();
}

static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

        mb();

        return ret;
}

static void octeon_dma_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle)
{
        int order = get_order(size);

        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return paddr;
}

static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        return daddr;
}

struct octeon_dma_map_ops {
        struct dma_map_ops dma_map_ops;
        dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
        phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

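/*
 * The generic phys_to_dma()/dma_to_phys() entry points dispatch to the
 * per-bus translation callbacks by recovering the containing
 * struct octeon_dma_map_ops from the device's dma_map_ops with
 * container_of().
 */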
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
                                                      struct octeon_dma_map_ops,
                                                      dma_map_ops);

        return ops->phys_to_dma(dev, paddr);
}
EXPORT_SYMBOL(phys_to_dma);

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
                                                      struct octeon_dma_map_ops,
                                                      dma_map_ops);

        return ops->dma_to_phys(dev, daddr);
}
EXPORT_SYMBOL(dma_to_phys);

static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
        .dma_map_ops = {
                .alloc_coherent = octeon_dma_alloc_coherent,
                .free_coherent = octeon_dma_free_coherent,
                .map_page = octeon_dma_map_page,
                .unmap_page = swiotlb_unmap_page,
                .map_sg = octeon_dma_map_sg,
                .unmap_sg = swiotlb_unmap_sg_attrs,
                .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
                .sync_single_for_device = octeon_dma_sync_single_for_device,
                .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
                .sync_sg_for_device = octeon_dma_sync_sg_for_device,
                .mapping_error = swiotlb_dma_mapping_error,
                .dma_supported = swiotlb_dma_supported
        },
        .phys_to_dma = octeon_unity_phys_to_dma,
        .dma_to_phys = octeon_unity_dma_to_phys
};

char *octeon_swiotlb;

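/*
 * Size and allocate the bootmem-backed swiotlb bounce buffer, then
 * install the linear (unity-translation) DMA ops as the platform
 * default.  PCI devices get their own translation via
 * octeon_pci_dma_map_ops, set up in octeon_pci_dma_init() below.
 */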
void __init plat_swiotlb_setup(void)
{
        int i;
        phys_t max_addr;
        phys_t addr_size;
        size_t swiotlbsize;
        unsigned long swiotlb_nslabs;

        max_addr = 0;
        addr_size = 0;

        for (i = 0 ; i < boot_mem_map.nr_map; i++) {
                struct boot_mem_map_entry *e = &boot_mem_map.map[i];
                if (e->type != BOOT_MEM_RAM)
                        continue;

                /* These addresses map low for PCI. */
                if (e->addr > 0x410000000ull)
                        continue;

                addr_size += e->size;

                if (max_addr < e->addr + e->size)
                        max_addr = e->addr + e->size;

        }

        swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
        /*
         * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
         * size to a maximum of 64MB
         */
        if (OCTEON_IS_MODEL(OCTEON_CN31XX)
            || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
                swiotlbsize = addr_size / 4;
                if (swiotlbsize > 64 * (1<<20))
                        swiotlbsize = 64 * (1<<20);
        } else if (max_addr > 0xf0000000ul) {
                /*
                 * Otherwise only allocate a big iotlb if there is
                 * memory past the BAR1 hole.
                 */
                swiotlbsize = 64 * (1<<20);
        }
#endif
        swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
        swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
        swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

        octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);

        swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);

        mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}

#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
        .dma_map_ops = {
                .alloc_coherent = octeon_dma_alloc_coherent,
                .free_coherent = octeon_dma_free_coherent,
                .map_page = octeon_dma_map_page,
                .unmap_page = swiotlb_unmap_page,
                .map_sg = octeon_dma_map_sg,
                .unmap_sg = swiotlb_unmap_sg_attrs,
                .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
                .sync_single_for_device = octeon_dma_sync_single_for_device,
                .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
                .sync_sg_for_device = octeon_dma_sync_sg_for_device,
                .mapping_error = swiotlb_dma_mapping_error,
                .dma_supported = swiotlb_dma_supported
        },
};

struct dma_map_ops *octeon_pci_dma_map_ops;

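/*
 * Pick the phys<->dma translation callbacks that match the BAR layout
 * chosen by the PCI/PCIe init code (octeon_dma_bar_type) and publish
 * the resulting ops for PCI devices.
 */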
void __init octeon_pci_dma_init(void)
{
        switch (octeon_dma_bar_type) {
        case OCTEON_DMA_BAR_TYPE_PCIE:
                _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
                _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
                break;
        case OCTEON_DMA_BAR_TYPE_BIG:
                _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
                _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
                break;
        case OCTEON_DMA_BAR_TYPE_SMALL:
                _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
                _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
                break;
        default:
                BUG();
        }
        octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
#endif /* CONFIG_PCI */