linux/arch/ia64/sn/pci/pci_dma.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
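
/*
 * Usage sketch (hypothetical driver; foo_probe is an assumed name):
 * a PCI driver normally validates its mask through the generic DMA API,
 * which consults the platform's dma_supported hook (sn_dma_supported()
 * here) rather than calling these routines directly:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *			return -EIO;	(mask rejected by the platform)
 *		...
 *	}
 */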

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 * @attrs: optional dma attributes
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags,
				   struct dma_attrs *attrs)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_exact_node(node,
						flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */

	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
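
/*
 * Usage sketch (assumed names: foo_cmd, RING_BYTES, foo_mmio, FOO_RING_BASE):
 * a driver typically obtains a coherent command-queue region like this and
 * programs the returned DMA handle into the device:
 *
 *	struct foo_cmd *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writeq(ring_dma, foo_mmio + FOO_RING_BASE);
 *
 * On SN the handle comes from provider->dma_map_consistent() above, so the
 * device sees a "command" (PCIIO_DMA_CMD) mapping.
 */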

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 * @attrs: optional dma attributes
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page containing the region to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region starting at @offset in @page for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}
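
/*
 * Usage sketch (buf and len are assumed caller-provided values): a driver
 * that wants the Altix write-barrier behaviour described above passes
 * DMA_ATTR_WRITE_BARRIER via the _attrs variant of the streaming map call:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_addr_t busaddr;
 *
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	busaddr = dma_map_single_attrs(&pdev->dev, buf, len,
 *				       DMA_FROM_DEVICE, &attrs);
 *	if (!busaddr)
 *		return -ENOMEM;
 *
 * This ends up in sn_dma_map_page() with dmabarr set, selecting
 * provider->dma_map_consistent().  sn_dma_map_page() returns 0 when it
 * runs out of ATEs, which is what the sketch checks for.
 */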

/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
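
/*
 * Usage sketch (NENTS, bufs and BUF_LEN are assumed names): mapping a
 * scatterlist through the generic API, which lands in sn_dma_map_sg()
 * above:
 *
 *	struct scatterlist sgl[NENTS];
 *	int i, count;
 *
 *	sg_init_table(sgl, NENTS);
 *	for (i = 0; i < NENTS; i++)
 *		sg_set_buf(&sgl[i], bufs[i], BUF_LEN);
 *
 *	count = dma_map_sg(&pdev->dev, sgl, NENTS, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(&pdev->dev, sgl, NENTS, DMA_TO_DEVICE);
 *
 * A return of 0 from dma_map_sg() corresponds to the "out of ATEs" case
 * handled above; dma_unmap_sg() must be called with the original entry
 * count, not the value returned by dma_map_sg().
 */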

static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
 out:
	return ret;
}
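
/*
 * Usage sketch (the bus pointer and port number are assumptions): these
 * legacy routines service port I/O for devices behind a PCI bus, e.g.
 * reading one byte from the VGA miscellaneous output register at port
 * 0x3cc, trying the SAL "safe" path first as described above:
 *
 *	u32 val;
 *
 *	if (sn_pci_legacy_read(bus, 0x3cc, &val, 1) == 1)
 *		pr_debug("VGA misc output: 0x%x\n", val & 0xff);
 *
 * The bus pointer would come from the caller (e.g. the device's
 * pdev->bus); a successful read returns the requested size.
 */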

static struct dma_map_ops sn_dma_ops = {
	.alloc			= sn_dma_alloc_coherent,
	.free			= sn_dma_free_coherent,
	.map_page		= sn_dma_map_page,
	.unmap_page		= sn_dma_unmap_page,
	.map_sg			= sn_dma_map_sg,
	.unmap_sg		= sn_dma_unmap_sg,
	.sync_single_for_cpu	= sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= sn_dma_sync_sg_for_cpu,
	.sync_single_for_device = sn_dma_sync_single_for_device,
	.sync_sg_for_device	= sn_dma_sync_sg_for_device,
	.mapping_error		= sn_dma_mapping_error,
	.dma_supported		= sn_dma_supported,
};

void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}