linux/arch/ia64/sn/pci/pci_dma.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (mask < 0x7fffffff)
                return 0;
        return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (!sn_dma_supported(dev, dma_mask))
                return 0;

        *dev->dma_mask = dma_mask;
        return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
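
/*
 * Illustrative sketch (not part of this file): a driver normally negotiates
 * its DMA mask through the generic DMA API rather than calling the routines
 * above directly; the supported-mask check reaches sn_dma_supported()
 * through sn_dma_ops.  The probe function and device names below are
 * hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
 *		    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 *			return -EIO;	(no usable DMA addressing)
 *		...
 *	}
 */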

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t * dma_handle, gfp_t flags)
{
        void *cpuaddr;
        unsigned long phys_addr;
        int node;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Allocate the memory, preferring the node the device's bus
         * is attached to.
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >= 0)) {
                struct page *p = alloc_pages_exact_node(node,
                                                flags, get_order(size));

                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
                cpuaddr = (void *)__get_free_pages(flags, get_order(size));

        if (unlikely(!cpuaddr))
                return NULL;

        memset(cpuaddr, 0x0, size);

        /* physical addr. of the memory we just got */
        phys_addr = __pa(cpuaddr);

        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
         * resources.
         */

        *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                                                   SN_DMA_ADDR_PHYS);
        if (!*dma_handle) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }

        return cpuaddr;
}
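
/*
 * Illustrative sketch (not part of this file): drivers reach the routine
 * above through dma_alloc_coherent()/dma_free_coherent().  A hypothetical
 * controller setting up a small command ring might do:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(ring_dma), ioaddr + RING_BASE_LO);
 *	writel(upper_32_bits(ring_dma), ioaddr + RING_BASE_HI);
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * RING_BYTES, RING_BASE_LO/HI and ioaddr are hypothetical.
 */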

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                                 dma_addr_t dma_handle)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region described by @page and @offset for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        phys_addr = __pa(cpu_addr);
        if (dmabarr)
                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                                                        size, SN_DMA_ADDR_PHYS);
        else
                dma_addr = provider->dma_map(pdev, phys_addr, size,
                                             SN_DMA_ADDR_PHYS);

        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                return 0;
        }
        return dma_addr;
}
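
/*
 * Illustrative sketch (not part of this file): the DMA_ATTR_WRITE_BARRIER
 * path above is reached by mapping with explicit attributes, e.g. through
 * dma_map_single_attrs().  The buffer "buf" and length "len" below are
 * hypothetical.
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_addr_t dma;
 *
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	dma = dma_map_single_attrs(&pdev->dev, buf, len, DMA_FROM_DEVICE,
 *				   &attrs);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single_attrs(&pdev->dev, dma, len, DMA_FROM_DEVICE, &attrs);
 */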

/**
 * sn_dma_unmap_page - unmap a DMA-mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap the region previously mapped by sn_dma_map_page().  On SN, we're
 * always cache coherent, so there is nothing to sync; we just need to free
 * any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap for
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                            int nhwentries, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        struct scatterlist *sg;

        BUG_ON(dev->bus != &pci_bus_type);

        for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, dir);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nhwentries, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
        for_each_sg(sgl, sg, nhwentries, i) {
                dma_addr_t dma_addr;
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (dmabarr)
                        dma_addr = provider->dma_map_consistent(pdev,
                                                                phys_addr,
                                                                sg->length,
                                                                SN_DMA_ADDR_PHYS);
                else
                        dma_addr = provider->dma_map(pdev, phys_addr,
                                                     sg->length,
                                                     SN_DMA_ADDR_PHYS);

                sg->dma_address = dma_addr;
                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __func__);

                        /*
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
                                sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
                        return 0;
                }

                sg->dma_length = sg->length;
        }

        return nhwentries;
}
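
/*
 * Illustrative sketch (not part of this file): callers reach the routine
 * above through dma_map_sg()/dma_map_sg_attrs() and must treat a return
 * value of 0 as failure.  NENTS and queue_segment() below are hypothetical.
 *
 *	struct scatterlist sgl[NENTS];
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	sg_init_table(sgl, NENTS);
 *	(fill each entry with sg_set_page() or sg_set_buf())
 *
 *	count = dma_map_sg(&pdev->dev, sgl, NENTS, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&pdev->dev, sgl, NENTS, DMA_TO_DEVICE);
 */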

/*
 * SN hardware maintains cache coherence, so the dma_sync_* operations
 * have nothing to do beyond sanity checking.
 */
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                       size_t size, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                          size_t size,
                                          enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                   int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                      int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

/* SN never flags an error through the mapping_error hook. */
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
        if (!SN_PCIBUS_BUSSOFT(bus))
                return ERR_PTR(-ENODEV);

        return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        unsigned long addr;
        int ret;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 0, /* read */
                 port, size, __pa(val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus))
                return -ENODEV;

        addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        addr += port;

        ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

        if (ret == 2)
                return -EINVAL;

        if (ret == 1)
                *val = -1;

        return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;
        unsigned long paddr;
        unsigned long *addr;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 1, /* write */
                 port, size, __pa(&val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus)) {
                ret = -ENODEV;
                goto out;
        }

        /* Put the phys addr in uncached space */
        paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        paddr += port;
        addr = (unsigned long *)paddr;

        switch (size) {
        case 1:
                *(volatile u8 *)(addr) = (u8)(val);
                break;
        case 2:
                *(volatile u16 *)(addr) = (u16)(val);
                break;
        case 4:
                *(volatile u32 *)(addr) = (u32)(val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 out:
        return ret;
}

static struct dma_map_ops sn_dma_ops = {
        .alloc_coherent         = sn_dma_alloc_coherent,
        .free_coherent          = sn_dma_free_coherent,
        .map_page               = sn_dma_map_page,
        .unmap_page             = sn_dma_unmap_page,
        .map_sg                 = sn_dma_map_sg,
        .unmap_sg               = sn_dma_unmap_sg,
        .sync_single_for_cpu    = sn_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = sn_dma_sync_sg_for_cpu,
        .sync_single_for_device = sn_dma_sync_single_for_device,
        .sync_sg_for_device     = sn_dma_sync_sg_for_device,
        .mapping_error          = sn_dma_mapping_error,
        .dma_supported          = sn_dma_supported,
};

void sn_dma_init(void)
{
        dma_ops = &sn_dma_ops;
}
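
/*
 * Illustrative note (not part of this file): once sn_dma_init() has pointed
 * the global ia64 dma_ops at sn_dma_ops, the generic DMA mapping inlines
 * dispatch driver calls to the routines above; for example, dma_map_single()
 * ends up in sn_dma_map_page() roughly as:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t addr = ops->map_page(dev, virt_to_page(ptr),
 *					offset_in_page(ptr), size, dir, NULL);
 */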