linux/arch/ia64/sn/pci/pci_dma.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (mask < 0x7fffffff)
                return 0;
        return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (!sn_dma_supported(dev, dma_mask))
                return 0;

        *dev->dma_mask = dma_mask;
        return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
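
/*
 * Example (sketch): a typical SN PCI driver negotiates its DMA mask through
 * the generic DMA API, which reaches sn_dma_supported() and sn_dma_set_mask()
 * via the installed dma_ops.  The pdev below stands for a hypothetical
 * struct pci_dev owned by such a driver:
 *
 *        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *                dev_warn(&pdev->dev, "unable to set a usable DMA mask\n");
 */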

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flags)
{
        void *cpuaddr;
        unsigned long phys_addr;
        int node;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Allocate the memory.
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >= 0)) {
                struct page *p = alloc_pages_exact_node(node,
                                                flags, get_order(size));

                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
                cpuaddr = (void *)__get_free_pages(flags, get_order(size));

        if (unlikely(!cpuaddr))
                return NULL;

        memset(cpuaddr, 0x0, size);

        /* physical addr. of the memory we just got */
        phys_addr = __pa(cpuaddr);

        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
         * resources.
         */

        *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                                                   SN_DMA_ADDR_PHYS);
        if (!*dma_handle) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }

        return cpuaddr;
}
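
/*
 * Example (sketch): a driver would typically obtain a command ring with the
 * generic wrapper, which dispatches here through dma_ops.  The names ring,
 * ring_dma and RING_BYTES are hypothetical:
 *
 *        void *ring;
 *        dma_addr_t ring_dma;
 *
 *        ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *                                  GFP_KERNEL);
 *        if (!ring)
 *                return -ENOMEM;
 *        ...
 *        dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */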

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                                 dma_addr_t dma_handle)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page containing the region to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region described by @page and @offset for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save the dmamap handle so we can use the two
 *       step interface.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        phys_addr = __pa(cpu_addr);
        if (dmabarr)
                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                                                        size, SN_DMA_ADDR_PHYS);
        else
                dma_addr = provider->dma_map(pdev, phys_addr, size,
                                             SN_DMA_ADDR_PHYS);

        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                return 0;
        }
        return dma_addr;
}
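
/*
 * Example (sketch): a streaming mapping from a driver's point of view.  The
 * write-barrier variant assumes the DEFINE_DMA_ATTRS()/dma_map_single_attrs()
 * helpers of this kernel generation; buf and len are hypothetical:
 *
 *        DEFINE_DMA_ATTRS(attrs);
 *        dma_addr_t bus;
 *
 *        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *        bus = dma_map_single_attrs(&pdev->dev, buf, len, DMA_TO_DEVICE,
 *                                   &attrs);
 *        if (dma_mapping_error(&pdev->dev, bus))
 *                return -EIO;
 *        ...
 *        dma_unmap_single_attrs(&pdev->dev, bus, len, DMA_TO_DEVICE, &attrs);
 */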

/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Tear down the mapping set up by sn_dma_map_page().  SN hardware is always
 * cache coherent, so all we need to do is free any ATEs associated with
 * this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap for
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                            int nhwentries, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        struct scatterlist *sg;

        BUG_ON(dev->bus != &pci_bus_type);

        for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, dir);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Maps each entry of @sgl for DMA.
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nhwentries, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Set up a DMA address for each entry in the scatterlist.
         */
        for_each_sg(sgl, sg, nhwentries, i) {
                dma_addr_t dma_addr;
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (dmabarr)
                        dma_addr = provider->dma_map_consistent(pdev,
                                                                phys_addr,
                                                                sg->length,
                                                                SN_DMA_ADDR_PHYS);
                else
                        dma_addr = provider->dma_map(pdev, phys_addr,
                                                     sg->length,
                                                     SN_DMA_ADDR_PHYS);

                sg->dma_address = dma_addr;
                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __func__);

                        /*
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
                                sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
                        return 0;
                }

                sg->dma_length = sg->length;
        }

        return nhwentries;
}
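
/*
 * Example (sketch): scatter-gather mapping as a driver would issue it.  The
 * table, nents and sg names and the program_hw_descriptor() helper are
 * hypothetical; dma_map_sg() dispatches to sn_dma_map_sg() through the
 * installed dma_ops:
 *
 *        int i, count;
 *        struct scatterlist *sg;
 *
 *        count = dma_map_sg(&pdev->dev, table, nents, DMA_FROM_DEVICE);
 *        if (!count)
 *                return -ENOMEM;
 *        for_each_sg(table, sg, count, i)
 *                program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *        ...
 *        dma_unmap_sg(&pdev->dev, table, nents, DMA_FROM_DEVICE);
 */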

/*
 * SN platforms are fully cache coherent, so the streaming sync operations
 * have nothing to do beyond sanity checking the bus type.
 */
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                       size_t size, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                          size_t size,
                                          enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                   int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                      int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
        if (!SN_PCIBUS_BUSSOFT(bus))
                return ERR_PTR(-ENODEV);

        return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        unsigned long addr;
        int ret;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 0, /* read */
                 port, size, __pa(val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus))
                return -ENODEV;

        addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        addr += port;

        ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

        if (ret == 2)
                return -EINVAL;

        if (ret == 1)
                *val = -1;

        return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;
        unsigned long paddr;
        unsigned long *addr;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 1, /* write */
                 port, size, __pa(&val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus)) {
                ret = -ENODEV;
                goto out;
        }

        /* Put the phys addr in uncached space */
        paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        paddr += port;
        addr = (unsigned long *)paddr;

        switch (size) {
        case 1:
                *(volatile u8 *)(addr) = (u8)(val);
                break;
        case 2:
                *(volatile u16 *)(addr) = (u16)(val);
                break;
        case 4:
                *(volatile u32 *)(addr) = (u32)(val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 out:
        return ret;
}

static struct dma_map_ops sn_dma_ops = {
        .alloc_coherent         = sn_dma_alloc_coherent,
        .free_coherent          = sn_dma_free_coherent,
        .map_page               = sn_dma_map_page,
        .unmap_page             = sn_dma_unmap_page,
        .map_sg                 = sn_dma_map_sg,
        .unmap_sg               = sn_dma_unmap_sg,
        .sync_single_for_cpu    = sn_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = sn_dma_sync_sg_for_cpu,
        .sync_single_for_device = sn_dma_sync_single_for_device,
        .sync_sg_for_device     = sn_dma_sync_sg_for_device,
        .mapping_error          = sn_dma_mapping_error,
        .dma_supported          = sn_dma_supported,
};

void sn_dma_init(void)
{
        dma_ops = &sn_dma_ops;
}
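
/*
 * Example (sketch): sn_dma_init() only has to point the global ia64 dma_ops
 * at this table; after that, generic DMA calls on SN PCI devices are routed
 * here.  Roughly, a call such as
 *
 *        dma_addr_t bus = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *                                      DMA_TO_DEVICE);
 *
 * resolves to get_dma_ops(&pdev->dev)->map_page(), i.e. sn_dma_map_page()
 * above.
 */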