linux/arch/parisc/include/asm/dma-mapping.h
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>

/* See Documentation/PCI/PCI-DMA-mapping.txt */
struct hppa_dma_ops {
        int  (*dma_supported)(struct device *dev, u64 mask);
        void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
        void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
        void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
        dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
        void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
        int  (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
        void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
        void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
        void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
        void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
};

/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent DMA models with one binary (they will
** someday be loadable modules):
**     I/O MMU         consistent method           dma_sync behavior
**  =============   =========================   =======================
**  a) PA-7x00LC    uncacheable host memory         flush/purge
**  b) U2/Uturn     cacheable host memory               NOP
**  c) Ike/Astro    cacheable host memory               NOP
**  d) EPIC/SAGA    memory on EPIC/SAGA         flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (e.g. PCX-T workstations) that don't fall into the above
** categories will need their drivers modified to perform the
** flush/purge and to allocate "regular" cacheable pages for everything.
*/

#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
#endif

extern struct hppa_dma_ops *hppa_dma_ops;
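
/*
** Illustrative sketch (editor's addition, compiled out): how the ops
** indirection above is consumed.  Machine setup code points the global
** hppa_dma_ops pointer at the table matching the detected I/O MMU, and
** the generic dma_* wrappers below simply forward through it.  Only
** pcxl_dma_ops and pcx_dma_ops are real symbols from this header; the
** function name below is hypothetical.
*/
#if 0
static void example_select_dma_ops(void)
{
#ifdef CONFIG_PA11
        /* PA-7x00LC: no I/O MMU, consistent memory must be uncacheable */
        hppa_dma_ops = &pcxl_dma_ops;
#else
        /*
         * U2/Uturn or Ike/Astro machines would instead install the ops
         * table registered by their IOMMU driver.
         */
#endif
}
#endif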

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}

static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                      gfp_t flag)
{
        return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size,
                    void *vaddr, dma_addr_t dma_handle)
{
        hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size,
                    void *vaddr, dma_addr_t dma_handle)
{
        hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        return hppa_dma_ops->map_single(dev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        return hppa_dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        dma_unmap_single(dev, dma_address, size, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_device)
                hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                      unsigned long offset, size_t size,
                      enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                      unsigned long offset, size_t size,
                      enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_device)
                hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                 enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_sg_for_cpu)
                hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                 enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_sg_for_device)
                hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
}
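
/*
** Illustrative sketch (editor's addition, compiled out): the usual
** streaming-DMA lifecycle built from the wrappers above -- map a kernel
** buffer, let the device DMA into it, hand ownership back to the CPU,
** then unmap.  The function name, buffer and length are placeholders.
*/
#if 0
static void example_streaming_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        /* ... program the device to DMA into 'handle' and wait ... */

        /* make the device's writes visible before the CPU reads buf */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... consume the data in buf ... */

        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
#endif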

static inline int
dma_supported(struct device *dev, u64 mask)
{
        return hppa_dma_ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
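
/*
** Illustrative sketch (editor's addition, compiled out): a driver's probe
** routine would normally negotiate its DMA mask before mapping anything.
** The probe function name is hypothetical; DMA_BIT_MASK() comes from
** <linux/dma-mapping.h>.
*/
#if 0
static int example_probe(struct device *dev)
{
        /* refuse to bind if 32-bit DMA addressing is not supported */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        return 0;
}
#endif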

static inline int
dma_get_cache_alignment(void)
{
        return dcache_stride;
}

static inline int
dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        if (hppa_dma_ops->dma_sync_single_for_cpu)
                flush_kernel_dcache_range((unsigned long)vaddr, size);
}
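
/*
** Illustrative sketch (editor's addition, compiled out): dma_cache_sync()
** pairs with dma_alloc_noncoherent().  On the flush/purge machines the
** driver must sync explicitly before each device access; on the coherent
** machines the sync degenerates to a no-op.  Everything except the dma_*
** calls is a placeholder.
*/
#if 0
static void example_noncoherent(struct device *dev)
{
        dma_addr_t handle;
        void *vaddr;

        vaddr = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (!vaddr)
                return;

        /* ... CPU builds a command block in vaddr ... */

        /* push the CPU's writes out to memory before the device reads it */
        dma_cache_sync(dev, vaddr, PAGE_SIZE, DMA_TO_DEVICE);

        /* ... start the device and wait for completion ... */

        dma_free_noncoherent(dev, PAGE_SIZE, vaddr, handle);
}
#endif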

static inline void *
parisc_walk_tree(struct device *dev)
{
        struct device *otherdev;

        if (likely(dev->platform_data != NULL))
                return dev->platform_data;

        /* OK, just traverse the bus to find it */
        for (otherdev = dev->parent; otherdev;
             otherdev = otherdev->parent) {
                if (otherdev->platform_data) {
                        dev->platform_data = otherdev->platform_data;
                        break;
                }
        }
        BUG_ON(!dev->platform_data);
        return dev->platform_data;
}

#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
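
/*
** Illustrative sketch (editor's addition, compiled out): the PA-RISC IOMMU
** drivers use GET_IOC()/parisc_walk_tree() to find the struct ioc owning a
** device when implementing the hppa_dma_ops hooks.  This is a simplified
** stand-in, not actual ccio/sba code.
*/
#if 0
static dma_addr_t example_map_single(struct device *dev, void *addr,
                size_t size, enum dma_data_direction direction)
{
        /* walk up dev->parent until a node carrying platform_data is found */
        struct ioc *ioc = GET_IOC(dev);

        /* ... allocate I/O pdir entries from 'ioc' and return the iova ... */
        return 0;
}
#endif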

#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void *ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
                struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
                struct resource *res, unsigned long size,
                unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
                allocate_resource(&iomem_resource, res, size, min, max, \
                                align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */

#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void *sba_get_iommu(struct parisc_device *dev);
#endif

/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x)       0
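
/*
** Illustrative sketch (editor's addition, compiled out): portable drivers
** still check the mapping result, even though the macro above currently
** always reports success on this architecture.
*/
#if 0
static int example_map_and_check(struct device *dev, void *buf, size_t len,
                dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;

        return 0;
}
#endif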

#endif /* _PARISC_DMA_MAPPING_H */