linux/arch/alpha/kernel/pci-noop.c
/*
 *      linux/arch/alpha/kernel/pci-noop.c
 *
 * Stub PCI interfaces for Jensen-specific kernels.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "proto.h"

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

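/*
 * Allocate a pci_controller ("hose") descriptor from bootmem and link
 * it onto the tail of the global hose list.  Early boot only, hence
 * __init and the bootmem allocator.
 */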
struct pci_controller * __init
alloc_pci_controller(void)
{
        struct pci_controller *hose;

        hose = alloc_bootmem(sizeof(*hose));

        *hose_tail = hose;
        hose_tail = &hose->next;

        return hose;
}

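/*
 * Allocate a struct resource from bootmem; the caller fills in the
 * address range and flags.
 */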
struct resource * __init
alloc_resource(void)
{
        struct resource *res;

        res = alloc_bootmem(sizeof(*res));

        return res;
}

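/*
 * sys_pciconfig_iobase: find a hose either by index (IOBASE_FROM_HOSE)
 * or through the special ISA hook (bus 0, devfn 0), then return the
 * base address selected by 'which'.
 */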
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
        struct pci_controller *hose;

        /* from hose or from bus.devfn */
        if (which & IOBASE_FROM_HOSE) {
                for (hose = hose_head; hose; hose = hose->next)
                        if (hose->index == bus)
                                break;
                if (!hose)
                        return -ENODEV;
        } else {
                /* Special hook for ISA access.  */
                if (bus == 0 && dfn == 0)
                        hose = pci_isa_hose;
                else
                        return -ENODEV;
        }

        switch (which & ~IOBASE_FROM_HOSE) {
        case IOBASE_HOSE:
                return hose->index;
        case IOBASE_SPARSE_MEM:
                return hose->sparse_mem_base;
        case IOBASE_DENSE_MEM:
                return hose->dense_mem_base;
        case IOBASE_SPARSE_IO:
                return hose->sparse_io_base;
        case IOBASE_DENSE_IO:
                return hose->dense_io_base;
        case IOBASE_ROOT_BUS:
                return hose->bus->number;
        }

        return -EOPNOTSUPP;
}

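/*
 * Without a PCI bus, user-space config-space accesses can never
 * succeed; the capability check is kept so unprivileged callers get
 * -EPERM rather than -ENODEV.
 */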
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
                   unsigned long off, unsigned long len, void *buf)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        else
                return -ENODEV;
}

asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
                    unsigned long off, unsigned long len, void *buf)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        else
                return -ENODEV;
}

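/*
 * Trivial DMA implementation for the no-PCI case: there is no IOMMU,
 * so a DMA address is simply the physical address of the buffer.
 */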
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* Devices that can reach the full 32-bit range (or an unknown
           device) do not need memory from the DMA zone.  */
        if (!dev || *dev->dma_mask >= 0xffffffffUL)
                gfp &= ~GFP_DMA;
        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}

static void alpha_noop_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_addr)
{
        free_pages((unsigned long)cpu_addr, get_order(size));
}

static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
{
        return page_to_pa(page) + offset;
}

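/*
 * Scatterlist mapping is equally direct: each segment's DMA address is
 * the physical address of its buffer.
 */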
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                             enum dma_data_direction dir, struct dma_attrs *attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                void *va;

                BUG_ON(!sg_page(sg));
                va = sg_virt(sg);
                sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
                sg_dma_len(sg) = sg->length;
        }

        return nents;
}

static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static int alpha_noop_supported(struct device *dev, u64 mask)
{
        /* Reject masks that cannot even cover the low 16MB.  */
        return mask < 0x00ffffffUL ? 0 : 1;
}

static int alpha_noop_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}

struct dma_map_ops alpha_noop_ops = {
        .alloc_coherent         = alpha_noop_alloc_coherent,
        .free_coherent          = alpha_noop_free_coherent,
        .map_page               = alpha_noop_map_page,
        .map_sg                 = alpha_noop_map_sg,
        .mapping_error          = alpha_noop_mapping_error,
        .dma_supported          = alpha_noop_supported,
        .set_dma_mask           = alpha_noop_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);

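/*
 * With no PCI devices there are no BARs to map; pci_iomap() always
 * fails.
 */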
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);