/*
 *	linux/arch/alpha/kernel/pci-noop.c
 *
 * Stub PCI interfaces for Jensen-specific kernels.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "proto.h"


/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

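/*
 * Boot-time allocation helpers: controller ("hose") descriptors and
 * their resources come from bootmem, and each new hose is appended to
 * the singly-linked list rooted at hose_head.
 */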
struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}

struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}

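/*
 * The Alpha pciconfig_iobase(2) system call: report the base addresses
 * (sparse/dense memory and I/O) of a hose, selected either by hose
 * index or by bus/devfn.  With no real PCI present, only the ISA hook
 * (bus 0, devfn 0) resolves to a hose.
 */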
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus)
				break;
		if (!hose)
			return -ENODEV;
	} else {
		/* Special hook for ISA access.  */
		if (bus == 0 && dfn == 0)
			hose = pci_isa_hose;
		else
			return -ENODEV;
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}

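/*
 * There is no PCI configuration space to read or write on a Jensen, so
 * these stubs only perform the capability check and then fail with
 * -ENODEV.
 */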
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
		   unsigned long off, unsigned long len, void *buf)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
		    unsigned long off, unsigned long len, void *buf)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

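/*
 * "No-op" DMA ops: without an IOMMU the bus address of a buffer is
 * simply its CPU physical address.  Coherent allocations just grab
 * zeroed free pages; GFP_DMA is only kept for devices that cannot
 * address the full 32-bit range.
 */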
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp,
				       struct dma_attrs *attrs)
{
	void *ret;

	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static void alpha_noop_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_addr,
				     struct dma_attrs *attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

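/*
 * Streaming mappings need no setup or teardown: the DMA address of a
 * page is its physical address, and there is nothing to unmap or sync.
 */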
static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	return page_to_pa(page) + offset;
}

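/*
 * Scatter-gather mapping: fill in each entry's DMA address and length
 * directly from the underlying physical pages; every entry always maps,
 * so mapping_error below never reports a failure.
 */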
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		void *va;

		BUG_ON(!sg_page(sg));
		va = sg_virt(sg);
		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

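/*
 * Because bus addresses are physical addresses, any device whose DMA
 * mask covers at least the low 16MB (24 bits) is accepted; smaller
 * masks are rejected.
 */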
static int alpha_noop_supported(struct device *dev, u64 mask)
{
	return mask < 0x00ffffffUL ? 0 : 1;
}

static int alpha_noop_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

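/*
 * Registered as the global dma_ops, so the generic dma_alloc_coherent(),
 * dma_map_page(), dma_map_sg(), etc. wrappers all route through the
 * trivial implementations above.
 */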
struct dma_map_ops alpha_noop_ops = {
	.alloc			= alpha_noop_alloc_coherent,
	.free			= alpha_noop_free_coherent,
	.map_page		= alpha_noop_map_page,
	.map_sg			= alpha_noop_map_sg,
	.mapping_error		= alpha_noop_mapping_error,
	.dma_supported		= alpha_noop_supported,
	.set_dma_mask		= alpha_noop_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);