/* linux/arch/mips/include/asm/mach-powertv/ioremap.h */
/*
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Portions Copyright (C)  Cisco Systems, Inc.
 */
#ifndef __ASM_MACH_POWERTV_IOREMAP_H
#define __ASM_MACH_POWERTV_IOREMAP_H

#include <linux/types.h>
#include <linux/log2.h>
#include <linux/compiler.h>

#include <asm/pgtable-bits.h>
#include <asm/addrspace.h>
/* We're going to mess with bits, so get sizes */
#define IOR_BPC			8			/* Bits per char */
#define IOR_PHYS_BITS		(IOR_BPC * sizeof(phys_addr_t))
#define IOR_DMA_BITS		(IOR_BPC * sizeof(dma_addr_t))

/*
 * Define the granularity of physical/DMA mapping in terms of the number
 * of bits that defines the offset within a grain. These will be the
 * least significant bits of the address. The rest of a physical or DMA
 * address will be used to index into an appropriate table to find the
 * offset to add to the address to yield the corresponding DMA or physical
 * address, respectively.
 *
 * With 22 bits, a grain covers 1 << 22 bytes, i.e. 4 MiB.
 */
#define IOR_LSBITS		22			/* Bits in a grain */

/*
 * Compute the number of most significant address bits after removing those
 * used for the offset within a grain and then compute the number of table
 * entries for the conversion. Each conversion table therefore has one
 * entry per grain of the respective address space.
 */
#define IOR_PHYS_MSBITS		(IOR_PHYS_BITS - IOR_LSBITS)
#define IOR_NUM_PHYS_TO_DMA	((phys_addr_t) 1 << IOR_PHYS_MSBITS)

#define IOR_DMA_MSBITS		(IOR_DMA_BITS - IOR_LSBITS)
#define IOR_NUM_DMA_TO_PHYS	((dma_addr_t) 1 << IOR_DMA_MSBITS)
  44
/*
 * Define data structures used as elements in the arrays for the conversion
 * between physical and DMA addresses. We do some slightly fancy math to
 * compute the width of the offset element of the conversion tables so
 * that we can have the smallest conversion tables. Next, round up the
 * sizes to the next higher power of two, i.e. the offset element will have
 * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
 * bits.  Finally, we compute a shift value that puts the most significant
 * bits of the offset into the most significant bits of the offset element.
 * This makes it more efficient on processors without barrel shifters and
 * easier to see the values if the conversion table is dumped in binary.
 */
/* Round n up to a power of two, with a floor of 8 bits (one byte). */
#define _IOR_OFFSET_WIDTH(n)	(1 << order_base_2(n))
#define IOR_OFFSET_WIDTH(n) \
	(_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))

/* Width of a table entry's offset field and the corresponding shift. */
#define IOR_PHYS_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
#define IOR_PHYS_SHIFT		(IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)

#define IOR_DMA_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
#define IOR_DMA_SHIFT		(IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
  66
/*
 * One physical->DMA table entry: the offset (stored unshifted; see
 * phys_to_dma(), which shifts it left by IOR_PHYS_SHIFT before adding).
 * Packed and aligned to exactly IOR_DMA_OFFSET_BITS / 8 bytes so the
 * table is as dense as possible.
 */
struct ior_phys_to_dma {
	dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
		__aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
};
  71
/*
 * One DMA->physical table entry: the offset (stored unshifted; see
 * dma_to_phys(), which shifts it left by IOR_DMA_SHIFT before adding).
 * NOTE(review): the field is declared dma_addr_t but is sized with
 * IOR_PHYS_OFFSET_BITS and holds a physical-address offset -- confirm
 * this typing is intentional.
 */
struct ior_dma_to_phys {
	dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
		__aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
};
  76
/*
 * Conversion tables, one entry per grain, indexed by address >> IOR_LSBITS.
 * Defined outside this header; presumably populated via ioremap_add_map()
 * -- verify against the platform setup code.
 */
extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
  79
  80static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
  81{
  82        return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
  83}
  84
  85static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
  86{
  87        return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
  88}
  89
  90/* These are not portable and should not be used in drivers. Drivers should
  91 * be using ioremap() and friends to map physical addresses to virtual
  92 * addresses and dma_map*() and friends to map virtual addresses into DMA
  93 * addresses and back.
  94 */
  95static inline dma_addr_t phys_to_dma(phys_addr_t phys)
  96{
  97        return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
  98}
  99
 100static inline phys_addr_t dma_to_phys(dma_addr_t dma)
 101{
 102        return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
 103}
 104
/*
 * Register a physical/DMA aliasing for @size bytes in the conversion
 * tables.  NOTE(review): the first parameter is named "phys" but typed
 * dma_addr_t while "alias" is phys_addr_t -- confirm which address space
 * each argument belongs to against the definition.
 */
extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
	dma_addr_t size);
 107
 108/*
 109 * Allow physical addresses to be fixed up to help peripherals located
 110 * outside the low 32-bit range -- generic pass-through version.
 111 */
 112static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
 113{
 114        return phys_addr;
 115}
 116
 117/*
 118 * Handle the special case of addresses the area aliased into the first
 119 * 512 MiB of the processor's physical address space. These turn into either
 120 * kseg0 or kseg1 addresses, depending on flags.
 121 */
 122static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
 123        unsigned long flags)
 124{
 125        phys_addr_t start_offset;
 126        void __iomem *result = NULL;
 127
 128        /* Start by checking to see whether this is an aliased address */
 129        start_offset = _dma_to_phys_offset_raw(start);
 130
 131        /*
 132         * If:
 133         * o    the memory is aliased into the first 512 MiB, and
 134         * o    the start and end are in the same RAM bank, and
 135         * o    we don't have a zero size or wrap around, and
 136         * o    we are supposed to create an uncached mapping,
 137         *      handle this is a kseg0 or kseg1 address
 138         */
 139        if (start_offset != 0) {
 140                phys_addr_t last;
 141                dma_addr_t dma_to_phys_offset;
 142
 143                last = start + size - 1;
 144                dma_to_phys_offset =
 145                        _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;
 146
 147                if (dma_to_phys_offset == start_offset &&
 148                        size != 0 && start <= last) {
 149                        phys_t adjusted_start;
 150                        adjusted_start = start + start_offset;
 151                        if (flags == _CACHE_UNCACHED)
 152                                result = (void __iomem *) (unsigned long)
 153                                        CKSEG1ADDR(adjusted_start);
 154                        else
 155                                result = (void __iomem *) (unsigned long)
 156                                        CKSEG0ADDR(adjusted_start);
 157                }
 158        }
 159
 160        return result;
 161}
 162
 163static inline int plat_iounmap(const volatile void __iomem *addr)
 164{
 165        return 0;
 166}
#endif /* __ASM_MACH_POWERTV_IOREMAP_H */