linux/arch/sh/include/asm/io.h
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set copied in from the
 * default machine vector is used instead. These are largely for old
 * compat code for I/O offsetting to SuperIOs, all of which is better
 * handled through the machvec ioport mapping routines these days.
 */
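
/*
 * A minimal usage sketch of the convention above ("regs", FOO_STAT and
 * the port number are hypothetical names, not part of this header):
 *
 *	u32 status = readl(regs + FOO_STAT);	MMIO read, with barrier
 *	u8 lsr = inb(0x3fd);			ISA port read, via machvec
 */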
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define inb(p)                  sh_mv.mv_inb((p))
#define inw(p)                  sh_mv.mv_inw((p))
#define inl(p)                  sh_mv.mv_inl((p))
#define outb(x,p)               sh_mv.mv_outb((x),(p))
#define outw(x,p)               sh_mv.mv_outw((x),(p))
#define outl(x,p)               sh_mv.mv_outl((x),(p))

#define inb_p(p)                sh_mv.mv_inb_p((p))
#define inw_p(p)                sh_mv.mv_inw_p((p))
#define inl_p(p)                sh_mv.mv_inl_p((p))
#define outb_p(x,p)             sh_mv.mv_outb_p((x),(p))
#define outw_p(x,p)             sh_mv.mv_outw_p((x),(p))
#define outl_p(x,p)             sh_mv.mv_outl_p((x),(p))

#define insb(p,b,c)             sh_mv.mv_insb((p), (b), (c))
#define insw(p,b,c)             sh_mv.mv_insw((p), (b), (c))
#define insl(p,b,c)             sh_mv.mv_insl((p), (b), (c))
#define outsb(p,b,c)            sh_mv.mv_outsb((p), (b), (c))
#define outsw(p,b,c)            sh_mv.mv_outsw((p), (b), (c))
#define outsl(p,b,c)            sh_mv.mv_outsl((p), (b), (c))
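
/*
 * Example: the string forms move whole buffers, e.g. draining a 16-bit
 * ISA data port into memory (a sketch; DATA_PORT and the buffer are
 * hypothetical):
 *
 *	u16 buf[256];
 *	insw(DATA_PORT, buf, 256);
 */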

#define __raw_writeb(v,a)       (__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)       (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)       (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)       (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)          (__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)          (__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)          (__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)          (__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb(a)                ({ u8  r_ = __raw_readb(a); mb(); r_; })
#define readw(a)                ({ u16 r_ = __raw_readw(a); mb(); r_; })
#define readl(a)                ({ u32 r_ = __raw_readl(a); mb(); r_; })
#define readq(a)                ({ u64 r_ = __raw_readq(a); mb(); r_; })

#define writeb(v,a)             ({ __raw_writeb((v),(a)); mb(); })
#define writew(v,a)             ({ __raw_writew((v),(a)); mb(); })
#define writel(v,a)             ({ __raw_writel((v),(a)); mb(); })
#define writeq(v,a)             ({ __raw_writeq((v),(a)); mb(); })
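
/*
 * Example: a read-modify-write of a memory-mapped register using the
 * barriered accessors (a sketch; "regs" and CTRL_ENABLE are
 * hypothetical):
 *
 *	u32 v = readl(regs);
 *	writel(v | CTRL_ENABLE, regs);
 */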

/* SuperH on-chip I/O functions */
#define ctrl_inb                __raw_readb
#define ctrl_inw                __raw_readw
#define ctrl_inl                __raw_readl
#define ctrl_inq                __raw_readq

#define ctrl_outb               __raw_writeb
#define ctrl_outw               __raw_writew
#define ctrl_outl               __raw_writel
#define ctrl_outq               __raw_writeq
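
/*
 * These are plain aliases; per the note at the top of this file, new
 * code should spell the access directly, e.g.
 *
 *	__raw_writew(0x12, reg);	rather than	ctrl_outw(0x12, reg);
 *
 * ("reg" being whatever on-chip register address is at hand).
 */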

/*
 * Dummy read from an always-mapped address, used as a small settle
 * delay between accesses to slow on-chip peripherals.
 */
static inline void ctrl_delay(void)
{
#ifdef CONFIG_CPU_SH4
        __raw_readw(CCN_PVR);
#elif defined(P2SEG)
        __raw_readw(P2SEG);
#else
#error "Need a dummy address for delay"
#endif
}
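
/*
 * Typical use (a sketch; FOO_REG is hypothetical):
 *
 *	__raw_writew(v, FOO_REG);
 *	ctrl_delay();
 */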

#define __BUILD_MEMORY_STRING(bwlq, type)                               \
                                                                        \
static inline void __raw_writes##bwlq(volatile void __iomem *mem,       \
                                const void *addr, unsigned int count)   \
{                                                                       \
        const volatile type *__addr = addr;                             \
                                                                        \
        while (count--) {                                               \
                __raw_write##bwlq(*__addr, mem);                        \
                __addr++;                                               \
        }                                                               \
}                                                                       \
                                                                        \
static inline void __raw_reads##bwlq(volatile void __iomem *mem,        \
                               void *addr, unsigned int count)          \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                *__addr = __raw_read##bwlq(mem);                        \
                __addr++;                                               \
        }                                                               \
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif

__BUILD_MEMORY_STRING(q, u64)
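
/*
 * Example: the string accessors hammer a single address, which suits
 * device FIFOs (a sketch; "fifo" and the packet buffer are
 * hypothetical):
 *
 *	u32 pkt[16];
 *	__raw_writesl(fifo, pkt, 16);
 */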

#define writesb                 __raw_writesb
#define writesw                 __raw_writesw
#define writesl                 __raw_writesl

#define readsb                  __raw_readsb
#define readsw                  __raw_readsw
#define readsl                  __raw_readsl

#define readb_relaxed(a)        readb(a)
#define readw_relaxed(a)        readw(a)
#define readl_relaxed(a)        readl(a)
#define readq_relaxed(a)        readq(a)

#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a)              __raw_readb(a)
#define ioread16(a)             __raw_readw(a)
#define ioread16be(a)           be16_to_cpu(__raw_readw((a)))
#define ioread32(a)             __raw_readl(a)
#define ioread32be(a)           be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)           __raw_writeb((v),(a))
#define iowrite16(v,a)          __raw_writew((v),(a))
#define iowrite16be(v,a)        __raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)          __raw_writel((v),(a))
#define iowrite32be(v,a)        __raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a, d, c)    __raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c)   __raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c)   __raw_readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)   __raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)  __raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)  __raw_writesl((a), (s), (c))
#endif
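
/*
 * Example: reading a big-endian device register through the iomap API
 * (a sketch; "regs" and FOO_ID are hypothetical):
 *
 *	u32 id = ioread32be(regs + FOO_ID);
 */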

#define mmio_insb(p,d,c)        __raw_readsb(p,d,c)
#define mmio_insw(p,d,c)        __raw_readsw(p,d,c)
#define mmio_insl(p,d,c)        __raw_readsl(p,d,c)

#define mmio_outsb(p,s,c)       __raw_writesb(p,s,c)
#define mmio_outsw(p,s,c)       __raw_writesw(p,s,c)
#define mmio_outsl(p,s,c)       __raw_writesl(p,s,c)

/* synco on SH-4A, otherwise a nop */
#define mmiowb()                wmb()
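
/*
 * The usual pattern is ordering MMIO against a following spin unlock
 * (a sketch; "lock", "regs" and the value are hypothetical):
 *
 *	spin_lock(&lock);
 *	writel(v, regs);
 *	mmiowb();
 *	spin_unlock(&lock);
 */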

#define IO_SPACE_LIMIT 0xffffffff

extern unsigned long generic_io_base;

/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
        generic_io_base = pbase;
}
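
/*
 * Example: a board setup routine wiring up the generic ioport_map (a
 * sketch; the routine name and base address are hypothetical):
 *
 *	static void __init myboard_setup(char **cmdline_p)
 *	{
 *		__set_io_port_base(0xb8000000);
 *	}
 */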

#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))

/* We really want to try to get these down to memcpy() etc. */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);
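
/*
 * Example: pulling a block of device memory into a normal buffer (a
 * sketch; "ring" and struct foo_desc are hypothetical):
 *
 *	struct foo_desc desc;
 *	memcpy_fromio(&desc, ring, sizeof(desc));
 */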

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
                                       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)   ((unsigned long)(address))
#define phys_to_virt(address)   ((void *)(address))
#else
#define virt_to_phys(address)   (__pa(address))
#define phys_to_virt(address)   (__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
                        unsigned long flags);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
        unsigned long last_addr = offset + size - 1;
#endif
        void __iomem *ret;

        ret = __ioremap_trapped(offset, size);
        if (ret)
                return ret;

#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
        /*
         * For P1 and P2 space this is trivial, as everything is already
         * mapped. Uncached accesses to P1 addresses are done through P2.
         * In the P3 case or for addresses outside of the 29-bit space,
         * mapping must be done by the PMB or by using page tables.
         */
        if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
                if (unlikely(flags & _PAGE_CACHABLE))
                        return (void __iomem *)P1SEGADDR(offset);

                return (void __iomem *)P2SEGADDR(offset);
        }

        /* P4 addresses above the store queues are always mapped. */
        if (unlikely(offset >= P3_ADDR_MAX))
                return (void __iomem *)P4SEGADDR(offset);
#endif

        return __ioremap(offset, size, flags);
}
#else
#define __ioremap_mode(offset, size, flags)     ((void __iomem *)(offset))
#define __iounmap(addr)                         do { } while (0)
#endif /* CONFIG_MMU */

#define ioremap(offset, size)                           \
        __ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size)                   \
        __ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size)                     \
        __ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags)                 \
        __ioremap((offset), (size), (flags))
#define ioremap_prot(offset, size, flags)               \
        __ioremap_mode((offset), (size), (flags))
#define iounmap(addr)                                   \
        __iounmap((addr))
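
/*
 * Typical driver usage (a sketch; "res" is a hypothetical struct
 * resource describing the device's register window):
 *
 *	void __iomem *regs = ioremap_nocache(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */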

#define maybebadio(port) \
        printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
               __func__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)   p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */