linux/arch/xtensa/include/asm/io.h
/*
 * include/asm-xtensa/io.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H

#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/kernel.h>

#include <linux/types.h>

#define XCHAL_KIO_CACHED_VADDR  0xe0000000
#define XCHAL_KIO_BYPASS_VADDR  0xf0000000
#define XCHAL_KIO_PADDR         0xf0000000
#define XCHAL_KIO_SIZE          0x10000000

#define IOADDR(x)               (XCHAL_KIO_BYPASS_VADDR + (x))

/*
 * Swap functions to change byte order from little-endian to big-endian and
 * vice versa.
 */

static inline unsigned short _swapw (unsigned short v)
{
        return (v << 8) | (v >> 8);
}

static inline unsigned int _swapl (unsigned int v)
{
        return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
}

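/*
 * Example (illustrative sketch, not taken from the original code): the
 * helpers above reverse the byte order of a 16- or 32-bit value, e.g.
 * _swapw(0x1234) == 0x3412 and _swapl(0x12345678) == 0x78563412.
 * A hypothetical self-check could look like this:
 *
 *      static inline void _swap_selftest(void)
 *      {
 *              BUG_ON(_swapw(0x1234) != 0x3412);
 *              BUG_ON(_swapl(0x12345678) != 0x78563412);
 *      }
 */
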
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/Xtensa mapping.
 */

static inline unsigned long virt_to_phys(volatile void * address)
{
        return __pa(address);
}

static inline void * phys_to_virt(unsigned long address)
{
        return __va(address);
}

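/*
 * Example (illustrative sketch): because the conversion is a fixed linear
 * offset, round-tripping a kernel direct-mapped pointer is the identity
 * (vaddr below is a hypothetical buffer obtained from kmalloc()):
 *
 *      void *vaddr = kmalloc(64, GFP_KERNEL);
 *      unsigned long paddr = virt_to_phys(vaddr);
 *      BUG_ON(phys_to_virt(paddr) != vaddr);
 */
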
/*
 * virt_to_bus and bus_to_virt are deprecated.
 */

#define virt_to_bus(x)  virt_to_phys(x)
#define bus_to_virt(x)  phys_to_virt(x)

/*
 * Map the specified bus memory into the kernel's address space.
 * Both ioremap() and ioremap_nocache() return an uncached (bypass) mapping.
 * Note that we currently don't support any address outside the KIO segment.
 */

static inline void *ioremap(unsigned long offset, unsigned long size)
{
#ifdef CONFIG_MMU
        if (offset >= XCHAL_KIO_PADDR
            && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
                return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
        else
                BUG();
#else
        return (void *)offset;
#endif
}

static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
#ifdef CONFIG_MMU
        if (offset >= XCHAL_KIO_PADDR
            && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
                return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
        else
                BUG();
#else
        return (void *)offset;
#endif
}

static inline void iounmap(void *addr)
{
}

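/*
 * Example (illustrative sketch): a driver would typically map its device
 * registers once, access them with the accessors below, and unmap on exit.
 * DEV_PADDR, the 0x100 window size and the register offsets are
 * hypothetical:
 *
 *      void *regs = ioremap_nocache(DEV_PADDR, 0x100);
 *      writel(1, regs + 0x00);
 *      if (readl(regs + 0x04) == 0)
 *              printk(KERN_ERR "device did not come up\n");
 *      iounmap(regs);
 */
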
/*
 * Generic I/O
 */

#define readb(addr) \
        ({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
#define readw(addr) \
        ({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
#define readl(addr) \
        ({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))

static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
        return *(__force volatile __u8 *)(addr);
}
static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
        return *(__force volatile __u16 *)(addr);
}
static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
        return *(__force volatile __u32 *)(addr);
}
static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
        *(__force volatile __u8 *)(addr) = b;
}
static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
        *(__force volatile __u16 *)(addr) = b;
}
static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
        *(__force volatile __u32 *)(addr) = b;
}

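/*
 * Example (illustrative sketch): the __raw_* accessors perform plain
 * volatile loads and stores with no byte swapping or barriers, which is
 * what you want when streaming raw data to a device FIFO.  The fifo
 * pointer and word count below are hypothetical:
 *
 *      static void fifo_write(void __iomem *fifo, const u32 *buf, int words)
 *      {
 *              while (words--)
 *                      __raw_writel(*buf++, fifo);
 *      }
 */
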
/* These are the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl, the "string" versions
 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
 * inb_p/inw_p/...
 * The macros don't do byte-swapping.
 */

#define inb(port)               readb((u8 *)((unsigned long)(port)))
#define outb(val, port)         writeb((val), (u8 *)((unsigned long)(port)))
#define inw(port)               readw((u16 *)((unsigned long)(port)))
#define outw(val, port)         writew((val), (u16 *)((unsigned long)(port)))
#define inl(port)               readl((u32 *)((unsigned long)(port)))
#define outl(val, port)         writel((val), (u32 *)((unsigned long)(port)))

#define inb_p(port)             inb((port))
#define outb_p(val, port)       outb((val), (port))
#define inw_p(port)             inw((port))
#define outw_p(val, port)       outw((val), (port))
#define inl_p(port)             inl((port))
#define outl_p(val, port)       outl((val), (port))

extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

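/*
 * Example (illustrative sketch): Xtensa has no separate I/O instruction
 * space, so the "port" argument is simply a bus address (see IOADDR above).
 * Reading a 512-byte sector from a hypothetical IDE-style data register at
 * DEV_DATA_PORT might look like:
 *
 *      u16 sector[256];
 *      insw(DEV_DATA_PORT, sector, 256);
 */
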
#define IO_SPACE_LIMIT ~0

#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)   memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)     memcpy((void *)(a),(b),(c))

/* At this point, Xtensa doesn't provide byte-swap instructions. */

#ifdef __XTENSA_EB__
# define in_8(addr) (*(u8 *)(addr))
# define in_le16(addr) _swapw(*(u16 *)(addr))
# define in_le32(addr) _swapl(*(u32 *)(addr))
# define out_8(b, addr) *(u8 *)(addr) = (b)
# define out_le16(b, addr) *(u16 *)(addr) = _swapw(b)
# define out_le32(b, addr) *(u32 *)(addr) = _swapl(b)
#elif defined(__XTENSA_EL__)
# define in_8(addr) (*(u8 *)(addr))
# define in_le16(addr) (*(u16 *)(addr))
# define in_le32(addr) (*(u32 *)(addr))
# define out_8(b, addr) *(u8 *)(addr) = (b)
# define out_le16(b, addr) *(u16 *)(addr) = (b)
# define out_le32(b, addr) *(u32 *)(addr) = (b)
#else
# error processor byte order undefined!
#endif
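
/*
 * Example (illustrative sketch): these helpers read and write little-endian
 * device registers regardless of the CPU's byte order, so the same driver
 * code works on both __XTENSA_EB__ and __XTENSA_EL__ builds.  The regs
 * pointer and the register offsets below are hypothetical:
 *
 *      u32 status = in_le32(regs + STATUS_REG);
 *      out_le16(0x0001, regs + CTRL_REG);
 */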


/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)   (p)


#endif  /* __KERNEL__ */

#endif  /* _XTENSA_IO_H */