linux/include/asm-ppc/io.h
#ifdef __KERNEL__
#ifndef _PPC_IO_H
#define _PPC_IO_H

#include <linux/string.h>
#include <linux/types.h>

#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/mmu.h>

#define SIO_CONFIG_RA   0x398
#define SIO_CONFIG_RD   0x399

#define SLOW_DOWN_IO

#define PMAC_ISA_MEM_BASE       0
#define PMAC_PCI_DRAM_OFFSET    0
#define CHRP_ISA_IO_BASE        0xf8000000
#define CHRP_ISA_MEM_BASE       0xf7000000
#define CHRP_PCI_DRAM_OFFSET    0
#define PREP_ISA_IO_BASE        0x80000000
#define PREP_ISA_MEM_BASE       0xc0000000
#define PREP_PCI_DRAM_OFFSET    0x80000000

#if defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif !defined(CONFIG_PCI)
#define _IO_BASE        0
#define _ISA_MEM_BASE   0
#define PCI_DRAM_OFFSET 0
#else /* Everyone else */
#define _IO_BASE        isa_io_base
#define _ISA_MEM_BASE   isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
#endif /* Platform-dependent I/O */

#define ___IO_BASE ((void __iomem *)_IO_BASE)
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;

/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 *
 * Read operations have additional twi & isync to make sure the read
 * is actually performed (i.e. the data has come back) before we start
 * executing any following instructions.
 */
extern inline int in_8(const volatile unsigned char __iomem *addr)
{
        int ret;

        __asm__ __volatile__(
                "sync; lbz%U1%X1 %0,%1;\n"
                "twi 0,%0,0;\n"
                "isync" : "=r" (ret) : "m" (*addr));
        return ret;
}

extern inline void out_8(volatile unsigned char __iomem *addr, int val)
{
        __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline int in_le16(const volatile unsigned short __iomem *addr)
{
        int ret;

        __asm__ __volatile__("sync; lhbrx %0,0,%1;\n"
                             "twi 0,%0,0;\n"
                             "isync" : "=r" (ret) :
                              "r" (addr), "m" (*addr));
        return ret;
}

extern inline int in_be16(const volatile unsigned short __iomem *addr)
{
        int ret;

        __asm__ __volatile__("sync; lhz%U1%X1 %0,%1;\n"
                             "twi 0,%0,0;\n"
                             "isync" : "=r" (ret) : "m" (*addr));
        return ret;
}

extern inline void out_le16(volatile unsigned short __iomem *addr, int val)
{
        __asm__ __volatile__("sync; sthbrx %1,0,%2" : "=m" (*addr) :
                              "r" (val), "r" (addr));
}

extern inline void out_be16(volatile unsigned short __iomem *addr, int val)
{
        __asm__ __volatile__("sync; sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}

extern inline unsigned in_le32(const volatile unsigned __iomem *addr)
{
        unsigned ret;

        __asm__ __volatile__("sync; lwbrx %0,0,%1;\n"
                             "twi 0,%0,0;\n"
                             "isync" : "=r" (ret) :
                             "r" (addr), "m" (*addr));
        return ret;
}

extern inline unsigned in_be32(const volatile unsigned __iomem *addr)
{
        unsigned ret;

        __asm__ __volatile__("sync; lwz%U1%X1 %0,%1;\n"
                             "twi 0,%0,0;\n"
                             "isync" : "=r" (ret) : "m" (*addr));
        return ret;
}

extern inline void out_le32(volatile unsigned __iomem *addr, int val)
{
        __asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) :
                             "r" (val), "r" (addr));
}

extern inline void out_be32(volatile unsigned __iomem *addr, int val)
{
        __asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
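
/*
 * Illustrative example (not part of the original header): a driver for a
 * big-endian memory-mapped device would typically touch its registers
 * through the accessors above.  The "regs" pointer and the FOO_* offsets
 * are assumptions for the example only:
 *
 *      u32 status = in_be32(regs + FOO_STATUS);
 *      out_be32(regs + FOO_IRQ_ACK, status);
 *
 * A little-endian (e.g. PCI) device would use in_le32()/out_le32()
 * instead, so the byte order is swapped on each load/store.
 */
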
#if defined (CONFIG_8260_PCI9)
#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#else
static inline __u8 readb(const volatile void __iomem *addr)
{
        return in_8(addr);
}
static inline void writeb(__u8 b, volatile void __iomem *addr)
{
        out_8(addr, b);
}
#endif

#if defined (CONFIG_8260_PCI9)
/* Use macros if PCI9 workaround enabled */
#define readw(addr) in_le16((volatile u16 *)(addr))
#define readl(addr) in_le32((volatile u32 *)(addr))
#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
#else
static inline __u16 readw(const volatile void __iomem *addr)
{
        return in_le16(addr);
}
static inline __u32 readl(const volatile void __iomem *addr)
{
        return in_le32(addr);
}
static inline void writew(__u16 b, volatile void __iomem *addr)
{
        out_le16(addr, b);
}
static inline void writel(__u32 b, volatile void __iomem *addr)
{
        out_le32(addr, b);
}
#endif /* CONFIG_8260_PCI9 */

#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)

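/*
 * The __raw_* variants below are plain loads and stores: unlike the
 * accessors above they do no byte-swapping and insert no sync, eieio or
 * isync barriers, so the caller is responsible for any ordering it needs.
 */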
static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
        return *(__force volatile __u8 *)(addr);
}
static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
        return *(__force volatile __u16 *)(addr);
}
static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
        return *(__force volatile __u32 *)(addr);
}
static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
        *(__force volatile __u8 *)(addr) = b;
}
static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
        *(__force volatile __u16 *)(addr) = b;
}
static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
        *(__force volatile __u32 *)(addr) = b;
}

#define mmiowb()

/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * that case.  - paulus
 */
#define insb(port, buf, ns)     _insb((port)+___IO_BASE, (buf), (ns))
#define outsb(port, buf, ns)    _outsb((port)+___IO_BASE, (buf), (ns))
#define insw(port, buf, ns)     _insw_ns((port)+___IO_BASE, (buf), (ns))
#define outsw(port, buf, ns)    _outsw_ns((port)+___IO_BASE, (buf), (ns))
#define insl(port, buf, nl)     _insl_ns((port)+___IO_BASE, (buf), (nl))
#define outsl(port, buf, nl)    _outsl_ns((port)+___IO_BASE, (buf), (nl))

#define readsb(a, b, n)         _insb((a), (b), (n))
#define readsw(a, b, n)         _insw_ns((a), (b), (n))
#define readsl(a, b, n)         _insl_ns((a), (b), (n))
#define writesb(a, b, n)        _outsb((a),(b),(n))
#define writesw(a, b, n)        _outsw_ns((a),(b),(n))
#define writesl(a, b, n)        _outsl_ns((a),(b),(n))

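/*
 * Illustrative only: an IDE-style driver could pull one 512-byte sector
 * from a 16-bit data port into a byte buffer with the string accessors
 * above (the port name is an assumption for the example):
 *
 *      insw(FOO_DATA_PORT, buf, 256);
 *
 * The count is in 16-bit units and the data is not byte-swapped.
 */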

/*
 * On powermacs and 8xx we will get a machine check exception
 * if we try to read data from a non-existent I/O port. Because
 * the machine check is an asynchronous exception, it isn't
 * well-defined which instruction SRR0 will point to when the
 * exception occurs.
 * With the sequence below (twi; isync; nop), we have found that
 * the machine check occurs on one of the three instructions on
 * all PPC implementations tested so far.  The twi and isync are
 * needed on the 601 (in fact twi; sync works too), the isync and
 * nop are needed on 604[e|r], and any of twi, sync or isync will
 * work on 603[e], 750, 74xx.
 * The twi creates an explicit data dependency on the returned
 * value which seems to be needed to make the 601 wait for the
 * load to finish.
 */

#define __do_in_asm(name, op)                           \
extern __inline__ unsigned int name(unsigned int port)  \
{                                                       \
        unsigned int x;                                 \
        __asm__ __volatile__(                           \
                "sync\n"                                \
                "0:"    op "    %0,0,%1\n"              \
                "1:     twi     0,%0,0\n"               \
                "2:     isync\n"                        \
                "3:     nop\n"                          \
                "4:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "5:     li      %0,-1\n"                \
                "       b       4b\n"                   \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align  2\n"                    \
                "       .long   0b,5b\n"                \
                "       .long   1b,5b\n"                \
                "       .long   2b,5b\n"                \
                "       .long   3b,5b\n"                \
                ".previous"                             \
                : "=&r" (x)                             \
                : "r" (port + ___IO_BASE));             \
        return x;                                       \
}

#define __do_out_asm(name, op)                          \
extern __inline__ void name(unsigned int val, unsigned int port) \
{                                                       \
        __asm__ __volatile__(                           \
                "sync\n"                                \
                "0:" op " %0,0,%1\n"                    \
                "1:     sync\n"                         \
                "2:\n"                                  \
                ".section __ex_table,\"a\"\n"           \
                "       .align  2\n"                    \
                "       .long   0b,2b\n"                \
                "       .long   1b,2b\n"                \
                ".previous"                             \
                : : "r" (val), "r" (port + ___IO_BASE));        \
}

__do_out_asm(outb, "stbx")
#if defined (CONFIG_8260_PCI9)
/* The in* accessors cannot be defined as inline asm when the PCI9 workaround is used */
#define inb(port)               in_8((port)+___IO_BASE)
#define inw(port)               in_le16((port)+___IO_BASE)
#define inl(port)               in_le32((port)+___IO_BASE)
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")
#else
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhbrx")
__do_in_asm(inl, "lwbrx")
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")

#endif

#define inb_p(port)             inb((port))
#define outb_p(val, port)       outb((val), (port))
#define inw_p(port)             inw((port))
#define outw_p(val, port)       outw((val), (port))
#define inl_p(port)             inl((port))
#define outl_p(val, port)       outl((val), (port))

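/*
 * Illustrative sketch: a legacy 16550-style probe using the port
 * accessors above.  FOO_UART_BASE is an assumed port number; offset 7
 * is the UART scratch register:
 *
 *      outb(0x5a, FOO_UART_BASE + 7);
 *      if (inb(FOO_UART_BASE + 7) != 0x5a)
 *              return -ENODEV;
 *
 * A read from a non-existent port returns -1 (all ones) because of the
 * exception fixup in __do_in_asm() above, so such probes fail cleanly
 * instead of taking an unhandled machine check.
 */
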
extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);


#define IO_SPACE_LIMIT ~0

#if defined (CONFIG_8260_PCI9)
#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)   memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)     memcpy((void *)(a),(b),(c))
#else
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
        memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst,const volatile void __iomem *src, int count)
{
        memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
        memcpy((void __force *) dst, src, count);
}
#endif

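/*
 * For example (illustrative names only), a driver could download a
 * firmware blob into on-card SRAM and clear a mailbox area with:
 *
 *      memcpy_toio(card_sram, fw->data, fw->size);
 *      memset_io(card_sram + FOO_MBOX_OFF, 0, FOO_MBOX_SIZE);
 */
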
/*
 * Map in an area of physical address space, for accessing
 * I/O devices etc.
 */
extern void __iomem *__ioremap(phys_addr_t address, unsigned long size,
                       unsigned long flags);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#ifdef CONFIG_44x
extern void __iomem *ioremap64(unsigned long long address, unsigned long size);
#endif
#define ioremap_nocache(addr, size)     ioremap((addr), (size))
extern void iounmap(volatile void __iomem *addr);
extern unsigned long iopa(unsigned long addr);
extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
                             unsigned int size, int flags);

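/*
 * Typical (illustrative) use from a driver probe routine, where "paddr"
 * and FOO_REG_SIZE would come from the platform or a PCI BAR:
 *
 *      void __iomem *regs = ioremap(paddr, FOO_REG_SIZE);
 *      if (regs == NULL)
 *              return -ENOMEM;
 *      ver = in_be32(regs + FOO_VERSION);
 *      ...
 *      iounmap(regs);
 */
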
/*
 * The PCI bus is inherently Little-Endian.  The PowerPC is being
 * run Big-Endian.  Thus all values which cross the [PCI] barrier
 * must be endian-adjusted.  Also, the local DRAM has a different
 * address from the PCI point of view, thus buffer addresses also
 * have to be modified [mapped] appropriately.
 */
extern inline unsigned long virt_to_bus(volatile void * address)
{
        if (address == (void *)0)
                return 0;
        return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
}

extern inline void * bus_to_virt(unsigned long address)
{
        if (address == 0)
                return NULL;
        return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
}

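/*
 * For example (the descriptor layout is an assumption for the example),
 * a driver handing a kernel buffer to a little-endian PCI bus-master
 * would store the bus address with an explicit endian conversion:
 *
 *      desc->buf_addr = cpu_to_le32(virt_to_bus(buf));
 */
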
/*
 * Change virtual addresses to physical addresses and vv, for
 * addresses in the area where the kernel has the RAM mapped.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
        return (unsigned long) address - KERNELBASE;
}

extern inline void * phys_to_virt(unsigned long address)
{
        return (void *) (address + KERNELBASE);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
#define page_to_bus(page)       (page_to_phys(page) + PCI_DRAM_OFFSET)

/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r()  eieio()
#define iobarrier_w()  eieio()

/*
 * Here comes the ppc implementation of the IOMAP
 * interfaces.
 */
static inline unsigned int ioread8(void __iomem *addr)
{
        return readb(addr);
}

static inline unsigned int ioread16(void __iomem *addr)
{
        return readw(addr);
}

static inline unsigned int ioread32(void __iomem *addr)
{
        return readl(addr);
}

static inline void iowrite8(u8 val, void __iomem *addr)
{
        writeb(val, addr);
}

static inline void iowrite16(u16 val, void __iomem *addr)
{
        writew(val, addr);
}

static inline void iowrite32(u32 val, void __iomem *addr)
{
        writel(val, addr);
}

static inline void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
        _insb(addr, dst, count);
}

static inline void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
        _insw_ns(addr, dst, count);
}

static inline void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
        _insl_ns(addr, dst, count);
}

static inline void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
        _outsb(addr, src, count);
}

static inline void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
        _outsw_ns(addr, src, count);
}

static inline void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
        _outsl_ns(addr, src, count);
}

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

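/*
 * Illustrative sketch of the iomap interface above for a PCI device; the
 * BAR number and the FOO_ID register offset are assumptions for the
 * example:
 *
 *      void __iomem *base = pci_iomap(pdev, 0, 0);
 *      u32 id = ioread32(base + FOO_ID);
 *      ...
 *      pci_iounmap(pdev, base);
 *
 * The same cookie works whether the BAR is memory- or I/O-mapped.
 */
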
#endif /* _PPC_IO_H */

#ifdef CONFIG_8260_PCI9
#include <asm/mpc8260_pci9.h>
#endif

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)   p

/* Read-modify-write helpers for setting and clearing bits in I/O registers */
#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))

#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) |  (_v))
#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))

#define setbits8(_addr, _v) out_8((_addr), in_8(_addr) |  (_v))
#define clrbits8(_addr, _v) out_8((_addr), in_8(_addr) & ~(_v))

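/*
 * For example (illustrative register and bit names), enabling a
 * transmitter with a read-modify-write of a big-endian control register:
 *
 *      setbits32(&regs->ctrl, FOO_CTRL_TX_ENABLE);
 *
 * Note these helpers are not atomic with respect to other CPUs or the
 * device itself; callers must provide their own locking if needed.
 */
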
#endif /* __KERNEL__ */