uboot/arch/mips/include/asm/io.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2000 FSMLabs, Inc.
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#if 0
#include <linux/pagemap.h>
#endif
#include <asm/addrspace.h>
#include <asm/byteorder.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Sane hardware offers swapping of I/O space accesses in hardware; less
 * sane hardware forces software to fiddle with this ...
 */
#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__)

#define __ioswab8(x) (x)
#define __ioswab16(x) swab16(x)
#define __ioswab32(x) swab32(x)

#else

#define __ioswab8(x) (x)
#define __ioswab16(x) (x)
#define __ioswab32(x) (x)

#endif
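
/*
 * Illustrative sketch (not part of the original header): __ioswab16() is
 * what makes readw()/writew() below return CPU-order values on big-endian
 * boards that need software swapping.  The register address used here is
 * purely hypothetical.
 */
#if 0
static inline unsigned short example_read_chip_id(void)
{
        /* Hypothetical device register in KSEG1 (uncached) space. */
        volatile unsigned short *reg = (volatile unsigned short *)0xbf000010;

        /* Raw bus value, byte-swapped (or not) into CPU order. */
        return __ioswab16(*reg);
}
#endif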

/*
 * This file contains the definitions for the MIPS counterpart of the
 * x86 in/out instructions.  This heap of macros and C results in much
 * better code than the approach of doing it in plain C.  The macros
 * result in code that is too fast for certain hardware.  On the other
 * hand, the performance of the string functions should be improved for
 * the sake of certain devices like EIDE disks that do high-speed polled I/O.
 *
 *   Ralf
 *
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl).  You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  This should be true
 * on any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
        * (unsigned long *) &mips_io_port_base = base;
}
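
/*
 * Illustrative sketch (not part of the original header): a board setup
 * routine would typically point the port base at the uncached KSEG1 mapping
 * of its PCI/ISA I/O window.  The 0xb4000000 value is purely hypothetical;
 * note that its low 16 bits are zero, as the comment above requires.
 */
#if 0
static inline void example_board_io_init(void)
{
        set_io_port_base(0xb4000000);   /* hypothetical I/O window in KSEG1 */
}
#endif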

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *              Linus
 *
 */

#define __SLOW_DOWN_IO \
        __asm__ __volatile__( \
                "sb\t$0,0x80(%0)" \
                : : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/MIPS mapping.
 */
extern inline phys_addr_t virt_to_phys(volatile void * address)
{
#ifndef CONFIG_64BIT
        return CPHYSADDR(address);
#else
        return XPHYSADDR(address);
#endif
}

extern inline void * phys_to_virt(unsigned long address)
{
#ifndef CONFIG_64BIT
        return (void *)KSEG0ADDR(address);
#else
        return (void *)CKSEG0ADDR(address);
#endif
}

/*
 * IO bus memory addresses are also 1:1 with the physical address
 */
extern inline unsigned long virt_to_bus(volatile void * address)
{
#ifndef CONFIG_64BIT
        return CPHYSADDR(address);
#else
        return XPHYSADDR(address);
#endif
}

extern inline void * bus_to_virt(unsigned long address)
{
#ifndef CONFIG_64BIT
        return (void *)KSEG0ADDR(address);
#else
        return (void *)CKSEG0ADDR(address);
#endif
}
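
/*
 * Illustrative sketch (not part of the original header): because the
 * mapping is 1:1 through KSEG0, converting back and forth is just an
 * address calculation.  The buffer here is only an example.
 */
#if 0
static inline int example_roundtrip(void *buf)
{
        phys_addr_t pa = virt_to_phys(buf);     /* strip the segment bits */
        void *va = phys_to_virt(pa);            /* map back into cached KSEG0 */

        return va == buf;                       /* true for KSEG0 addresses */
}
#endif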

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.
 */
extern unsigned long isa_slot_offset;

extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);

#if 0
extern inline void *ioremap(unsigned long offset, unsigned long size)
{
        return __ioremap(offset, size, _CACHE_UNCACHED);
}

extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
        return __ioremap(offset, size, _CACHE_UNCACHED);
}

extern void iounmap(void *addr);
#endif

/*
 * XXX We need system specific versions of these to handle EISA address bits
 * 24-31 on SNI.
 * XXX more SNI hacks.
 */
#define readb(addr) (*(volatile unsigned char *)(addr))
#define readw(addr) __ioswab16((*(volatile unsigned short *)(addr)))
#define readl(addr) __ioswab32((*(volatile unsigned int *)(addr)))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl

#define writeb(b,addr) (*(volatile unsigned char *)(addr)) = (b)
#define writew(b,addr) (*(volatile unsigned short *)(addr)) = (__ioswab16(b))
#define writel(b,addr) (*(volatile unsigned int *)(addr)) = (__ioswab32(b))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

#define memset_io(a,b,c)        memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)    memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)      memcpy((void *)(a),(b),(c))

/* END SNI HACKS ... */
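
/*
 * Illustrative sketch (not part of the original header): readl()/writel()
 * simply dereference the given virtual address, applying __ioswab32() where
 * configured.  The register block and offsets below are purely hypothetical.
 */
#if 0
#define EXAMPLE_REGS_BASE       0xbf000000      /* hypothetical KSEG1 device base */
#define EXAMPLE_REG_CTRL        0x00
#define EXAMPLE_REG_STATUS      0x04

static inline void example_enable_device(void)
{
        unsigned int status;

        writel(0x1, EXAMPLE_REGS_BASE + EXAMPLE_REG_CTRL);      /* set enable bit */
        status = readl(EXAMPLE_REGS_BASE + EXAMPLE_REG_STATUS);
        (void) status;
}
#endif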

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(PAGE_OFFSET))

#define isa_readb(a) readb(a)
#define isa_readw(a) readw(a)
#define isa_readl(a) readl(a)
#define isa_writeb(b,a) writeb(b,a)
#define isa_writew(w,a) writew(w,a)
#define isa_writel(l,a) writel(l,a)

#define isa_memset_io(a,b,c)     memset_io((a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),(b),(c))
#define isa_memcpy_toio(a,b,c)   memcpy_toio((a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

static inline int check_signature(unsigned long io_addr,
                                  const unsigned char *signature, int length)
{
        int retval = 0;
        do {
                if (readb(io_addr) != *signature)
                        goto out;
                io_addr++;
                signature++;
                length--;
        } while (length);
        retval = 1;
out:
        return retval;
}
#define isa_check_signature(io, s, l) check_signature((io),(s),(l))
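
/*
 * Illustrative sketch (not part of the original header): check_signature()
 * compares a byte string against what readb() returns at successive
 * addresses, e.g. to probe for an option ROM.  The address and signature
 * bytes below are purely hypothetical.
 */
#if 0
static inline int example_probe_rom(void)
{
        static const unsigned char sig[] = { 0x55, 0xaa };

        /* Returns 1 if both signature bytes match, 0 otherwise. */
        return check_signature(0xbfc00000, sig, sizeof(sig));
}
#endif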

/*
 * Talk about misusing macros..
 */

#define __OUT1(s) \
extern inline void __out##s(unsigned int value, unsigned int port) {

#define __OUT2(m) \
__asm__ __volatile__ ("s" #m "\t%0,%1(%2)"

#define __OUT(m,s,w) \
__OUT1(s) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); } \
__OUT1(s##c) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); } \
__OUT1(s##_p) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); \
        SLOW_DOWN_IO; } \
__OUT1(s##c_p) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); \
        SLOW_DOWN_IO; }

#define __IN1(t,s) \
extern __inline__ t __in##s(unsigned int port) { t _v;

/*
 * Required nops will be inserted by the assembler
 */
#define __IN2(m) \
__asm__ __volatile__ ("l" #m "\t%0,%1(%2)"

#define __IN(t,m,s,w) \
__IN1(t,s) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); return __ioswab##w(_v); } \
__IN1(t,s##c) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); return __ioswab##w(_v); } \
__IN1(t,s##_p) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); SLOW_DOWN_IO; return __ioswab##w(_v); } \
__IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return __ioswab##w(_v); }

#define __INS1(s) \
extern inline void __ins##s(unsigned int port, void * addr, unsigned long count) {

#define __INS2(m) \
if (count) \
__asm__ __volatile__ ( \
        ".set\tnoreorder\n\t" \
        ".set\tnoat\n" \
        "1:\tl" #m "\t$1,%4(%5)\n\t" \
        "subu\t%1,1\n\t" \
        "s" #m "\t$1,(%0)\n\t" \
        "bne\t$0,%1,1b\n\t" \
        "addiu\t%0,%6\n\t" \
        ".set\tat\n\t" \
        ".set\treorder"

#define __INS(m,s,i) \
__INS1(s) __INS2(m) \
        : "=r" (addr), "=r" (count) \
        : "0" (addr), "1" (count), "i" (0), \
          "r" (mips_io_port_base+port), "I" (i) \
        : "$1");} \
__INS1(s##c) __INS2(m) \
        : "=r" (addr), "=r" (count) \
        : "0" (addr), "1" (count), "ir" (port), \
          "r" (mips_io_port_base), "I" (i) \
        : "$1");}

#define __OUTS1(s) \
extern inline void __outs##s(unsigned int port, const void * addr, unsigned long count) {

#define __OUTS2(m) \
if (count) \
__asm__ __volatile__ ( \
        ".set\tnoreorder\n\t" \
        ".set\tnoat\n" \
        "1:\tl" #m "\t$1,(%0)\n\t" \
        "subu\t%1,1\n\t" \
        "s" #m "\t$1,%4(%5)\n\t" \
        "bne\t$0,%1,1b\n\t" \
        "addiu\t%0,%6\n\t" \
        ".set\tat\n\t" \
        ".set\treorder"

#define __OUTS(m,s,i) \
__OUTS1(s) __OUTS2(m) \
        : "=r" (addr), "=r" (count) \
        : "0" (addr), "1" (count), "i" (0), "r" (mips_io_port_base+port), "I" (i) \
        : "$1");} \
__OUTS1(s##c) __OUTS2(m) \
        : "=r" (addr), "=r" (count) \
        : "0" (addr), "1" (count), "ir" (port), "r" (mips_io_port_base), "I" (i) \
        : "$1");}

__IN(unsigned char,b,b,8)
__IN(unsigned short,h,w,16)
__IN(unsigned int,w,l,32)

__OUT(b,b,8)
__OUT(h,w,16)
__OUT(w,l,32)

__INS(b,b,1)
__INS(h,w,2)
__INS(w,l,4)

__OUTS(b,b,1)
__OUTS(h,w,2)
__OUTS(w,l,4)
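
/*
 * Note: each __IN/__OUT invocation above expands into four inline functions.
 * __IN(unsigned char,b,b,8), for example, generates __inb(), __inbc(),
 * __inb_p() and __inbc_p(): the plain variant computes
 * mips_io_port_base + port at run time, the "c" variant folds a constant
 * port into the load/store offset, and the "_p" variants append
 * SLOW_DOWN_IO.  The inb()/outb() wrappers below pick between them with
 * __builtin_constant_p().
 */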

/*
 * Note that due to the way __builtin_constant_p() works, you
 *  - can't use it inside an inline function (it will never be true)
 *  - you don't have to worry about side effects within the __builtin..
 */
#define outb(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outbc((val),(port)) : \
        __outb((val),(port)))

#define inb(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inbc(port) : \
        __inb(port))

#define outb_p(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outbc_p((val),(port)) : \
        __outb_p((val),(port)))

#define inb_p(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inbc_p(port) : \
        __inb_p(port))

#define outw(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outwc((val),(port)) : \
        __outw((val),(port)))

#define inw(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inwc(port) : \
        __inw(port))

#define outw_p(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outwc_p((val),(port)) : \
        __outw_p((val),(port)))

#define inw_p(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inwc_p(port) : \
        __inw_p(port))

#define outl(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outlc((val),(port)) : \
        __outl((val),(port)))

#define inl(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inlc(port) : \
        __inl(port))

#define outl_p(val,port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outlc_p((val),(port)) : \
        __outl_p((val),(port)))

#define inl_p(port) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inlc_p(port) : \
        __inl_p(port))


#define outsb(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outsbc((port),(addr),(count)) : \
        __outsb ((port),(addr),(count)))

#define insb(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __insbc((port),(addr),(count)) : \
        __insb((port),(addr),(count)))

#define outsw(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outswc((port),(addr),(count)) : \
        __outsw ((port),(addr),(count)))

#define insw(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inswc((port),(addr),(count)) : \
        __insw((port),(addr),(count)))

#define outsl(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __outslc((port),(addr),(count)) : \
        __outsl ((port),(addr),(count)))

#define insl(port,addr,count) \
((__builtin_constant_p((port)) && (port) < 32768) ? \
        __inslc((port),(addr),(count)) : \
        __insl((port),(addr),(count)))
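
/*
 * Illustrative sketch (not part of the original header): because the port
 * number 0x3f8 below is a compile-time constant smaller than 32768, the
 * inb()/outb() wrappers above resolve to the __inbc()/__outbc() variants,
 * which encode the port directly in the load/store offset.  The 16550-style
 * register offsets are only an example.
 */
#if 0
static inline void example_serial_putc(char c)
{
        /* Busy-wait until the hypothetical UART reports an empty THR. */
        while ((inb(0x3f8 + 5) & 0x20) == 0)
                ;
        outb(c, 0x3f8);
}
#endif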

#define IO_SPACE_LIMIT 0xffff

/*
 * The caches on some architectures aren't DMA-coherent, so this has to be
 * handled in software.  There are three types of operations that can be
 * applied to DMA buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes the caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes the caches and memory coherent by
 *    writing dirty cache lines back to memory, if necessary.  Use it
 *    before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before DMA transfers
 *    from a device to memory.
 */
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start,size) _dma_cache_wback_inv(start,size)
#define dma_cache_wback(start,size)     _dma_cache_wback(start,size)
#define dma_cache_inv(start,size)       _dma_cache_inv(start,size)
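
/*
 * Illustrative sketch (not part of the original header): a typical
 * non-coherent DMA sequence using the hooks above.  The buffers and the
 * start_dma()/wait_dma() helpers are hypothetical.
 */
#if 0
static inline void example_dma_transfer(void *tx_buf, void *rx_buf,
                                        unsigned long len)
{
        /* Flush dirty lines so the device sees the data we just wrote. */
        dma_cache_wback((unsigned long)tx_buf, len);

        /* Drop stale lines so we don't read old data after the device writes. */
        dma_cache_inv((unsigned long)rx_buf, len);

        /* start_dma()/wait_dma() stand in for real controller programming. */
        start_dma(virt_to_phys(tx_buf), virt_to_phys(rx_buf), len);
        wait_dma();
}
#endif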

static inline void sync(void)
{
}

/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE     (0)
#define MAP_WRCOMBINE   (0)
#define MAP_WRBACK      (0)
#define MAP_WRTHROUGH   (0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
        return (void *)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{

}
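
/*
 * Illustrative sketch (not part of the original header): on this platform
 * map_physmem() is a plain cast, so the flags are accepted but ignored.
 * The physical address used here is purely hypothetical.
 */
#if 0
static inline unsigned int example_peek_phys(void)
{
        void *regs = map_physmem(0x1f000000, 0x100, MAP_NOCACHE);
        unsigned int val = readl(regs);

        unmap_physmem(regs, MAP_NOCACHE);
        return val;
}
#endif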

#endif /* _ASM_IO_H */