linux/include/asm-generic/io.h
/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

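/*
 * Example (illustrative sketch, not part of this header): a driver whose
 * device registers are already in CPU byte order could use the raw
 * accessors directly.  The "regs" pointer and the FOO_* constants below
 * are hypothetical.
 *
 *      void __iomem *regs = ioremap(FOO_PHYS_BASE, FOO_REG_SIZE);
 *      u32 id;
 *
 *      id = __raw_readl(regs + FOO_REG_ID);
 *      __raw_writel(1, regs + FOO_REG_RESET);
 */
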
#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
        return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
        return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
        return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
        return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
        *(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
        *(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
        *(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
        *(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

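/*
 * Example (illustrative sketch): most MMIO devices define their registers
 * as little endian, so readl()/writel() are the usual choice; any swap to
 * CPU byte order happens inside the accessor on big-endian machines.  The
 * "regs" pointer and the FOO_* constants are hypothetical.
 *
 *      u32 ctrl = readl(regs + FOO_REG_CTRL);
 *
 *      writel(ctrl | FOO_CTRL_ENABLE, regs + FOO_REG_CTRL);
 */
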
#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
        return __raw_readb(addr);
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
        return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
        return __le32_to_cpu(__raw_readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return __le64_to_cpu(__raw_readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
        __raw_writeb(value, addr);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
        __raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
        __raw_writel(__cpu_to_le32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
        __raw_writeq(__cpu_to_le64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
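
/*
 * Example (illustrative sketch): a poll loop that only inspects a single
 * register and needs no ordering against other memory can use the relaxed
 * variant; a fully ordered readl() is still needed before touching any
 * DMA buffer the device may have written.  FOO_REG_BUSY and FOO_BUSY are
 * hypothetical.
 *
 *      while (readl_relaxed(regs + FOO_REG_BUSY) & FOO_BUSY)
 *              cpu_relax();
 */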
#ifndef readb_relaxed
#define readb_relaxed readb
#endif

#ifndef readw_relaxed
#define readw_relaxed readw
#endif

#ifndef readl_relaxed
#define readl_relaxed readl
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb
#endif

#ifndef writew_relaxed
#define writew_relaxed writew
#endif

#ifndef writel_relaxed
#define writel_relaxed writel
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
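
/*
 * Example (illustrative sketch): draining a 16-bit wide RX FIFO that is
 * exposed as a single register.  The data lands in the buffer exactly as
 * the device produced it, with no byte swapping.  "regs", FOO_REG_FIFO
 * and "nwords" (a count of 16-bit words) are hypothetical.
 *
 *      u16 rxbuf[64];
 *
 *      readsw(regs + FOO_REG_FIFO, rxbuf, nwords);
 */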
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u8 *buf = buffer;

                do {
                        u8 x = __raw_readb(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u16 *buf = buffer;

                do {
                        u16 x = __raw_readw(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u32 *buf = buffer;

                do {
                        u32 x = __raw_readl(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u64 *buf = buffer;

                do {
                        u64 x = __raw_readq(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u8 *buf = buffer;

                do {
                        __raw_writeb(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u16 *buf = buffer;

                do {
                        __raw_writew(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u32 *buf = buffer;

                do {
                        __raw_writel(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u64 *buf = buffer;

                do {
                        __raw_writeq(*buf++, addr);
                } while (--count);
        }
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

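/*
 * Example (illustrative sketch): legacy port I/O, e.g. driving a 16550
 * style UART at a well-known port base.  FOO_UART_BASE and the register
 * offsets/bits are hypothetical.
 *
 *      while (!(inb(FOO_UART_BASE + FOO_UART_LSR) & FOO_LSR_THRE))
 *              cpu_relax();
 *      outb('A', FOO_UART_BASE + FOO_UART_TX);
 */
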
#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
        return readb(PCI_IOBASE + addr);
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
        return readw(PCI_IOBASE + addr);
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
        return readl(PCI_IOBASE + addr);
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
        writeb(value, PCI_IOBASE + addr);
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
        writew(value, PCI_IOBASE + addr);
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
        writel(value, PCI_IOBASE + addr);
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
        return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
        return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
        return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
        outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
        outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
        outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

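/*
 * Example (illustrative sketch): an old-style ATA PIO transfer reads a
 * 512-byte sector as 256 16-bit words from the data port.  FOO_ATA_DATA
 * is hypothetical.
 *
 *      u16 sector[256];
 *
 *      insw(FOO_ATA_DATA, sector, 256);
 */
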
#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
        readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
        readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
        readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
        insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
        insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
        insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
        return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
        return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
        return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
        return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
        writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
        writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
        writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
        writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
        return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
        return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
        return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
        writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
        writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
        writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
                               unsigned int count)
{
        readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
                                const void *buffer,
                                unsigned int count)
{
        writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These implementations are pretty trivial.
 */
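
/*
 * Example (illustrative sketch): these helpers only make sense for
 * directly mapped kernel addresses such as kmalloc() memory; device
 * programming should normally go through the DMA API instead.
 *
 *      void *buf = kmalloc(4096, GFP_KERNEL);
 *      unsigned long pa = virt_to_phys(buf);
 */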
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide their own ioremap()
 * and iounmap() implementations; otherwise the asm-generic helpers below
 * provide a direct mapping.
 *
 * There are also ioremap_*() variants.  Without an MMU they all naturally
 * default to the direct mapping, and an architecture may override any of
 * these defaults.  With an MMU you are strongly encouraged to provide your
 * own ioremap variant implementations, as there is currently no safe,
 * architecture-agnostic default.  To avoid improper behaviour, the default
 * asm-generic ioremap_*() variants below all return NULL when an MMU is
 * present.  If you provide your own ioremap_*() variant, you must also
 * define it to itself (e.g. "#define ioremap_uc ioremap_uc") so that the
 * NULL default is not used.
 */

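/*
 * Example (illustrative sketch): the usual driver pattern, assuming a
 * hypothetical FOO_PHYS_BASE/FOO_REG_SIZE register window; error handling
 * kept minimal.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap(FOO_PHYS_BASE, FOO_REG_SIZE);
 *      if (!regs)
 *              return -ENOMEM;
 *      ...
 *      iounmap(regs);
 */
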
#ifdef CONFIG_MMU

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
        return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * Default ioremap() and friends for the no-MMU case: physical and virtual
 * addresses are identical, so "remapping" is just a cast.
 *
 * These implementations are for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
        return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
                                      unsigned long flags)
{
        return ioremap(offset, size);
}
#endif

#ifndef ioremap_nocache
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
        return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
        return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
        return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
        return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
        return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io    Set a range of I/O memory to a constant value
 * @addr:       The beginning of the I/O-memory range to set
 * @value:      The value to set the memory to
 * @size:       The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
                             size_t size)
{
        memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio        Copy a block of data from I/O memory
 * @buffer:             The (RAM) destination for the copy
 * @addr:               The (I/O memory) source for the data
 * @size:               The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
                                 const volatile void __iomem *addr,
                                 size_t size)
{
        memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio          Copy a block of data into I/O memory
 * @addr:               The (I/O memory) destination for the copy
 * @buffer:             The (RAM) source for the data
 * @size:               The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
                               size_t size)
{
        memcpy(__io_virt(addr), buffer, size);
}
#endif

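/*
 * Example (illustrative sketch): loading a firmware image into device
 * memory and clearing a mailbox area.  "mem" (an ioremap()ed region),
 * fw_data, fw_size and the FOO_* constants are hypothetical.
 *
 *      memcpy_toio(mem + FOO_FW_OFFSET, fw_data, fw_size);
 *      memset_io(mem + FOO_MBOX_OFFSET, 0, FOO_MBOX_SIZE);
 */
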
#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */