linux/include/asm-generic/io.h
   1/* Generic I/O port emulation.
   2 *
   3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   4 * Written by David Howells (dhowells@redhat.com)
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public Licence
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the Licence, or (at your option) any later version.
  10 */
  11#ifndef __ASM_GENERIC_IO_H
  12#define __ASM_GENERIC_IO_H
  13
  14#include <asm/page.h> /* I/O is all done through memory accesses */
  15#include <linux/string.h> /* for memset() and memcpy() */
  16#include <linux/types.h>
  17
  18#ifdef CONFIG_GENERIC_IOMAP
  19#include <asm-generic/iomap.h>
  20#endif
  21
  22#include <asm/mmiowb.h>
  23#include <asm-generic/pci_iomap.h>
  24
  25#ifndef __io_br
  26#define __io_br()      barrier()
  27#endif
  28
  29/* prevent prefetching of coherent DMA data ahead of a dma-complete */
  30#ifndef __io_ar
  31#ifdef rmb
  32#define __io_ar()      rmb()
  33#else
  34#define __io_ar()      barrier()
  35#endif
  36#endif
  37
  38/* flush writes to coherent DMA data before possibly triggering a DMA read */
  39#ifndef __io_bw
  40#ifdef wmb
  41#define __io_bw()      wmb()
  42#else
  43#define __io_bw()      barrier()
  44#endif
  45#endif
  46
  47/* serialize device access against a spin_unlock, usually handled there. */
  48#ifndef __io_aw
  49#define __io_aw()      mmiowb_set_pending()
  50#endif
  51
  52#ifndef __io_pbw
  53#define __io_pbw()     __io_bw()
  54#endif
  55
  56#ifndef __io_paw
  57#define __io_paw()     __io_aw()
  58#endif
  59
  60#ifndef __io_pbr
  61#define __io_pbr()     __io_br()
  62#endif
  63
  64#ifndef __io_par
  65#define __io_par()     __io_ar()
  66#endif
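/*
 * Illustrative example: each of the hooks above is wrapped in #ifndef, so
 * an architecture can override any of them by defining its own version,
 * typically in its asm/io.h, before this file is included.  A hypothetical
 * port that wants readX() to order against DMA could do
 *
 *      #define __io_ar()       dma_rmb()
 *
 * and leave the remaining hooks to the generic fallbacks.
 */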
  67
  68
  69/*
  70 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
  71 *
  72 * On some architectures memory mapped IO needs to be accessed differently.
  73 * On the simple architectures, we just read/write the memory location
  74 * directly.
  75 */
  76
  77#ifndef __raw_readb
  78#define __raw_readb __raw_readb
  79static inline u8 __raw_readb(const volatile void __iomem *addr)
  80{
  81        return *(const volatile u8 __force *)addr;
  82}
  83#endif
  84
  85#ifndef __raw_readw
  86#define __raw_readw __raw_readw
  87static inline u16 __raw_readw(const volatile void __iomem *addr)
  88{
  89        return *(const volatile u16 __force *)addr;
  90}
  91#endif
  92
  93#ifndef __raw_readl
  94#define __raw_readl __raw_readl
  95static inline u32 __raw_readl(const volatile void __iomem *addr)
  96{
  97        return *(const volatile u32 __force *)addr;
  98}
  99#endif
 100
 101#ifdef CONFIG_64BIT
 102#ifndef __raw_readq
 103#define __raw_readq __raw_readq
 104static inline u64 __raw_readq(const volatile void __iomem *addr)
 105{
 106        return *(const volatile u64 __force *)addr;
 107}
 108#endif
 109#endif /* CONFIG_64BIT */
 110
 111#ifndef __raw_writeb
 112#define __raw_writeb __raw_writeb
 113static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
 114{
 115        *(volatile u8 __force *)addr = value;
 116}
 117#endif
 118
 119#ifndef __raw_writew
 120#define __raw_writew __raw_writew
 121static inline void __raw_writew(u16 value, volatile void __iomem *addr)
 122{
 123        *(volatile u16 __force *)addr = value;
 124}
 125#endif
 126
 127#ifndef __raw_writel
 128#define __raw_writel __raw_writel
 129static inline void __raw_writel(u32 value, volatile void __iomem *addr)
 130{
 131        *(volatile u32 __force *)addr = value;
 132}
 133#endif
 134
 135#ifdef CONFIG_64BIT
 136#ifndef __raw_writeq
 137#define __raw_writeq __raw_writeq
 138static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
 139{
 140        *(volatile u64 __force *)addr = value;
 141}
 142#endif
 143#endif /* CONFIG_64BIT */
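/*
 * Illustrative sketch (hypothetical helper, not defined elsewhere in the
 * kernel): the __raw_*() accessors do no byte swapping and imply no
 * barriers, so they suit bulk copies into device memory that is laid out
 * in CPU byte order.
 */
static inline void example_copy_to_sram(void __iomem *sram, const u32 *buf,
                                        unsigned int words)
{
        unsigned int i;

        for (i = 0; i < words; i++)
                __raw_writel(buf[i], sram + 4 * i);
        /* ordering against later accesses is the caller's job, e.g. via writel() */
}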
 144
 145/*
 146 * {read,write}{b,w,l,q}() access little endian memory and return result in
 147 * native endianness.
 148 */
 149
 150#ifndef readb
 151#define readb readb
 152static inline u8 readb(const volatile void __iomem *addr)
 153{
 154        u8 val;
 155
 156        __io_br();
 157        val = __raw_readb(addr);
 158        __io_ar();
 159        return val;
 160}
 161#endif
 162
 163#ifndef readw
 164#define readw readw
 165static inline u16 readw(const volatile void __iomem *addr)
 166{
 167        u16 val;
 168
 169        __io_br();
 170        val = __le16_to_cpu(__raw_readw(addr));
 171        __io_ar();
 172        return val;
 173}
 174#endif
 175
 176#ifndef readl
 177#define readl readl
 178static inline u32 readl(const volatile void __iomem *addr)
 179{
 180        u32 val;
 181
 182        __io_br();
 183        val = __le32_to_cpu(__raw_readl(addr));
 184        __io_ar();
 185        return val;
 186}
 187#endif
 188
 189#ifdef CONFIG_64BIT
 190#ifndef readq
 191#define readq readq
 192static inline u64 readq(const volatile void __iomem *addr)
 193{
 194        u64 val;
 195
 196        __io_br();
 197        val = __le64_to_cpu(__raw_readq(addr));
 198        __io_ar();
 199        return val;
 200}
 201#endif
 202#endif /* CONFIG_64BIT */
 203
 204#ifndef writeb
 205#define writeb writeb
 206static inline void writeb(u8 value, volatile void __iomem *addr)
 207{
 208        __io_bw();
 209        __raw_writeb(value, addr);
 210        __io_aw();
 211}
 212#endif
 213
 214#ifndef writew
 215#define writew writew
 216static inline void writew(u16 value, volatile void __iomem *addr)
 217{
 218        __io_bw();
 219        __raw_writew(cpu_to_le16(value), addr);
 220        __io_aw();
 221}
 222#endif
 223
 224#ifndef writel
 225#define writel writel
 226static inline void writel(u32 value, volatile void __iomem *addr)
 227{
 228        __io_bw();
 229        __raw_writel(__cpu_to_le32(value), addr);
 230        __io_aw();
 231}
 232#endif
 233
 234#ifdef CONFIG_64BIT
 235#ifndef writeq
 236#define writeq writeq
 237static inline void writeq(u64 value, volatile void __iomem *addr)
 238{
 239        __io_bw();
 240        __raw_writeq(__cpu_to_le64(value), addr);
 241        __io_aw();
 242}
 243#endif
 244#endif /* CONFIG_64BIT */
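/*
 * Illustrative sketch (hypothetical device and register offsets): the
 * readX()/writeX() accessors convert between CPU and little-endian
 * register layout and include the __io_*() ordering, so driver code can
 * use them directly without extra barriers.
 */
static inline u32 example_ack_irq(void __iomem *regs)
{
        u32 status = readl(regs + 0x00);        /* hypothetical STATUS */

        if (status)
                writel(status, regs + 0x04);    /* hypothetical IRQ_ACK */
        return status;
}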
 245
 246/*
 247 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 248 * are not guaranteed to provide ordering against spinlocks or memory
 249 * accesses.
 250 */
 251#ifndef readb_relaxed
 252#define readb_relaxed readb_relaxed
 253static inline u8 readb_relaxed(const volatile void __iomem *addr)
 254{
 255        return __raw_readb(addr);
 256}
 257#endif
 258
 259#ifndef readw_relaxed
 260#define readw_relaxed readw_relaxed
 261static inline u16 readw_relaxed(const volatile void __iomem *addr)
 262{
 263        return __le16_to_cpu(__raw_readw(addr));
 264}
 265#endif
 266
 267#ifndef readl_relaxed
 268#define readl_relaxed readl_relaxed
 269static inline u32 readl_relaxed(const volatile void __iomem *addr)
 270{
 271        return __le32_to_cpu(__raw_readl(addr));
 272}
 273#endif
 274
 275#if defined(readq) && !defined(readq_relaxed)
 276#define readq_relaxed readq_relaxed
 277static inline u64 readq_relaxed(const volatile void __iomem *addr)
 278{
 279        return __le64_to_cpu(__raw_readq(addr));
 280}
 281#endif
 282
 283#ifndef writeb_relaxed
 284#define writeb_relaxed writeb_relaxed
 285static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 286{
 287        __raw_writeb(value, addr);
 288}
 289#endif
 290
 291#ifndef writew_relaxed
 292#define writew_relaxed writew_relaxed
 293static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 294{
 295        __raw_writew(cpu_to_le16(value), addr);
 296}
 297#endif
 298
 299#ifndef writel_relaxed
 300#define writel_relaxed writel_relaxed
 301static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 302{
 303        __raw_writel(__cpu_to_le32(value), addr);
 304}
 305#endif
 306
 307#if defined(writeq) && !defined(writeq_relaxed)
 308#define writeq_relaxed writeq_relaxed
 309static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 310{
 311        __raw_writeq(__cpu_to_le64(value), addr);
 312}
 313#endif
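/*
 * Illustrative sketch (hypothetical register offsets): the _relaxed()
 * accessors drop the DMA and spinlock ordering, which is useful when a
 * sequence of register writes only needs to be ordered once, by making
 * the final access a non-relaxed one.
 */
static inline void example_program_ring(void __iomem *regs, u32 base, u32 len)
{
        writel_relaxed(base, regs + 0x10);      /* hypothetical RING_BASE */
        writel_relaxed(len, regs + 0x14);       /* hypothetical RING_LEN */
        writel(1, regs + 0x18);                 /* hypothetical RING_EN, ordered */
}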
 314
 315/*
 316 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 317 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 318 */
 319#ifndef readsb
 320#define readsb readsb
 321static inline void readsb(const volatile void __iomem *addr, void *buffer,
 322                          unsigned int count)
 323{
 324        if (count) {
 325                u8 *buf = buffer;
 326
 327                do {
 328                        u8 x = __raw_readb(addr);
 329                        *buf++ = x;
 330                } while (--count);
 331        }
 332}
 333#endif
 334
 335#ifndef readsw
 336#define readsw readsw
 337static inline void readsw(const volatile void __iomem *addr, void *buffer,
 338                          unsigned int count)
 339{
 340        if (count) {
 341                u16 *buf = buffer;
 342
 343                do {
 344                        u16 x = __raw_readw(addr);
 345                        *buf++ = x;
 346                } while (--count);
 347        }
 348}
 349#endif
 350
 351#ifndef readsl
 352#define readsl readsl
 353static inline void readsl(const volatile void __iomem *addr, void *buffer,
 354                          unsigned int count)
 355{
 356        if (count) {
 357                u32 *buf = buffer;
 358
 359                do {
 360                        u32 x = __raw_readl(addr);
 361                        *buf++ = x;
 362                } while (--count);
 363        }
 364}
 365#endif
 366
 367#ifdef CONFIG_64BIT
 368#ifndef readsq
 369#define readsq readsq
 370static inline void readsq(const volatile void __iomem *addr, void *buffer,
 371                          unsigned int count)
 372{
 373        if (count) {
 374                u64 *buf = buffer;
 375
 376                do {
 377                        u64 x = __raw_readq(addr);
 378                        *buf++ = x;
 379                } while (--count);
 380        }
 381}
 382#endif
 383#endif /* CONFIG_64BIT */
 384
 385#ifndef writesb
 386#define writesb writesb
 387static inline void writesb(volatile void __iomem *addr, const void *buffer,
 388                           unsigned int count)
 389{
 390        if (count) {
 391                const u8 *buf = buffer;
 392
 393                do {
 394                        __raw_writeb(*buf++, addr);
 395                } while (--count);
 396        }
 397}
 398#endif
 399
 400#ifndef writesw
 401#define writesw writesw
 402static inline void writesw(volatile void __iomem *addr, const void *buffer,
 403                           unsigned int count)
 404{
 405        if (count) {
 406                const u16 *buf = buffer;
 407
 408                do {
 409                        __raw_writew(*buf++, addr);
 410                } while (--count);
 411        }
 412}
 413#endif
 414
 415#ifndef writesl
 416#define writesl writesl
 417static inline void writesl(volatile void __iomem *addr, const void *buffer,
 418                           unsigned int count)
 419{
 420        if (count) {
 421                const u32 *buf = buffer;
 422
 423                do {
 424                        __raw_writel(*buf++, addr);
 425                } while (--count);
 426        }
 427}
 428#endif
 429
 430#ifdef CONFIG_64BIT
 431#ifndef writesq
 432#define writesq writesq
 433static inline void writesq(volatile void __iomem *addr, const void *buffer,
 434                           unsigned int count)
 435{
 436        if (count) {
 437                const u64 *buf = buffer;
 438
 439                do {
 440                        __raw_writeq(*buf++, addr);
 441                } while (--count);
 442        }
 443}
 444#endif
 445#endif /* CONFIG_64BIT */
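/*
 * Illustrative sketch (hypothetical FIFO offset): unlike memcpy_fromio(),
 * the string accessors keep hitting one register address, which is the
 * usual way to drain or fill a device FIFO.
 */
static inline void example_drain_rx_fifo(void __iomem *regs, u32 *buf,
                                         unsigned int words)
{
        readsl(regs + 0x40, buf, words);        /* hypothetical RX_FIFO */
}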
 446
 447#ifndef PCI_IOBASE
 448#define PCI_IOBASE ((void __iomem *)0)
 449#endif
 450
 451#ifndef IO_SPACE_LIMIT
 452#define IO_SPACE_LIMIT 0xffff
 453#endif
 454
 455#include <linux/logic_pio.h>
 456
 457/*
 458 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 459 * implemented on hardware that needs an additional delay for I/O accesses to
 460 * take effect.
 461 */
 462
 463#ifndef inb
 464#define inb inb
 465static inline u8 inb(unsigned long addr)
 466{
 467        u8 val;
 468
 469        __io_pbr();
 470        val = __raw_readb(PCI_IOBASE + addr);
 471        __io_par();
 472        return val;
 473}
 474#endif
 475
 476#ifndef inw
 477#define inw inw
 478static inline u16 inw(unsigned long addr)
 479{
 480        u16 val;
 481
 482        __io_pbr();
 483        val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
 484        __io_par();
 485        return val;
 486}
 487#endif
 488
 489#ifndef inl
 490#define inl inl
 491static inline u32 inl(unsigned long addr)
 492{
 493        u32 val;
 494
 495        __io_pbr();
 496        val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
 497        __io_par();
 498        return val;
 499}
 500#endif
 501
 502#ifndef outb
 503#define outb outb
 504static inline void outb(u8 value, unsigned long addr)
 505{
 506        __io_pbw();
 507        __raw_writeb(value, PCI_IOBASE + addr);
 508        __io_paw();
 509}
 510#endif
 511
 512#ifndef outw
 513#define outw outw
 514static inline void outw(u16 value, unsigned long addr)
 515{
 516        __io_pbw();
 517        __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
 518        __io_paw();
 519}
 520#endif
 521
 522#ifndef outl
 523#define outl outl
 524static inline void outl(u32 value, unsigned long addr)
 525{
 526        __io_pbw();
 527        __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
 528        __io_paw();
 529}
 530#endif
 531
 532#ifndef inb_p
 533#define inb_p inb_p
 534static inline u8 inb_p(unsigned long addr)
 535{
 536        return inb(addr);
 537}
 538#endif
 539
 540#ifndef inw_p
 541#define inw_p inw_p
 542static inline u16 inw_p(unsigned long addr)
 543{
 544        return inw(addr);
 545}
 546#endif
 547
 548#ifndef inl_p
 549#define inl_p inl_p
 550static inline u32 inl_p(unsigned long addr)
 551{
 552        return inl(addr);
 553}
 554#endif
 555
 556#ifndef outb_p
 557#define outb_p outb_p
 558static inline void outb_p(u8 value, unsigned long addr)
 559{
 560        outb(value, addr);
 561}
 562#endif
 563
 564#ifndef outw_p
 565#define outw_p outw_p
 566static inline void outw_p(u16 value, unsigned long addr)
 567{
 568        outw(value, addr);
 569}
 570#endif
 571
 572#ifndef outl_p
 573#define outl_p outl_p
 574static inline void outl_p(u32 value, unsigned long addr)
 575{
 576        outl(value, addr);
 577}
 578#endif
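/*
 * Illustrative sketch (hypothetical port numbers): the port accessors take
 * a port number rather than a pointer, e.g. for an index/data register
 * pair as found on many PC-style devices.
 */
static inline u8 example_read_indexed(unsigned long index_port,
                                      unsigned long data_port, u8 index)
{
        outb(index, index_port);
        return inb(data_port);
}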
 579
 580/*
 581 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 582 * single I/O port multiple times.
 583 */
 584
 585#ifndef insb
 586#define insb insb
 587static inline void insb(unsigned long addr, void *buffer, unsigned int count)
 588{
 589        readsb(PCI_IOBASE + addr, buffer, count);
 590}
 591#endif
 592
 593#ifndef insw
 594#define insw insw
 595static inline void insw(unsigned long addr, void *buffer, unsigned int count)
 596{
 597        readsw(PCI_IOBASE + addr, buffer, count);
 598}
 599#endif
 600
 601#ifndef insl
 602#define insl insl
 603static inline void insl(unsigned long addr, void *buffer, unsigned int count)
 604{
 605        readsl(PCI_IOBASE + addr, buffer, count);
 606}
 607#endif
 608
 609#ifndef outsb
 610#define outsb outsb
 611static inline void outsb(unsigned long addr, const void *buffer,
 612                         unsigned int count)
 613{
 614        writesb(PCI_IOBASE + addr, buffer, count);
 615}
 616#endif
 617
 618#ifndef outsw
 619#define outsw outsw
 620static inline void outsw(unsigned long addr, const void *buffer,
 621                         unsigned int count)
 622{
 623        writesw(PCI_IOBASE + addr, buffer, count);
 624}
 625#endif
 626
 627#ifndef outsl
 628#define outsl outsl
 629static inline void outsl(unsigned long addr, const void *buffer,
 630                         unsigned int count)
 631{
 632        writesl(PCI_IOBASE + addr, buffer, count);
 633}
 634#endif
 635
 636#ifndef insb_p
 637#define insb_p insb_p
 638static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
 639{
 640        insb(addr, buffer, count);
 641}
 642#endif
 643
 644#ifndef insw_p
 645#define insw_p insw_p
 646static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
 647{
 648        insw(addr, buffer, count);
 649}
 650#endif
 651
 652#ifndef insl_p
 653#define insl_p insl_p
 654static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
 655{
 656        insl(addr, buffer, count);
 657}
 658#endif
 659
 660#ifndef outsb_p
 661#define outsb_p outsb_p
 662static inline void outsb_p(unsigned long addr, const void *buffer,
 663                           unsigned int count)
 664{
 665        outsb(addr, buffer, count);
 666}
 667#endif
 668
 669#ifndef outsw_p
 670#define outsw_p outsw_p
 671static inline void outsw_p(unsigned long addr, const void *buffer,
 672                           unsigned int count)
 673{
 674        outsw(addr, buffer, count);
 675}
 676#endif
 677
 678#ifndef outsl_p
 679#define outsl_p outsl_p
 680static inline void outsl_p(unsigned long addr, const void *buffer,
 681                           unsigned int count)
 682{
 683        outsl(addr, buffer, count);
 684}
 685#endif
 686
 687#ifndef CONFIG_GENERIC_IOMAP
 688#ifndef ioread8
 689#define ioread8 ioread8
 690static inline u8 ioread8(const volatile void __iomem *addr)
 691{
 692        return readb(addr);
 693}
 694#endif
 695
 696#ifndef ioread16
 697#define ioread16 ioread16
 698static inline u16 ioread16(const volatile void __iomem *addr)
 699{
 700        return readw(addr);
 701}
 702#endif
 703
 704#ifndef ioread32
 705#define ioread32 ioread32
 706static inline u32 ioread32(const volatile void __iomem *addr)
 707{
 708        return readl(addr);
 709}
 710#endif
 711
 712#ifdef CONFIG_64BIT
 713#ifndef ioread64
 714#define ioread64 ioread64
 715static inline u64 ioread64(const volatile void __iomem *addr)
 716{
 717        return readq(addr);
 718}
 719#endif
 720#endif /* CONFIG_64BIT */
 721
 722#ifndef iowrite8
 723#define iowrite8 iowrite8
 724static inline void iowrite8(u8 value, volatile void __iomem *addr)
 725{
 726        writeb(value, addr);
 727}
 728#endif
 729
 730#ifndef iowrite16
 731#define iowrite16 iowrite16
 732static inline void iowrite16(u16 value, volatile void __iomem *addr)
 733{
 734        writew(value, addr);
 735}
 736#endif
 737
 738#ifndef iowrite32
 739#define iowrite32 iowrite32
 740static inline void iowrite32(u32 value, volatile void __iomem *addr)
 741{
 742        writel(value, addr);
 743}
 744#endif
 745
 746#ifdef CONFIG_64BIT
 747#ifndef iowrite64
 748#define iowrite64 iowrite64
 749static inline void iowrite64(u64 value, volatile void __iomem *addr)
 750{
 751        writeq(value, addr);
 752}
 753#endif
 754#endif /* CONFIG_64BIT */
 755
 756#ifndef ioread16be
 757#define ioread16be ioread16be
 758static inline u16 ioread16be(const volatile void __iomem *addr)
 759{
 760        return swab16(readw(addr));
 761}
 762#endif
 763
 764#ifndef ioread32be
 765#define ioread32be ioread32be
 766static inline u32 ioread32be(const volatile void __iomem *addr)
 767{
 768        return swab32(readl(addr));
 769}
 770#endif
 771
 772#ifdef CONFIG_64BIT
 773#ifndef ioread64be
 774#define ioread64be ioread64be
 775static inline u64 ioread64be(const volatile void __iomem *addr)
 776{
 777        return swab64(readq(addr));
 778}
 779#endif
 780#endif /* CONFIG_64BIT */
 781
 782#ifndef iowrite16be
 783#define iowrite16be iowrite16be
  784static inline void iowrite16be(u16 value, volatile void __iomem *addr)
 785{
 786        writew(swab16(value), addr);
 787}
 788#endif
 789
 790#ifndef iowrite32be
 791#define iowrite32be iowrite32be
 792static inline void iowrite32be(u32 value, volatile void __iomem *addr)
 793{
 794        writel(swab32(value), addr);
 795}
 796#endif
 797
 798#ifdef CONFIG_64BIT
 799#ifndef iowrite64be
 800#define iowrite64be iowrite64be
 801static inline void iowrite64be(u64 value, volatile void __iomem *addr)
 802{
 803        writeq(swab64(value), addr);
 804}
 805#endif
 806#endif /* CONFIG_64BIT */
 807
 808#ifndef ioread8_rep
 809#define ioread8_rep ioread8_rep
 810static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
 811                               unsigned int count)
 812{
 813        readsb(addr, buffer, count);
 814}
 815#endif
 816
 817#ifndef ioread16_rep
 818#define ioread16_rep ioread16_rep
 819static inline void ioread16_rep(const volatile void __iomem *addr,
 820                                void *buffer, unsigned int count)
 821{
 822        readsw(addr, buffer, count);
 823}
 824#endif
 825
 826#ifndef ioread32_rep
 827#define ioread32_rep ioread32_rep
 828static inline void ioread32_rep(const volatile void __iomem *addr,
 829                                void *buffer, unsigned int count)
 830{
 831        readsl(addr, buffer, count);
 832}
 833#endif
 834
 835#ifdef CONFIG_64BIT
 836#ifndef ioread64_rep
 837#define ioread64_rep ioread64_rep
 838static inline void ioread64_rep(const volatile void __iomem *addr,
 839                                void *buffer, unsigned int count)
 840{
 841        readsq(addr, buffer, count);
 842}
 843#endif
 844#endif /* CONFIG_64BIT */
 845
 846#ifndef iowrite8_rep
 847#define iowrite8_rep iowrite8_rep
 848static inline void iowrite8_rep(volatile void __iomem *addr,
 849                                const void *buffer,
 850                                unsigned int count)
 851{
 852        writesb(addr, buffer, count);
 853}
 854#endif
 855
 856#ifndef iowrite16_rep
 857#define iowrite16_rep iowrite16_rep
 858static inline void iowrite16_rep(volatile void __iomem *addr,
 859                                 const void *buffer,
 860                                 unsigned int count)
 861{
 862        writesw(addr, buffer, count);
 863}
 864#endif
 865
 866#ifndef iowrite32_rep
 867#define iowrite32_rep iowrite32_rep
 868static inline void iowrite32_rep(volatile void __iomem *addr,
 869                                 const void *buffer,
 870                                 unsigned int count)
 871{
 872        writesl(addr, buffer, count);
 873}
 874#endif
 875
 876#ifdef CONFIG_64BIT
 877#ifndef iowrite64_rep
 878#define iowrite64_rep iowrite64_rep
 879static inline void iowrite64_rep(volatile void __iomem *addr,
 880                                 const void *buffer,
 881                                 unsigned int count)
 882{
 883        writesq(addr, buffer, count);
 884}
 885#endif
 886#endif /* CONFIG_64BIT */
 887#endif /* CONFIG_GENERIC_IOMAP */
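/*
 * Illustrative sketch (hypothetical doorbell offset): the ioreadX()/
 * iowriteX() family operates on cookies from ioport_map()/pci_iomap(),
 * which may refer to either MMIO or port I/O, so it suits drivers that
 * must support both.
 */
static inline void example_kick_doorbell(void __iomem *base, u32 tail)
{
        iowrite32(tail, base + 0x800);          /* hypothetical doorbell */
        (void)ioread32(base + 0x800);           /* read back to post the write */
}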
 888
 889#ifdef __KERNEL__
 890
 891#include <linux/vmalloc.h>
 892#define __io_virt(x) ((void __force *)(x))
 893
 894#ifndef CONFIG_GENERIC_IOMAP
 895struct pci_dev;
 896extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 897
 898#ifndef pci_iounmap
 899#define pci_iounmap pci_iounmap
 900static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 901{
 902}
 903#endif
 904#endif /* CONFIG_GENERIC_IOMAP */
 905
 906/*
  907 * Change virtual addresses to physical addresses and vice versa.
  908 * These are pretty trivial.
 909 */
 910#ifndef virt_to_phys
 911#define virt_to_phys virt_to_phys
 912static inline unsigned long virt_to_phys(volatile void *address)
 913{
 914        return __pa((unsigned long)address);
 915}
 916#endif
 917
 918#ifndef phys_to_virt
 919#define phys_to_virt phys_to_virt
 920static inline void *phys_to_virt(unsigned long address)
 921{
 922        return __va(address);
 923}
 924#endif
 925
 926/**
 927 * DOC: ioremap() and ioremap_*() variants
 928 *
  929 * If you have an IOMMU, your architecture is expected to implement both
  930 * ioremap() and iounmap(); otherwise the asm-generic helpers will provide
  931 * a direct mapping.
  932 *
  933 * There are also ioremap_*() call variants. If you have no IOMMU, they all
  934 * naturally default to direct mapping, and you can override these defaults.
  935 * If you have an IOMMU you are highly encouraged to provide your own
  936 * ioremap variant implementations, as there is currently no safe,
  937 * architecture-agnostic default. To avoid possible improper behaviour, the
  938 * default asm-generic ioremap_*() variants all return NULL when an IOMMU is
  939 * available. If you have defined your own ioremap_*() variant you must then
  940 * #define that variant to itself to avoid the default NULL return.
 941 */
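/*
 * Illustrative example of the convention described above: an architecture
 * providing its own write-combining variant would, in its asm/io.h,
 * declare and define the name to itself before this file is included:
 *
 *      void __iomem *ioremap_wc(phys_addr_t offset, size_t size);
 *      #define ioremap_wc ioremap_wc
 *
 * so that the generic fallback further down is skipped.
 */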
 942
 943#ifdef CONFIG_MMU
 944
 945#ifndef ioremap_uc
 946#define ioremap_uc ioremap_uc
 947static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
 948{
 949        return NULL;
 950}
 951#endif
 952
 953#else /* !CONFIG_MMU */
 954
 955/*
  956 * ioremap() here simply maps the physical address one-to-one.
 957 *
 958 * This implementation is for the no-MMU case only... if you have an MMU
 959 * you'll need to provide your own definitions.
 960 */
 961
 962#ifndef ioremap
 963#define ioremap ioremap
 964static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 965{
 966        return (void __iomem *)(unsigned long)offset;
 967}
 968#endif
 969
 970#ifndef __ioremap
 971#define __ioremap __ioremap
 972static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
 973                                      unsigned long flags)
 974{
 975        return ioremap(offset, size);
 976}
 977#endif
 978
 979#ifndef iounmap
 980#define iounmap iounmap
 981
 982static inline void iounmap(void __iomem *addr)
 983{
 984}
 985#endif
 986#endif /* CONFIG_MMU */
 987#ifndef ioremap_nocache
 988void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
 989#define ioremap_nocache ioremap_nocache
 990static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
 991{
 992        return ioremap(offset, size);
 993}
 994#endif
 995
 996#ifndef ioremap_uc
 997#define ioremap_uc ioremap_uc
 998static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
 999{
1000        return ioremap_nocache(offset, size);
1001}
1002#endif
1003
1004#ifndef ioremap_wc
1005#define ioremap_wc ioremap_wc
1006static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
1007{
1008        return ioremap_nocache(offset, size);
1009}
1010#endif
1011
1012#ifndef ioremap_wt
1013#define ioremap_wt ioremap_wt
1014static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
1015{
1016        return ioremap_nocache(offset, size);
1017}
1018#endif
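/*
 * Illustrative sketch (hypothetical physical address and size): a typical
 * driver maps its register window once and then uses the readX()/writeX()
 * accessors on the returned cookie.
 */
static inline void __iomem *example_map_regs(void)
{
        return ioremap_nocache(0x10000000, 0x1000);     /* hypothetical window */
}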
1019
1020#ifdef CONFIG_HAS_IOPORT_MAP
1021#ifndef CONFIG_GENERIC_IOMAP
1022#ifndef ioport_map
1023#define ioport_map ioport_map
1024static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
1025{
1026        return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
1027}
1028#endif
1029
1030#ifndef ioport_unmap
1031#define ioport_unmap ioport_unmap
1032static inline void ioport_unmap(void __iomem *p)
1033{
1034}
1035#endif
1036#else /* CONFIG_GENERIC_IOMAP */
1037extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
1038extern void ioport_unmap(void __iomem *p);
1039#endif /* CONFIG_GENERIC_IOMAP */
1040#endif /* CONFIG_HAS_IOPORT_MAP */
1041
1042/*
1043 * Convert a virtual cached pointer to an uncached pointer
1044 */
1045#ifndef xlate_dev_kmem_ptr
1046#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
1047static inline void *xlate_dev_kmem_ptr(void *addr)
1048{
1049        return addr;
1050}
1051#endif
1052
1053#ifndef xlate_dev_mem_ptr
1054#define xlate_dev_mem_ptr xlate_dev_mem_ptr
1055static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
1056{
1057        return __va(addr);
1058}
1059#endif
1060
1061#ifndef unxlate_dev_mem_ptr
1062#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
1063static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
1064{
1065}
1066#endif
1067
1068#ifdef CONFIG_VIRT_TO_BUS
1069#ifndef virt_to_bus
1070static inline unsigned long virt_to_bus(void *address)
1071{
1072        return (unsigned long)address;
1073}
1074
1075static inline void *bus_to_virt(unsigned long address)
1076{
1077        return (void *)address;
1078}
1079#endif
1080#endif
1081
1082#ifndef memset_io
1083#define memset_io memset_io
1084/**
 1085 * memset_io - Set a range of I/O memory to a constant value
 1086 * @addr:       The beginning of the I/O-memory range to set
 1087 * @value:      The value to set the memory to
 1088 * @size:       The number of bytes to set
1089 *
1090 * Set a range of I/O memory to a given value.
1091 */
1092static inline void memset_io(volatile void __iomem *addr, int value,
1093                             size_t size)
1094{
1095        memset(__io_virt(addr), value, size);
1096}
1097#endif
1098
1099#ifndef memcpy_fromio
1100#define memcpy_fromio memcpy_fromio
1101/**
 1102 * memcpy_fromio - Copy a block of data from I/O memory
 1103 * @buffer:             The (RAM) destination for the copy
 1104 * @addr:               The (I/O memory) source for the data
 1105 * @size:               The number of bytes to copy
1106 *
1107 * Copy a block of data from I/O memory.
1108 */
1109static inline void memcpy_fromio(void *buffer,
1110                                 const volatile void __iomem *addr,
1111                                 size_t size)
1112{
1113        memcpy(buffer, __io_virt(addr), size);
1114}
1115#endif
1116
1117#ifndef memcpy_toio
1118#define memcpy_toio memcpy_toio
1119/**
 1120 * memcpy_toio - Copy a block of data into I/O memory
 1121 * @addr:               The (I/O memory) destination for the copy
 1122 * @buffer:             The (RAM) source for the data
 1123 * @size:               The number of bytes to copy
1124 *
1125 * Copy a block of data to I/O memory.
1126 */
1127static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
1128                               size_t size)
1129{
1130        memcpy(__io_virt(addr), buffer, size);
1131}
1132#endif
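/*
 * Illustrative sketch (hypothetical mailbox layout): the block helpers
 * above are meant for larger regions of device memory such as a shared
 * mailbox or framebuffer.
 */
static inline void example_post_message(void __iomem *mbox, const void *msg,
                                        size_t len, size_t mbox_size)
{
        memset_io(mbox, 0, mbox_size);
        memcpy_toio(mbox, msg, len);
}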
1133
1134#endif /* __KERNEL__ */
1135
1136#endif /* __ASM_GENERIC_IO_H */
1137