linux/include/asm-generic/io.h
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

#ifndef __io_br
#define __io_br()      barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar()      rmb()
#else
#define __io_ar()      barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()      wmb()
#else
#define __io_bw()      barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()      barrier()
#endif

#ifndef __io_pbw
#define __io_pbw()     __io_bw()
#endif

#ifndef __io_paw
#define __io_paw()     __io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()     __io_br()
#endif

#ifndef __io_par
#define __io_par()     __io_ar()
#endif

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
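
/*
 * Illustrative sketch (not part of this header): the __raw_*() accessors do
 * no byte swapping and no ordering, so they are typically reserved for things
 * like pushing an opaque buffer into a device FIFO. "regs" and FIFO_REG are
 * made-up names for the example:
 *
 *      static void fifo_push(void __iomem *regs, const u32 *data, int n)
 *      {
 *              int i;
 *
 *              for (i = 0; i < n; i++)
 *                      __raw_writel(data[i], regs + FIFO_REG);
 *      }
 */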

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
        return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
        return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
        return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
        return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
        *(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
        *(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
        *(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
        *(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
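
/*
 * Illustrative sketch (not part of this header): readl()/writel() are the
 * usual way for a driver to access little-endian MMIO registers. The register
 * layout and names below are invented for the example:
 *
 *      #define CTRL_REG        0x00
 *      #define CTRL_ENABLE     0x1
 *
 *      static void chip_enable(void __iomem *regs)
 *      {
 *              u32 ctrl = readl(regs + CTRL_REG);
 *
 *              writel(ctrl | CTRL_ENABLE, regs + CTRL_REG);
 *      }
 */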

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
        u8 val;

        __io_br();
        val = __raw_readb(addr);
        __io_ar();
        return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
        u16 val;

        __io_br();
        val = __le16_to_cpu(__raw_readw(addr));
        __io_ar();
        return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
        u32 val;

        __io_br();
        val = __le32_to_cpu(__raw_readl(addr));
        __io_ar();
        return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
        u64 val;

        __io_br();
        val = __le64_to_cpu(__raw_readq(addr));
        __io_ar();
        return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writeb(value, addr);
        __io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writew(cpu_to_le16(value), addr);
        __io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writel(__cpu_to_le32(value), addr);
        __io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writeq(__cpu_to_le64(value), addr);
        __io_aw();
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
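
/*
 * Illustrative sketch (not part of this header): the _relaxed() accessors are
 * useful in hot paths such as interrupt handlers where the caller does not
 * rely on ordering against normal memory or spinlocks. IRQ_STATUS_REG is a
 * made-up register for the example:
 *
 *      static irqreturn_t chip_irq(int irq, void *dev_id)
 *      {
 *              void __iomem *regs = dev_id;
 *              u32 status = readl_relaxed(regs + IRQ_STATUS_REG);
 *
 *              return status ? IRQ_HANDLED : IRQ_NONE;
 *      }
 */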
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
        return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
        return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
        return __le32_to_cpu(__raw_readl(addr));
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
        return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
        __raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
        __raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
        __raw_writel(__cpu_to_le32(value), addr);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
        __raw_writeq(__cpu_to_le64(value), addr);
}
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
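
/*
 * Illustrative sketch (not part of this header): readsw()/writesw() drain or
 * fill a single data register, as is common for simple FIFO-style hardware.
 * DATA_REG is a made-up register offset for the example:
 *
 *      static void fifo_read_words(void __iomem *regs, u16 *buf, int words)
 *      {
 *              readsw(regs + DATA_REG, buf, words);
 *      }
 */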
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u8 *buf = buffer;

                do {
                        u8 x = __raw_readb(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u16 *buf = buffer;

                do {
                        u16 x = __raw_readw(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u32 *buf = buffer;

                do {
                        u32 x = __raw_readl(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
                          unsigned int count)
{
        if (count) {
                u64 *buf = buffer;

                do {
                        u64 x = __raw_readq(addr);
                        *buf++ = x;
                } while (--count);
        }
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u8 *buf = buffer;

                do {
                        __raw_writeb(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u16 *buf = buffer;

                do {
                        __raw_writew(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u32 *buf = buffer;

                do {
                        __raw_writel(*buf++, addr);
                } while (--count);
        }
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
                           unsigned int count)
{
        if (count) {
                const u64 *buf = buffer;

                do {
                        __raw_writeq(*buf++, addr);
                } while (--count);
        }
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

#include <linux/logic_pio.h>

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
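
/*
 * Illustrative sketch (not part of this header): inb()/outb() address legacy
 * I/O port space rather than memory-mapped registers. The 0x3f8 base below is
 * the conventional first 16550 UART port and is used purely as an example:
 *
 *      static void legacy_uart_putc(char c)
 *      {
 *              while (!(inb(0x3f8 + 5) & 0x20))
 *                      cpu_relax();
 *              outb(c, 0x3f8);
 *      }
 */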

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
        u8 val;

        __io_pbr();
        val = __raw_readb(PCI_IOBASE + addr);
        __io_par();
        return val;
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
        u16 val;

        __io_pbr();
        val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
        __io_par();
        return val;
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
        u32 val;

        __io_pbr();
        val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
        __io_par();
        return val;
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
        __io_pbw();
        __raw_writeb(value, PCI_IOBASE + addr);
        __io_paw();
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
        __io_pbw();
        __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
        __io_paw();
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
        __io_pbw();
        __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
        __io_paw();
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
        return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
        return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
        return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
        outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
        outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
        outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
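
/*
 * Illustrative sketch (not part of this header): insw()/outsw() move a whole
 * buffer through one port, e.g. the data register of an old IDE/ATA-style
 * interface. The port and sector size handling are invented for the example;
 * 256 half-words correspond to one 512-byte sector:
 *
 *      static void ata_style_read_sector(unsigned long data_port, u16 *buf)
 *      {
 *              insw(data_port, buf, 256);
 *      }
 */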

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
        readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
        readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
        readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
        insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
        insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
        insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
        return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
        return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
        return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
        return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
        writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
        writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
        writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
        writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
        return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
        return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
        return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
        writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
        writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
        writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
                               unsigned int count)
{
        readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
                                const void *buffer,
                                unsigned int count)
{
        writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Convert virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
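
/*
 * Illustrative sketch (not part of this header): virt_to_phys() only applies
 * to addresses in the kernel's linear mapping, e.g. memory from kmalloc(),
 * never to vmalloc() or ioremap() addresses:
 *
 *      void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *      unsigned long phys;
 *
 *      if (buf)
 *              phys = virt_to_phys(buf);
 */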
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an IOMMU, your architecture is expected to implement both
 * ioremap() and iounmap() itself; otherwise the asm-generic helpers below
 * provide a direct mapping.
 *
 * The ioremap_*() variants naturally default to a direct mapping when there
 * is no IOMMU, and you can override any of these defaults. If you do have an
 * IOMMU, you are strongly encouraged to provide your own ioremap_*()
 * implementations, as there is currently no safe architecture-agnostic
 * default. To avoid improper behaviour, the default asm-generic ioremap_*()
 * variants all return NULL when an IOMMU is available. If you define your
 * own ioremap_*() variant, you must also #define it to itself so that this
 * NULL-returning default is not used.
 */
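
/*
 * Illustrative sketch (not part of this header): a typical driver maps its
 * register window once at probe time and unmaps it on removal. The base
 * address, size and register value are invented for the example:
 *
 *      void __iomem *regs = ioremap(0xfe001000, 0x100);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs);
 *      iounmap(regs);
 */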

#ifdef CONFIG_MMU

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
        return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * Map a physical address directly; without an MMU there is no translation
 * to set up.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
        return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
                                      unsigned long flags)
{
        return ioremap(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */
#ifndef ioremap_nocache
void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
        return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
        return ioremap_nocache(offset, size);
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
        port &= IO_SPACE_LIMIT;
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
        return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
        return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
        return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr:       The beginning of the I/O-memory range to set
 * @value:      The value to set the memory to
 * @size:       The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
                             size_t size)
{
        memset(__io_virt(addr), value, size);
}
#endif
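
/*
 * Illustrative sketch (not part of this header): clearing a memory-mapped
 * buffer, e.g. on-device SRAM, before use. "sram" and SRAM_SIZE are made-up
 * names for the example:
 *
 *      memset_io(sram, 0, SRAM_SIZE);
 */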

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer:     The (RAM) destination for the copy
 * @addr:       The (I/O memory) source for the data
 * @size:       The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
                                 const volatile void __iomem *addr,
                                 size_t size)
{
        memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr:       The (I/O memory) destination for the copy
 * @buffer:     The (RAM) source for the data
 * @size:       The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
                               size_t size)
{
        memcpy(__io_virt(addr), buffer, size);
}
#endif
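
/*
 * Illustrative sketch (not part of this header): staging a firmware image
 * into device memory and reading back a response buffer. The device pointer,
 * offsets and lengths are invented for the example:
 *
 *      memcpy_toio(dev_mem, fw->data, fw->size);
 *      memcpy_fromio(reply, dev_mem + REPLY_OFFSET, REPLY_LEN);
 */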

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */