linux/arch/riscv/include/asm/io.h
/*
 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
 *   which was based on arch/arm/include/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2014 Regents of the University of California
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_IO_H
#define _ASM_RISCV_IO_H

#include <linux/types.h>

extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.  This should be fixed by the
 * upcoming platform spec.
 */
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))
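
/*
 * Example (illustrative only, not part of this header): since PMAs can't be
 * modified yet, all three variants produce the same mapping.  A driver would
 * map its register window once at probe time; "res" here is a hypothetical
 * struct resource:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 */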

extern void iounmap(volatile void __iomem *addr);

/* Generic IO read/write.  These perform native-endian accesses. */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}

#ifdef CONFIG_64BIT
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;

	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;

	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;

	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#ifdef CONFIG_64BIT
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 val;

	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}
#endif
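
/*
 * Example (illustrative only): the raw accessors compile to a single load or
 * store with no fences and no endianness conversion, so they are only safe
 * where the caller handles ordering itself.  "regs" and "REG_DATA" are
 * hypothetical:
 *
 *	__raw_writel(0x1, regs + REG_DATA);	// native-endian, unordered
 *	u32 v = __raw_readl(regs + REG_DATA);
 */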

/*
 * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
 * the ordering with I/O on spinlocks like PowerPC does.  The worry is that
 * drivers won't get this correct, but I also don't want to introduce a fence
 * into the lock code that otherwise only uses AMOs (and is essentially defined
 * by the ISA to be correct).   For now I'm leaving this here: "o,w" is
 * sufficient to ensure that all writes to the device have completed before the
 * write to the spinlock is allowed to commit.  I surmised this from reading
 * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
 */
#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
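
/*
 * Example (illustrative only), following the "ACQUIRES VS I/O ACCESSES"
 * pattern in memory-barriers.txt: mmiowb() keeps MMIO writes issued under a
 * spinlock from interleaving at the device when the lock migrates between
 * CPUs.  The lock and register names are hypothetical:
 *
 *	spin_lock_irqsave(&dev_lock, flags);
 *	writel(cmd, regs + REG_CMD);
 *	mmiowb();	// device write completes before the unlock commits
 *	spin_unlock_irqrestore(&dev_lock, flags);
 */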

/*
 * Unordered I/O memory access primitives.  These are even more relaxed than
 * the relaxed versions, as they don't even order accesses between successive
 * operations to the I/O regions.
 */
#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

#define writeb_cpu(v,c)		((void)__raw_writeb((v),(c)))
#define writew_cpu(v,c)		((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_cpu(v,c)		((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))

#ifdef CONFIG_64BIT
#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeq_cpu(v,c)		((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#endif
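
/*
 * Note (illustrative, not from the spec): the _cpu accessors assume
 * little-endian device registers, so on RISC-V (a little-endian architecture)
 * readl_cpu(c) reduces to a single lw; on a hypothetical big-endian kernel it
 * would byte-swap, i.e. it behaves as:
 *
 *	u32 v = le32_to_cpu((__force __le32)__raw_readl(c));
 */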

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.  These are defined to order the indicated access (either a read or
 * write) with all other I/O memory accesses to the same peripheral.  Since the
 * platform specification defines that all I/O regions are strongly ordered on
 * channel 0, no explicit fences are required to enforce this ordering.
 */
/* FIXME: These are now the same as asm-generic */
#define __io_rbr()		do {} while (0)
#define __io_rar()		do {} while (0)
#define __io_rbw()		do {} while (0)
#define __io_raw()		do {} while (0)

#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })

#define writeb_relaxed(v,c)	({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
#define writew_relaxed(v,c)	({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
#define writel_relaxed(v,c)	({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })

#ifdef CONFIG_64BIT
#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
#define writeq_relaxed(v,c)	({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
#endif
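
/*
 * Example (illustrative only): the relaxed accessors suit hot paths such as a
 * status poll, where no ordering against Normal memory is needed; "regs" and
 * "REG_STATUS" are hypothetical:
 *
 *	while (!(readl_relaxed(regs + REG_STATUS) & STATUS_READY))
 *		cpu_relax();
 */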

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.  The memory barriers here are necessary as RISC-V
 * doesn't define any ordering between the memory space and the I/O space.
 */
#define __io_br()	do {} while (0)
#define __io_ar()	__asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
#define __io_aw()	do {} while (0)

#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })

#define writeb(v,c)	({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
#define writew(v,c)	({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
#define writel(v,c)	({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })

#ifdef CONFIG_64BIT
#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
#define writeq(v,c)	({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
#endif
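
/*
 * Example (illustrative only): the fully ordered accessors are what a
 * doorbell write after filling a DMA descriptor needs, since "fence w,o"
 * makes the descriptor visible before the MMIO write.  The names here are
 * hypothetical:
 *
 *	desc->addr = cpu_to_le64(dma_addr);	// Normal memory write
 *	writel(1, regs + REG_DOORBELL);		// ordered after the line above
 */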

/*
 * Emulation routines for the port-mapped IO space used by some PCI drivers.
 * These are defined as being "fully synchronous", but also "not guaranteed to
 * be fully ordered with respect to other memory and I/O operations".  We're
 * going to be on the safe side here and just make them:
 *  - Fully ordered WRT each other, by bracketing them with two fences.  The
 *    outer set contains both I/O so inX is ordered with outX, while the inner just
 *    needs the type of the access (I for inX and O for outX).
 *  - Ordered in the same manner as readX/writeX WRT memory by subsuming their
 *    fences.
 *  - Ordered WRT timer reads, so udelay and friends don't get elided by the
 *    implementation.
 * Note that there is no way to actually enforce that outX is a non-posted
 * operation on RISC-V, but hopefully the timer ordering constraint is
 * sufficient to ensure this works sanely on controllers that support I/O
 * writes.
 */
#define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
#define __io_par()	__asm__ __volatile__ ("fence i,ior" : : : "memory");
#define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
#define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");

#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })

#define outb(v,c)	({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#define outw(v,c)	({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#define outl(v,c)	({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })

#ifdef CONFIG_64BIT
#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
#define outq(v,c)	({ __io_pbw(); writeq_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#endif
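
/*
 * Example (illustrative only): port I/O is used by legacy PCI devices; the
 * port number is an offset into the PCI_IOBASE window.  0x3f8 is the
 * conventional base of the first 16550 UART:
 *
 *	outb('A', 0x3f8);		// transmit holding register
 *	u8 lsr = inb(0x3f8 + 5);	// line status register
 */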

/*
 * Accesses from a single hart to a single I/O address must be ordered.  This
 * allows us to use the raw read macros, but we still need to fence before and
 * after the block to ensure ordering WRT other macros.  These are defined to
 * perform host-endian accesses so we use __raw instead of __cpu.
 */
#define __io_reads_ins(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(const volatile void __iomem *addr, \
					     void *buffer,			\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			ctype *buf = buffer;					\
										\
			do {							\
				ctype x = __raw_read ## len(addr);		\
				*buf++ = x;					\
			} while (--count);					\
		}								\
		afence;								\
	}

#define __io_writes_outs(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(volatile void __iomem *addr,	\
					     const void *buffer,		\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			const ctype *buf = buffer;				\
										\
			do {							\
				__raw_write ## len(*buf++, addr);		\
			} while (--count);					\
		}								\
		afence;								\
	}
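
/*
 * For reference (illustrative expansion): __io_reads_ins(reads, u32, l, ...)
 * below generates __readsl(), roughly:
 *
 *	static inline void __readsl(const volatile void __iomem *addr,
 *				    void *buffer, unsigned int count)
 *	{
 *		__io_br();
 *		if (count) {
 *			u32 *buf = buffer;
 *			do {
 *				*buf++ = __raw_readl(addr);
 *			} while (--count);
 *		}
 *		__io_ar();
 *	}
 */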

__io_reads_ins(reads,  u8, b, __io_br(), __io_ar())
__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
#define readsl(addr, buffer, count) __readsl(addr, buffer, count)

__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)

__io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
#define writesl(addr, buffer, count) __writesl(addr, buffer, count)

__io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
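
/*
 * Example (illustrative only): the string accessors repeatedly access a
 * single I/O address, which matches FIFO-style hardware.  Draining a 16-bit
 * FIFO into a buffer ("regs", "REG_FIFO" and "words" are hypothetical):
 *
 *	readsw(regs + REG_FIFO, buf, words);
 */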

#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)

__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)

__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)

__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
#endif

#include <asm-generic/io.h>

#endif /* _ASM_RISCV_IO_H */