linux/arch/arc/include/asm/io.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()               rmb()
#define __iowmb()               wmb()
#else
#define __iormb()               do { } while (0)
#define __iowmb()               do { } while (0)
#endif

extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                                  unsigned long flags);
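
/*
 * ARC has no separate I/O port address space: the "port" cookie passed to
 * ioport_map() is already a bus address, so the mapping is a plain cast
 * and ioport_unmap() has nothing to undo.
 */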
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
        return (void __iomem *)port;
}

static inline void ioport_unmap(void __iomem *addr)
{
}

extern void iounmap(const void __iomem *addr);

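/*
 * Illustrative sketch only (not built): mapping a device register window
 * with the ioremap()/iounmap() pair declared above. The base address,
 * window size and register offset are made up for the example.
 */
#if 0
static u32 foo_read_id(void)
{
        void __iomem *regs = ioremap(0xf0000000, 0x1000);      /* made-up base/size */
        u32 id = 0;

        if (regs) {
                id = readl(regs);       /* device ID register assumed at offset 0x0 */
                iounmap(regs);
        }
        return id;
}
#endif
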
/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)           ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)           ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)        ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)        ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })

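/*
 * Illustrative sketch only (not built): driving a hypothetical device
 * whose registers are fixed big-endian with the accessors above. The
 * register offsets and bit values are made up.
 */
#if 0
static void foo_kick_be(void __iomem *regs)
{
        u32 stat = ioread32be(regs + 0x04);     /* made-up STATUS register */

        if (!(stat & 0x1))                      /* made-up "busy" bit */
                iowrite32be(0x1, regs + 0x00);  /* made-up CTRL "go" bit */
}
#endif
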
/* Change struct page to physical address */
#define page_to_phys(page)              (page_to_pfn(page) << PAGE_SHIFT)

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
        u8 b;

        __asm__ __volatile__(
        "       ldb%U1 %0, %1   \n"
        : "=r" (b)
        : "m" (*(volatile u8 __force *)addr)
        : "memory");

        return b;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
        u16 s;

        __asm__ __volatile__(
        "       ldw%U1 %0, %1   \n"
        : "=r" (s)
        : "m" (*(volatile u16 __force *)addr)
        : "memory");

        return s;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
        u32 w;

        __asm__ __volatile__(
        "       ld%U1 %0, %1    \n"
        : "=r" (w)
        : "m" (*(volatile u32 __force *)addr)
        : "memory");

        return w;
}

/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address in
 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
 * @count times
 */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,    \
                                  void *ptr, unsigned int count)        \
{                                                                       \
        bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
        u##t *buf = ptr;                                                \
                                                                        \
        if (!count)                                                     \
                return;                                                 \
                                                                        \
        /* Some ARC CPUs don't support unaligned accesses */            \
        if (is_aligned) {                                               \
                do {                                                    \
                        u##t x = __raw_read##f(addr);                   \
                        *buf++ = x;                                     \
                } while (--count);                                      \
        } else {                                                        \
                do {                                                    \
                        u##t x = __raw_read##f(addr);                   \
                        put_unaligned(x, buf++);                        \
                } while (--count);                                      \
        }                                                               \
}

#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)

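/*
 * Illustrative sketch only (not built): draining a 16-bit wide RX FIFO
 * with the __raw_readsw() helper generated above. All names are made up.
 */
#if 0
static void foo_drain_rx_fifo(void __iomem *fifo, void *buf, unsigned int words)
{
        __raw_readsw(fifo, buf, words); /* buf may be unaligned; the put_unaligned() path copes */
        __iormb();                      /* order the FIFO reads before later loads, as readsw() below does */
}
#endif
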
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
        __asm__ __volatile__(
        "       stb%U1 %0, %1   \n"
        :
        : "r" (b), "m" (*(volatile u8 __force *)addr)
        : "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
        __asm__ __volatile__(
        "       stw%U1 %0, %1   \n"
        :
        : "r" (s), "m" (*(volatile u16 __force *)addr)
        : "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
        __asm__ __volatile__(
        "       st%U1 %0, %1    \n"
        :
        : "r" (w), "m" (*(volatile u32 __force *)addr)
        : "memory");
}

#define __raw_writesx(t,f)                                              \
static inline void __raw_writes##f(volatile void __iomem *addr,         \
                                   const void *ptr, unsigned int count) \
{                                                                       \
        bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
        const u##t *buf = ptr;                                          \
                                                                        \
        if (!count)                                                     \
                return;                                                 \
                                                                        \
        /* Some ARC CPUs don't support unaligned accesses */            \
        if (is_aligned) {                                               \
                do {                                                    \
                        __raw_write##f(*buf++, addr);                   \
                } while (--count);                                      \
        } else {                                                        \
                do {                                                    \
                        __raw_write##f(get_unaligned(buf++), addr);     \
                } while (--count);                                      \
        }                                                               \
}

#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)

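/*
 * Illustrative sketch only (not built): the write-side counterpart,
 * pushing a buffer into a 32-bit wide TX FIFO with __raw_writesl().
 * All names are made up.
 */
#if 0
static void foo_fill_tx_fifo(void __iomem *fifo, const void *buf, unsigned int words)
{
        __iowmb();                      /* order earlier stores before the FIFO writes, as writesl() below does */
        __raw_writesl(fifo, buf, words);
}
#endif
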
/*
 * MMIO can also get buffered/optimized in the micro-architecture, so
 * barriers are needed. Based on the ARM model for the typical use cases:
 *
 *      <ST [DMA buffer]>
 *      <writel MMIO "go" reg>
 *  or:
 *      <readl MMIO "status" reg>
 *      <LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)                ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)                ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)                ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)           ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)           ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)           ({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)             ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)             ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)             ({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)          ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)          ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)          ({ __iowmb(); __raw_writesl(p,d,l); })

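/*
 * Illustrative sketch only (not built): the two orderings described in the
 * comment above, against a hypothetical in-memory descriptor and made-up
 * "go"/"status" registers. writel()/readl() supply the barriers via
 * __iowmb()/__iormb().
 */
#if 0
struct foo_desc {                       /* hypothetical DMA descriptor */
        u32 len;
        u32 result;
};

static void foo_start_dma(struct foo_desc *desc, void __iomem *regs)
{
        desc->len = 64;                 /* <ST [DMA buffer]> */
        writel(1, regs + 0x10);         /* <writel MMIO "go" reg> */
}

static u32 foo_check_done(struct foo_desc *desc, void __iomem *regs)
{
        if (!(readl(regs + 0x14) & 1))  /* <readl MMIO "status" reg> */
                return 0;
        return desc->result;            /* <LD [DMA buffer]> */
}
#endif
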
/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * These are also defined to perform little endian accesses: to provide
 * the typical device-register semantics of a fixed endianness, the byte
 * order is swapped on Big Endian builds.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)        __raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
                                        __raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
                                        __raw_readl(c)); __r; })

#define writeb_relaxed(v,c)     __raw_writeb(v,c)
#define writew_relaxed(v,c)     __raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)     __raw_writel((__force u32) cpu_to_le32(v),c)

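/*
 * Illustrative sketch only (not built): batching register writes with the
 * relaxed accessors and paying for a single barrier on the final doorbell,
 * in the spirit of the comment above. The offsets are made up.
 */
#if 0
static void foo_program_channel(void __iomem *regs, u32 src, u32 dst, u32 len)
{
        writel_relaxed(src, regs + 0x00);
        writel_relaxed(dst, regs + 0x04);
        writel_relaxed(len, regs + 0x08);
        writel(1, regs + 0x0c);         /* final "go": writel() emits __iowmb() first */
}
#endif
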
#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */