linux/arch/sh/include/asm/io.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
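
/*
 * Illustrative usage (a sketch, not part of the interface defined here;
 * "base", DEV_STATUS and DEV_CTRL are hypothetical): a driver that has
 * ioremap()'d its registers would normally use the barrier-including
 * accessors, keeping the __raw forms for paths that order accesses by
 * hand:
 *
 *      u32 status = readl(base + DEV_STATUS);  // read, then rmb()
 *      writel(1, base + DEV_CTRL);             // wmb(), then write
 */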
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)       (__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)       (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)       (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)       (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)          (__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)          (__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)          (__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)          (__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)        ({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)        ({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)        ({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)        ({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)     ((void)__raw_writeb((__force  u8)ioswabb(v),c))
#define writew_relaxed(v,c)     ((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)     ((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)     ((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)                ({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)                ({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)                ({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)                ({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)             ({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)             ({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)             ({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)             ({ wmb(); writeq_relaxed((v),(a)); })
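
/*
 * A minimal sketch of the ordering difference encoded above (register
 * names hypothetical): readl() is readl_relaxed() followed by rmb(), and
 * writel() is wmb() followed by writel_relaxed(), so the _relaxed forms
 * leave all ordering to the caller:
 *
 *      writel(val, desc_reg);          // prior normal stores ordered first
 *      v = readl_relaxed(stat_reg);    // no barrier; add rmb() if needed
 */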

#define readsb(p,d,l)           __raw_readsb(p,d,l)
#define readsw(p,d,l)           __raw_readsw(p,d,l)
#define readsl(p,d,l)           __raw_readsl(p,d,l)

#define writesb(p,d,l)          __raw_writesb(p,d,l)
#define writesw(p,d,l)          __raw_writesw(p,d,l)
#define writesl(p,d,l)          __raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)                                 \
static inline type read##bwlq##_uncached(unsigned long addr)            \
{                                                                       \
        type ret;                                                       \
        jump_to_uncached();                                             \
        ret = __raw_read##bwlq(addr);                                   \
        back_to_cached();                                               \
        return ret;                                                     \
}                                                                       \
                                                                        \
static inline void write##bwlq##_uncached(type v, unsigned long addr)   \
{                                                                       \
        jump_to_uncached();                                             \
        __raw_write##bwlq(v, addr);                                     \
        back_to_cached();                                               \
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
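
/*
 * For reference, __BUILD_UNCACHED_IO(l, u32) above expands to roughly:
 *
 *      static inline u32 readl_uncached(unsigned long addr)
 *      {
 *              u32 ret;
 *              jump_to_uncached();
 *              ret = __raw_readl(addr);
 *              back_to_cached();
 *              return ret;
 *      }
 *
 * i.e. each access is bracketed by a switch to the uncached mapping and
 * back.
 */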

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)                          \
                                                                        \
static inline void                                                      \
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,         \
                  unsigned int count)                                   \
{                                                                       \
        const volatile type *__addr = addr;                             \
                                                                        \
        while (count--) {                                               \
                __raw_write##bwlq(*__addr, mem);                        \
                __addr++;                                               \
        }                                                               \
}                                                                       \
                                                                        \
static inline void pfx##reads##bwlq(volatile void __iomem *mem,         \
                                    void *addr, unsigned int count)     \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                *__addr = __raw_read##bwlq(mem);                        \
                __addr++;                                               \
        }                                                               \
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
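
/*
 * Usage sketch for the string helpers generated above ("fifo" and "buf"
 * are hypothetical): draining a 16-bit FIFO is a repeated __raw_readw()
 * of the same register into consecutive buffer slots:
 *
 *      u16 buf[64];
 *      __raw_readsw(fifo, buf, 64);    // 64 reads from the same address
 */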

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
        *(unsigned long *)&sh_io_port_base = pbase;
        barrier();
}
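
/*
 * A platform would establish the port window once at setup time, e.g.
 * (address hypothetical):
 *
 *      __set_io_port_base(0xa0000000);
 *
 * after which the in/out accessors below resolve ports through
 * __ioport_map() relative to sh_io_port_base.
 */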

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)                 \
                                                                        \
static inline void pfx##out##bwlq##p(type val, unsigned long port)      \
{                                                                       \
        volatile type *__addr;                                          \
                                                                        \
        __addr = __ioport_map(port, sizeof(type));                      \
        *__addr = val;                                                  \
        slow;                                                           \
}                                                                       \
                                                                        \
static inline type pfx##in##bwlq##p(unsigned long port)                 \
{                                                                       \
        volatile type *__addr;                                          \
        type __val;                                                     \
                                                                        \
        __addr = __ioport_map(port, sizeof(type));                      \
        __val = *__addr;                                                \
        slow;                                                           \
                                                                        \
        return __val;                                                   \
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)                             \
        __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)                       \
        __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)                                      \
        __BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
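
/*
 * For reference, BUILDIO_IOPORT(b, u8) above generates outb()/inb() plus
 * the pausing outb_p()/inb_p() pair; the non-pausing outb() expands to
 * roughly:
 *
 *      static inline void outb(u8 val, unsigned long port)
 *      {
 *              volatile u8 *__addr = __ioport_map(port, sizeof(u8));
 *              *__addr = val;
 *      }
 *
 * with the _p variants additionally issuing SLOW_DOWN_IO afterwards.
 */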

#define __BUILD_IOPORT_STRING(bwlq, type)                               \
                                                                        \
static inline void outs##bwlq(unsigned long port, const void *addr,     \
                              unsigned int count)                       \
{                                                                       \
        const volatile type *__addr = addr;                             \
                                                                        \
        while (count--) {                                               \
                out##bwlq(*__addr, port);                               \
                __addr++;                                               \
        }                                                               \
}                                                                       \
                                                                        \
static inline void ins##bwlq(unsigned long port, void *addr,            \
                             unsigned int count)                        \
{                                                                       \
        volatile type *__addr = addr;                                   \
                                                                        \
        while (count--) {                                               \
                *__addr = in##bwlq(port);                               \
                __addr++;                                               \
        }                                                               \
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
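
/*
 * Usage sketch ("data_port" hypothetical): the string forms simply loop
 * over the single-access helpers, so reading a 512-byte sector from an
 * IDE-style 16-bit data port is:
 *
 *      u16 sector[256];
 *      insw(data_port, sector, 256);   // 256 repeated inw() reads
 */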

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif


#define IO_SPACE_LIMIT 0xffffffff

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
                                       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)   ((unsigned long)(address))
#define phys_to_virt(address)   ((void *)(address))
#else
#define virt_to_phys(address)   (__pa(address))
#define phys_to_virt(address)   (__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
                               pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
        return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
        phys_addr_t last_addr = offset + size - 1;

        /*
         * For P1 and P2 space this is trivial, as everything is already
         * mapped. Uncached accesses for P1 addresses are done through P2.
         * In the P3 case or for addresses outside of the 29-bit space,
         * mapping must be done by the PMB or by using page tables.
         */
        if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
                u64 flags = pgprot_val(prot);

                /*
                 * Anything using the legacy PTEA space attributes needs
                 * to be kicked down to page table mappings.
                 */
                if (unlikely(flags & _PAGE_PCC_MASK))
                        return NULL;
                if (unlikely(flags & _PAGE_CACHABLE))
                        return (void __iomem *)P1SEGADDR(offset);

                return (void __iomem *)P2SEGADDR(offset);
        }

        /* P4 addresses above the store queues are always mapped. */
        if (unlikely(offset >= P3_ADDR_MAX))
                return (void __iomem *)P4SEGADDR(offset);
#endif

        return NULL;
}
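
/*
 * Worked example of the 29-bit fast path above (values hypothetical):
 * for a device at physical 0x1f000000 mapped with an uncached pgprot,
 * both ends of the range fall below P3SEG and _PAGE_CACHABLE is clear,
 * so the function returns P2SEGADDR(0x1f000000), the fixed uncached P2
 * alias, with no page tables involved.
 */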

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
        void __iomem *ret;

        ret = __ioremap_trapped(offset, size);
        if (ret)
                return ret;

        ret = __ioremap_29bit(offset, size, prot);
        if (ret)
                return ret;

        return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)           ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)      ((void __iomem *)(offset))
#define __iounmap(addr)                         do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
        return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
        return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache
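
/*
 * Typical driver-side usage of the ioremap() family defined above
 * (names hypothetical):
 *
 *      void __iomem *regs = ioremap(res->start, resource_size(res));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + REG_RESET);
 *      ...
 *      iounmap(regs);
 *
 * ioremap() always hands back an uncached mapping (PAGE_KERNEL_NOCACHE);
 * ioremap_cache() is the explicit opt-in for a cached one (PAGE_KERNEL).
 */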

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
        return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
        BUG();
        return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache ioremap
#define ioremap_uc      ioremap

static inline void iounmap(void __iomem *addr)
{
        __iounmap(addr);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)   p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */