linux/arch/powerpc/include/asm/dcr-native.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 */

#ifndef _ASM_POWERPC_DCR_NATIVE_H
#define _ASM_POWERPC_DCR_NATIVE_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
#include <linux/stringify.h>

typedef struct {
	unsigned int base;
} dcr_host_native_t;

static inline bool dcr_map_ok_native(dcr_host_native_t host)
{
	return true;
}

#define dcr_map_native(dev, dcr_n, dcr_c) \
	((dcr_host_native_t){ .base = (dcr_n) })
#define dcr_unmap_native(host, dcr_c)		do {} while (0)
#define dcr_read_native(host, dcr_n)		mfdcr((dcr_n) + (host).base)
#define dcr_write_native(host, dcr_n, value)	mtdcr((dcr_n) + (host).base, value)
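
/*
 * Illustrative sketch (the device pointer and DCR numbers are made up):
 * a native "map" just records the base DCR number, so a subsequent
 * dcr_read_native(host, 0) expands to a plain mfdcr(0x80).
 *
 *	dcr_host_native_t host = dcr_map_native(dev, 0x80, 2);
 *	if (dcr_map_ok_native(host))
 *		val = dcr_read_native(host, 0);
 */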

/* Table based DCR accessors, implemented out of line in dcr-low.S */
extern void __mtdcr(unsigned int reg, unsigned int val);
extern unsigned int __mfdcr(unsigned int reg);

/* mfdcrx/mtdcrx instruction based accessors. We hand-code
 * the opcodes in order not to depend on newer binutils.
 */
static inline unsigned int mfdcrx(unsigned int reg)
{
	unsigned int ret;

	/* mfdcrx RT,RA (opcode 31, XO 259): RT at %0 << 21, RA at %1 << 16 */
	asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
		     : "=r" (ret) : "r" (reg));
	return ret;
}

static inline void mtdcrx(unsigned int reg, unsigned int val)
{
	/* mtdcrx RA,RS (opcode 31, XO 387): RS at %0 << 21, RA at %1 << 16 */
	asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
		     : : "r" (val), "r" (reg));
}

/*
 * A constant DCR number below 1024 fits in the instruction's 10-bit
 * DCRN field, so it compiles to a single mfdcr/mtdcr; otherwise we
 * dispatch at run time on CPU_FTR_INDEXED_DCR.
 */
#define mfdcr(rn)						\
	({unsigned int rval;					\
	if (__builtin_constant_p(rn) && rn < 1024)		\
		asm volatile("mfdcr %0," __stringify(rn)	\
			      : "=r" (rval));			\
	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
		rval = mfdcrx(rn);				\
	else							\
		rval = __mfdcr(rn);				\
	rval;})

#define mtdcr(rn, v)						\
do {								\
	if (__builtin_constant_p(rn) && rn < 1024)		\
		asm volatile("mtdcr " __stringify(rn) ",%0"	\
			      : : "r" (v));			\
	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
		mtdcrx(rn, v);					\
	else							\
		__mtdcr(rn, v);					\
} while (0)
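
/*
 * Illustrative sketch (DCRN_EXAMPLE is a made-up DCR number, not
 * something this header defines): a compile-time constant argument
 * emits one mfdcr/mtdcr instruction, a variable argument falls back
 * to mfdcrx/mtdcrx or the table based accessors at run time.
 *
 *	#define DCRN_EXAMPLE	0x0c
 *	unsigned int v = mfdcr(DCRN_EXAMPLE);
 *	mtdcr(DCRN_EXAMPLE, v | 1);
 */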

/* R/W of indirect DCRs makes use of standard naming conventions for DCRs */
extern spinlock_t dcr_ind_lock;

static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
{
	unsigned long flags;
	unsigned int val;

	/*
	 * The address/data DCR pair is a shared resource: write the
	 * target register index, then read its value, all under
	 * dcr_ind_lock so concurrent accessors cannot interleave.
	 */
	spin_lock_irqsave(&dcr_ind_lock, flags);
	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
		mtdcrx(base_addr, reg);
		val = mfdcrx(base_data);
	} else {
		__mtdcr(base_addr, reg);
		val = __mfdcr(base_data);
	}
	spin_unlock_irqrestore(&dcr_ind_lock, flags);
	return val;
}

static inline void __mtdcri(int base_addr, int base_data, int reg,
			    unsigned val)
{
	unsigned long flags;

	spin_lock_irqsave(&dcr_ind_lock, flags);
	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
		mtdcrx(base_addr, reg);
		mtdcrx(base_data, val);
	} else {
		__mtdcr(base_addr, reg);
		__mtdcr(base_data, val);
	}
	spin_unlock_irqrestore(&dcr_ind_lock, flags);
}

static inline void __dcri_clrset(int base_addr, int base_data, int reg,
				 unsigned clr, unsigned set)
{
	unsigned long flags;
	unsigned int val;

	/* Read-modify-write of an indirect DCR, serialized by dcr_ind_lock */
	spin_lock_irqsave(&dcr_ind_lock, flags);
	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
		mtdcrx(base_addr, reg);
		val = (mfdcrx(base_data) & ~clr) | set;
		mtdcrx(base_data, val);
	} else {
		__mtdcr(base_addr, reg);
		val = (__mfdcr(base_data) & ~clr) | set;
		__mtdcr(base_data, val);
	}
	spin_unlock_irqrestore(&dcr_ind_lock, flags);
}

#define mfdcri(base, reg)	__mfdcri(DCRN_ ## base ## _CONFIG_ADDR,	\
					 DCRN_ ## base ## _CONFIG_DATA,	\
					 reg)

#define mtdcri(base, reg, data)	__mtdcri(DCRN_ ## base ## _CONFIG_ADDR,	\
					 DCRN_ ## base ## _CONFIG_DATA,	\
					 reg, data)

#define dcri_clrset(base, reg, clr, set)	__dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR,	\
							      DCRN_ ## base ## _CONFIG_DATA,	\
							      reg, clr, set)
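
/*
 * Illustrative sketch (SDR0 stands in for any block whose platform
 * code defines DCRN_SDR0_CONFIG_ADDR/DCRN_SDR0_CONFIG_DATA; the
 * register index and bit masks are made up): the token-pasting macros
 * above turn a block name into the matching address/data DCR pair.
 *
 *	u32 cfg = mfdcri(SDR0, 0x20);
 *	dcri_clrset(SDR0, 0x20, 0x3, 0x1);	clears bits 0-1, sets bit 0
 */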

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_NATIVE_H */