/* linux/arch/arc/include/asm/smp.h */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
   8
   9#ifndef __ASM_ARC_SMP_H
  10#define __ASM_ARC_SMP_H
  11
  12#ifdef CONFIG_SMP
  13
  14#include <linux/types.h>
  15#include <linux/init.h>
  16#include <linux/threads.h>
  17
  18#define raw_smp_processor_id() (current_thread_info()->cpu)
  19
/* including cpumask.h leads to cyclic deps hence this forward declaration */
  21struct cpumask;
  22
/*
 * APIs provided by arch SMP code to generic code
 */
  26extern void arch_send_call_function_single_ipi(int cpu);
  27extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
  28
/*
 * APIs provided by arch SMP code to rest of arch code
 */
  32extern void __init smp_init_cpus(void);
  33extern void first_lines_of_secondary(void);
  34extern const char *arc_platform_smp_cpuinfo(void);
  35
/*
 * API expected BY platform smp code (FROM arch smp code)
 *
 * smp_ipi_irq_setup:
 *      Takes @cpu and @irq to which the arch-common ISR is hooked up
 */
  42extern int smp_ipi_irq_setup(int cpu, int irq);
  43
/*
 * struct plat_smp_ops  - SMP callbacks provided by platform to ARC SMP
 *
 * @info:               SoC SMP specific info for /proc/cpuinfo etc
 * @cpu_kick:           For Master to kickstart a cpu (optionally at a PC)
 * @ipi_send:           To send IPI to a @cpu
 * @ipi_clear:          To clear IPI received at @irq
 */
struct plat_smp_ops {
        const char      *info;
        void            (*cpu_kick)(int cpu, unsigned long pc);
        void            (*ipi_send)(int cpu);
        void            (*ipi_clear)(int irq);
};
  58
/* TBD: stop exporting it for direct population by platform */
  60extern struct plat_smp_ops  plat_smp_ops;
  61
  62#else /* CONFIG_SMP */
  63
  64static inline void smp_init_cpus(void) {}
  65static inline const char *arc_platform_smp_cpuinfo(void)
  66{
  67        return "";
  68}
  69
  70#endif  /* !CONFIG_SMP */
  71
/*
 * ARC700 doesn't support atomic Read-Modify-Write ops.
 * Originally interrupts had to be disabled around code to guarantee atomicity.
 * The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
 * based on retry-if-irq-in-atomic (with hardware assist).
 * However despite these, we provide the IRQ disabling variant
 *
 * (1) These insns were introduced only in 4.10 release. So for older releases
 *      support needed.
 *
 * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
 *      guaranteed by the platform (not something which core handles).
 *      Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
 *      disabling for atomicity.
 *
 *      However exported spinlock API is not usable due to cyclic hdr deps
 *      (even after system.h disintegration upstream)
 *      asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
 *              -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
 *
 *      So the workaround is to use the lowest level arch spinlock API.
 *      The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
 *      but same is not true for ARCH backend, hence the need for 2 variants
 */
  96#ifndef CONFIG_ARC_HAS_LLSC
  97
  98#include <linux/irqflags.h>
  99#ifdef CONFIG_SMP
 100
 101#include <asm/spinlock.h>
 102
 103extern arch_spinlock_t smp_atomic_ops_lock;
 104extern arch_spinlock_t smp_bitops_lock;
 105
/*
 * SMP (no LLSC) variants: atomicity via a global arch spinlock, with local
 * IRQs disabled BEFORE taking the lock so an interrupt on this CPU cannot
 * run while the lock is held (see rationale comment above).
 */
#define atomic_ops_lock(flags)  do {            \
        local_irq_save(flags);                  \
        arch_spin_lock(&smp_atomic_ops_lock);   \
} while (0)

#define atomic_ops_unlock(flags) do {           \
        arch_spin_unlock(&smp_atomic_ops_lock); \
        local_irq_restore(flags);               \
} while (0)

#define bitops_lock(flags)      do {            \
        local_irq_save(flags);                  \
        arch_spin_lock(&smp_bitops_lock);       \
} while (0)

#define bitops_unlock(flags) do {               \
        arch_spin_unlock(&smp_bitops_lock);     \
        local_irq_restore(flags);               \
} while (0)
 125
 126#else /* !CONFIG_SMP */
 127
/* UP (no LLSC) variants: disabling local interrupts alone is sufficient */
#define atomic_ops_lock(flags)          local_irq_save(flags)
#define atomic_ops_unlock(flags)        local_irq_restore(flags)

#define bitops_lock(flags)              local_irq_save(flags)
#define bitops_unlock(flags)            local_irq_restore(flags)
 133
 134#endif /* !CONFIG_SMP */
 135
 136#endif  /* !CONFIG_ARC_HAS_LLSC */
 137
 138#endif
 139