/* linux/arch/mips/include/asm/smp.h */
   1/*
   2 * This file is subject to the terms and conditions of the GNU General
   3 * Public License.  See the file "COPYING" in the main directory of this
   4 * archive for more details.
   5 *
   6 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
   7 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
   8 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
   9 * Copyright (C) 2000, 2001 Broadcom Corporation
  10 */
  11#ifndef __ASM_SMP_H
  12#define __ASM_SMP_H
  13
  14#include <linux/bitops.h>
  15#include <linux/linkage.h>
  16#include <linux/smp.h>
  17#include <linux/threads.h>
  18#include <linux/cpumask.h>
  19
  20#include <linux/atomic.h>
  21#include <asm/smp-ops.h>
  22
  23extern int smp_num_siblings;
  24extern cpumask_t cpu_sibling_map[];
  25extern cpumask_t cpu_core_map[];
  26extern cpumask_t cpu_foreign_map[];
  27
  28#define raw_smp_processor_id() (current_thread_info()->cpu)
  29
  30/* Map from cpu id to sequential logical cpu number.  This will only
  31   not be idempotent when cpus failed to come on-line.  */
  32extern int __cpu_number_map[NR_CPUS];
  33#define cpu_number_map(cpu)  __cpu_number_map[cpu]
  34
  35/* The reverse map from sequential logical cpu number to cpu id.  */
  36extern int __cpu_logical_map[NR_CPUS];
  37#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
  38
  39#define NO_PROC_ID      (-1)
  40
  41#define SMP_RESCHEDULE_YOURSELF 0x1     /* XXX braindead */
  42#define SMP_CALL_FUNCTION       0x2
  43/* Octeon - Tell another core to flush its icache */
  44#define SMP_ICACHE_FLUSH        0x4
  45#define SMP_ASK_C0COUNT         0x8
  46
  47/* Mask of CPUs which are currently definitely operating coherently */
  48extern cpumask_t cpu_coherent_mask;
  49
  50extern asmlinkage void smp_bootstrap(void);
  51
  52extern void calculate_cpu_foreign_map(void);
  53
  54/*
  55 * this function sends a 'reschedule' IPI to another CPU.
  56 * it goes straight through and wastes no time serializing
  57 * anything. Worst case is that we lose a reschedule ...
  58 */
  59static inline void smp_send_reschedule(int cpu)
  60{
  61        extern const struct plat_smp_ops *mp_ops;       /* private */
  62
  63        mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
  64}
  65
  66#ifdef CONFIG_HOTPLUG_CPU
  67static inline int __cpu_disable(void)
  68{
  69        extern const struct plat_smp_ops *mp_ops;       /* private */
  70
  71        return mp_ops->cpu_disable();
  72}
  73
  74static inline void __cpu_die(unsigned int cpu)
  75{
  76        extern const struct plat_smp_ops *mp_ops;       /* private */
  77
  78        mp_ops->cpu_die(cpu);
  79}
  80
  81extern void play_dead(void);
  82#endif
  83
  84/*
  85 * This function will set up the necessary IPIs for Linux to communicate
  86 * with the CPUs in mask.
  87 * Return 0 on success.
  88 */
  89int mips_smp_ipi_allocate(const struct cpumask *mask);
  90
  91/*
  92 * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
  93 * CPUs in mask, which must be a subset of the IPIs that have been configured.
  94 * Return 0 on success.
  95 */
  96int mips_smp_ipi_free(const struct cpumask *mask);
  97
  98static inline void arch_send_call_function_single_ipi(int cpu)
  99{
 100        extern const struct plat_smp_ops *mp_ops;       /* private */
 101
 102        mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
 103}
 104
 105static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 106{
 107        extern const struct plat_smp_ops *mp_ops;       /* private */
 108
 109        mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 110}
 111
 112#endif /* __ASM_SMP_H */
 113