/* linux/arch/powerpc/include/asm/cputhreads.h */
   1#ifndef _ASM_POWERPC_CPUTHREADS_H
   2#define _ASM_POWERPC_CPUTHREADS_H
   3
   4#ifndef __ASSEMBLY__
   5#include <linux/cpumask.h>
   6#include <asm/cpu_has_feature.h>
   7
   8/*
   9 * Mapping of threads to cores
  10 *
  11 * Note: This implementation is limited to a power of 2 number of
  12 * threads per core and the same number for each core in the system
  13 * (though it would work if some processors had less threads as long
  14 * as the CPU numbers are still allocated, just not brought online).
  15 *
  16 * However, the API allows for a different implementation in the future
  17 * if needed, as long as you only use the functions and not the variables
  18 * directly.
  19 */
  20
#ifdef CONFIG_SMP
/* Number of hardware threads per core (power of 2, same on every core). */
extern int threads_per_core;
/* Number of hardware threads per subcore. */
extern int threads_per_subcore;
/* log2(threads_per_core); used to convert between CPU and core numbers. */
extern int threads_shift;
/* Mask of the threads of core 0; shifted left to describe other cores. */
extern cpumask_t threads_core_mask;
#else
/* UP: one thread, one core; core 0's thread mask is just CPU 0. */
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif
  32
  33/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
  34 *                            hit by the argument
  35 *
  36 * @threads:    a cpumask of online threads
  37 *
  38 * This function returns a cpumask which will have one online cpu's
  39 * bit set for each core that has at least one thread set in the argument.
  40 *
  41 * This can typically be used for things like IPI for tlb invalidations
  42 * since those need to be done only once per core/TLB
  43 */
  44static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
  45{
  46        cpumask_t       tmp, res;
  47        int             i, cpu;
  48
  49        cpumask_clear(&res);
  50        for (i = 0; i < NR_CPUS; i += threads_per_core) {
  51                cpumask_shift_left(&tmp, &threads_core_mask, i);
  52                if (cpumask_intersects(threads, &tmp)) {
  53                        cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
  54                        if (cpu < nr_cpu_ids)
  55                                cpumask_set_cpu(cpu, &res);
  56                }
  57        }
  58        return res;
  59}
  60
  61static inline int cpu_nr_cores(void)
  62{
  63        return nr_cpu_ids >> threads_shift;
  64}
  65
  66static inline cpumask_t cpu_online_cores_map(void)
  67{
  68        return cpu_thread_mask_to_cores(cpu_online_mask);
  69}
  70
#ifdef CONFIG_SMP
/* Core number that CPU @cpu belongs to. */
int cpu_core_index_of_thread(int cpu);
/* CPU number of the first thread of core @core. */
int cpu_first_thread_of_core(int core);
#else
/* UP: one thread per core, so CPU and core numbers coincide. */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif
  78
  79static inline int cpu_thread_in_core(int cpu)
  80{
  81        return cpu & (threads_per_core - 1);
  82}
  83
  84static inline int cpu_thread_in_subcore(int cpu)
  85{
  86        return cpu & (threads_per_subcore - 1);
  87}
  88
  89static inline int cpu_first_thread_sibling(int cpu)
  90{
  91        return cpu & ~(threads_per_core - 1);
  92}
  93
  94static inline int cpu_last_thread_sibling(int cpu)
  95{
  96        return cpu | (threads_per_core - 1);
  97}
  98
/*
 * Read the Thread Enable Status Register (TENSR) on Book3E parts with
 * SMT.  On everything else only thread 0 exists, so report a mask with
 * just bit 0 set.
 */
static inline u32 get_tensr(void)
{
#ifdef	CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}
 107
/*
 * Start/stop a sibling hardware thread on Book3E parts.
 * NOTE(review): @addr appears to be the address the started thread begins
 * executing at — confirm against the implementation.
 */
void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);
 110
 111#endif /* __ASSEMBLY__ */
 112
/* Sentinel hardware thread ID denoting "no valid thread". */
#define INVALID_THREAD_HWID	0x0fff
 114
 115#endif /* _ASM_POWERPC_CPUTHREADS_H */
 116
 117