/* linux/arch/powerpc/include/asm/cputhreads.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_POWERPC_CPUTHREADS_H
   3#define _ASM_POWERPC_CPUTHREADS_H
   4
   5#ifndef __ASSEMBLY__
   6#include <linux/cpumask.h>
   7#include <asm/cpu_has_feature.h>
   8
   9/*
  10 * Mapping of threads to cores
  11 *
  12 * Note: This implementation is limited to a power of 2 number of
  13 * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads as long
 * as the CPU numbers are still allocated, just not brought online).
  16 *
  17 * However, the API allows for a different implementation in the future
  18 * if needed, as long as you only use the functions and not the variables
  19 * directly.
  20 */
  21
#ifdef CONFIG_SMP
/* Number of hardware (SMT) threads per physical core */
extern int threads_per_core;
/* Threads per subcore when a core is split into subcores */
extern int threads_per_subcore;
/* log2(threads_per_core); used to convert CPU ids to core indices */
extern int threads_shift;
/* Mask of the threads belonging to the first core; users shift it per core */
extern cpumask_t threads_core_mask;
#else
/* UP: exactly one CPU with one thread */
#define threads_per_core        1
#define threads_per_subcore     1
#define threads_shift           0
/*
 * NOTE(review): has_big_cores only gets a !SMP fallback here; the SMP
 * declaration is not visible in this chunk — presumably declared in
 * another header/source, confirm.
 */
#define has_big_cores           0
#define threads_core_mask       (*get_cpu_mask(0))
#endif
  34
  35/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
  36 *                            hit by the argument
  37 *
  38 * @threads:    a cpumask of online threads
  39 *
  40 * This function returns a cpumask which will have one online cpu's
  41 * bit set for each core that has at least one thread set in the argument.
  42 *
  43 * This can typically be used for things like IPI for tlb invalidations
  44 * since those need to be done only once per core/TLB
  45 */
  46static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
  47{
  48        cpumask_t       tmp, res;
  49        int             i, cpu;
  50
  51        cpumask_clear(&res);
  52        for (i = 0; i < NR_CPUS; i += threads_per_core) {
  53                cpumask_shift_left(&tmp, &threads_core_mask, i);
  54                if (cpumask_intersects(threads, &tmp)) {
  55                        cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
  56                        if (cpu < nr_cpu_ids)
  57                                cpumask_set_cpu(cpu, &res);
  58                }
  59        }
  60        return res;
  61}
  62
  63static inline int cpu_nr_cores(void)
  64{
  65        return nr_cpu_ids >> threads_shift;
  66}
  67
  68static inline cpumask_t cpu_online_cores_map(void)
  69{
  70        return cpu_thread_mask_to_cores(cpu_online_mask);
  71}
  72
#ifdef CONFIG_SMP
/* Map a CPU (thread) number to the index of its physical core */
int cpu_core_index_of_thread(int cpu);
/* Map a core index back to the CPU number of its first thread */
int cpu_first_thread_of_core(int core);
#else
/* UP: one thread per "core", so both mappings are the identity */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif
  80
  81static inline int cpu_thread_in_core(int cpu)
  82{
  83        return cpu & (threads_per_core - 1);
  84}
  85
  86static inline int cpu_thread_in_subcore(int cpu)
  87{
  88        return cpu & (threads_per_subcore - 1);
  89}
  90
  91static inline int cpu_first_thread_sibling(int cpu)
  92{
  93        return cpu & ~(threads_per_core - 1);
  94}
  95
  96static inline int cpu_last_thread_sibling(int cpu)
  97{
  98        return cpu | (threads_per_core - 1);
  99}
 100
 101/*
 102 * tlb_thread_siblings are siblings which share a TLB. This is not
 103 * architected, is not something a hypervisor could emulate and a future
 104 * CPU may change behaviour even in compat mode, so this should only be
 105 * used on PowerNV, and only with care.
 106 */
 107static inline int cpu_first_tlb_thread_sibling(int cpu)
 108{
 109        if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
 110                return cpu & ~0x6;      /* Big Core */
 111        else
 112                return cpu_first_thread_sibling(cpu);
 113}
 114
 115static inline int cpu_last_tlb_thread_sibling(int cpu)
 116{
 117        if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
 118                return cpu | 0x6;       /* Big Core */
 119        else
 120                return cpu_last_thread_sibling(cpu);
 121}
 122
 123static inline int cpu_tlb_thread_sibling_step(void)
 124{
 125        if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
 126                return 2;               /* Big Core */
 127        else
 128                return 1;
 129}
 130
 131static inline u32 get_tensr(void)
 132{
 133#ifdef  CONFIG_BOOKE
 134        if (cpu_has_feature(CPU_FTR_SMT))
 135                return mfspr(SPRN_TENSR);
 136#endif
 137        return 1;
 138}
 139
/*
 * Start/stop a hardware thread on Book3E parts; implemented elsewhere.
 * NOTE(review): @addr is presumably the entry address for the started
 * thread — confirm at the definition.
 */
void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);
 143#endif /* __ASSEMBLY__ */
 144
/*
 * NOTE(review): presumably a sentinel marking an invalid/unassigned hardware
 * thread id; kept outside the __ASSEMBLY__ guard so asm code can use it.
 */
#define INVALID_THREAD_HWID     0x0fff
 146
 147#endif /* _ASM_POWERPC_CPUTHREADS_H */
 148
 149