linux/arch/powerpc/kernel/paca.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2001 PPC 64 Team, IBM Corp
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

static void *__init alloc_paca_data(unsigned long size, unsigned long align,
                                unsigned long limit, int cpu)
{
        void *ptr;
        int nid;

        /*
         * boot_cpuid paca is allocated very early before cpu_to_node is up.
         * Set bottom-up mode, because the boot CPU should be on node-0,
         * which will put its paca in the right place.
         */
        if (cpu == boot_cpuid) {
                nid = NUMA_NO_NODE;
                memblock_set_bottom_up(true);
        } else {
                nid = early_cpu_to_node(cpu);
        }

        ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                     limit, nid);
        if (!ptr)
                panic("cannot allocate paca data");

        if (cpu == boot_cpuid)
                memblock_set_bottom_up(false);

        return ptr;
}
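
/*
 * For illustration only (not a call site in this file): allocating the
 * 1kB lppaca for CPU 2, 1kB aligned and below the RMA limit, would look
 * like
 *
 *        lp = alloc_paca_data(0x400, 0x400, ppc64_rma_size, 2);
 *
 * The bottom-up trick above matters only for the boot CPU, whose paca is
 * allocated before the NUMA topology is known.
 */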

#ifdef CONFIG_PPC_PSERIES

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned, and must
 * not cross a 4kB boundary. A 1kB size and 1kB alignment satisfy both
 * requirements: a 1kB-aligned 1kB block always lies entirely within a
 * single 4kB page.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
        BUILD_BUG_ON(sizeof(struct lppaca) != 640);

        *lppaca = (struct lppaca) {
                .desc = cpu_to_be32(0xd397d781),        /* "LpPa" */
                .size = cpu_to_be16(0x400),
                .fpregs_in_use = 1,
                .slb_count = cpu_to_be16(64),
                .vmxregs_in_use = 0,
                .page_ins = 0,
        };
}

static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
        struct lppaca *lp;
        size_t size = 0x400;

        BUILD_BUG_ON(size < sizeof(struct lppaca));

        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                return NULL;

        lp = alloc_paca_data(size, 0x400, limit, cpu);
        init_lppaca(lp);

        return lp;
}
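
/*
 * Side notes for readers: the 0xd397d781 descriptor is "LpPa" encoded in
 * EBCDIC (0xd3 = 'L', 0x97 = 'p', 0xd7 = 'P', 0x81 = 'a'), the convention
 * for hypervisor-visible structures. When the kernel itself runs in
 * hypervisor mode (CPU_FTR_HVMODE, i.e. bare metal) there is no hypervisor
 * above us to share an lppaca with, hence the NULL return. On pSeries the
 * structure is later advertised to the hypervisor (via the H_REGISTER_VPA
 * hcall in the platform code).
 */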
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64

/*
 * 3 persistent SLBs are allocated here.  The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
        struct slb_shadow *s;

        if (cpu != boot_cpuid) {
                /*
                 * Boot CPU comes here before early_radix_enabled
                 * is parsed (e.g., for disable_radix). So allocate
                 * always and this will be fixed up in free_unused_pacas.
                 */
                if (early_radix_enabled())
                        return NULL;
        }

        s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

        s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
        s->buffer_length = cpu_to_be32(sizeof(*s));

        return s;
}
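
/*
 * Lifecycle sketch: the shadow buffer mirrors the bolted SLB entries so
 * the hypervisor can restore them on the partition's behalf; it is only
 * meaningful under the hash MMU, which is why radix gets NULL. The one
 * exception is the boot CPU, whose shadow is allocated unconditionally
 * (radix support is not yet known at that point) and then released by
 * free_unused_pacas() below if radix ends up enabled.
 */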

#endif /* CONFIG_PPC_BOOK3S_64 */

/* The Paca is an array with one entry per processor.  Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor.  The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors.  The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
        new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
        new_paca->kernel_pgd = swapper_pg_dir;
#endif
        new_paca->lock_token = 0x8000;
        new_paca->paca_index = cpu;
        new_paca->kernel_toc = kernel_toc_addr();
        new_paca->kernelbase = (unsigned long) _stext;
        /* Only set MSR:IR/DR when MMU is initialized */
        new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
        new_paca->hw_cpu_id = 0xffff;
        new_paca->kexec_state = KEXEC_STATE_NONE;
        new_paca->__current = &init_task;
        new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
        new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
        /* For now -- if we have threads this will be adjusted later */
        new_paca->tcd_ptr = &new_paca->tcd;
#endif
}
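
/*
 * A usage sketch, assuming the usual 64-bit early-boot flow (the exact
 * call site lives in setup_64.c and may differ between kernel versions):
 * the very first paca is a static boot_paca, initialised before any
 * allocator is available, roughly
 *
 *        initialise_paca(&boot_paca, 0);
 *        setup_paca(&boot_paca);
 *
 * after which the real per-cpu pacas are allocated via allocate_paca().
 * hw_cpu_id (0xffff) and data_offset (the 0xfeee... poison) are
 * deliberately invalid here and are filled in later during boot.
 */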

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
        /* Setup r13 */
        local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
        /* On Book3E, initialize the TLB miss exception frames */
        mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
        /* In HV mode, we setup both HPACA and PACA to avoid problems
         * if we do a GET_PACA() before the feature fixups have been
         * applied
         */
        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
        mtspr(SPRN_SPRG_PACA, local_paca);
}
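
/*
 * Note on the r13 side of this: local_paca is not an ordinary global;
 * asm/paca.h declares it as a fixed register variable, roughly
 *
 *        register struct paca_struct *local_paca asm("r13");
 *
 * so the plain assignment above is what actually loads r13. The SPRG
 * copies exist so low-level exception entry can recover the paca even
 * while r13 holds a user value.
 */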

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

void __init allocate_paca_ptrs(void)
{
        paca_nr_cpu_ids = nr_cpu_ids;

        paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
        if (!paca_ptrs)
                panic("Failed to allocate %d bytes for paca pointers\n",
                      paca_ptrs_size);

        memset(paca_ptrs, 0x88, paca_ptrs_size);
}
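
/*
 * The 0x88 fill above is a poison pattern, not initialisation: entries
 * are only made valid by allocate_paca(), so dereferencing a slot that
 * was never allocated faults on a recognisable 0x8888888888888888-style
 * pointer instead of silently reading a zeroed one.
 */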

void __init allocate_paca(int cpu)
{
        u64 limit;
        struct paca_struct *paca;

        BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We access pacas in real mode, and cannot take SLB faults
         * on them when in virtual mode, so allocate them accordingly.
         */
        limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
        limit = ppc64_rma_size;
#endif

        paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
                                limit, cpu);
        paca_ptrs[cpu] = paca;

        initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
        paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
        paca_struct_size += sizeof(struct paca_struct);
}

void __init free_unused_pacas(void)
{
        int new_ptrs_size;

        new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        if (new_ptrs_size < paca_ptrs_size)
                memblock_free(__pa(paca_ptrs) + new_ptrs_size,
                                        paca_ptrs_size - new_ptrs_size);

        paca_nr_cpu_ids = nr_cpu_ids;
        paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
        if (early_radix_enabled()) {
                /* Ugly fixup, see new_slb_shadow() */
                memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
                                sizeof(struct slb_shadow));
                paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
        }
#endif

        printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
                        paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
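
/*
 * Putting the three together, a sketch of the boot-time sequence (exact
 * call sites live in the arch setup code; the loop shown is illustrative):
 *
 *        allocate_paca_ptrs();                // size the pointer array
 *        for_each_possible_cpu(cpu)
 *                allocate_paca(cpu);          // paca + lppaca + slb shadow
 *        ...
 *        free_unused_pacas();                 // trim once nr_cpu_ids is final
 */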
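/*
 * Mirror the MMU context details of @mm into the current CPU's paca.
 * These are the fields the real-mode SLB miss code consults, since it
 * cannot safely walk the mm itself; callers are expected to run this on
 * context-switch-style paths (a summary based on the fields copied below,
 * not on an audit of every call site).
 */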
void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
        mm_context_t *context = &mm->context;

        get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
        VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
        get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
        memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
               LOW_SLICE_ARRAY_SZ);
        memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
               TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
        return;
#endif
}