linux/arch/powerpc/kernel/paca.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
#include <linux/pgtable.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include <asm/rtas.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

static void *__init alloc_paca_data(unsigned long size, unsigned long align,
                                unsigned long limit, int cpu)
{
        void *ptr;
        int nid;

        /*
         * The boot_cpuid paca is allocated very early, before cpu_to_node
         * is up. Set bottom-up mode, because the boot CPU should be on
         * node-0, which will put its paca in the right place.
         */
        if (cpu == boot_cpuid) {
                nid = NUMA_NO_NODE;
                memblock_set_bottom_up(true);
        } else {
                nid = early_cpu_to_node(cpu);
        }

        ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                     limit, nid);
        if (!ptr)
                panic("cannot allocate paca data");

        if (cpu == boot_cpuid)
                memblock_set_bottom_up(false);

        return ptr;
}
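
/*
 * Illustrative sketch (not part of this file): the bottom-up trick above
 * in isolation. On a hypothetical two-node layout with node 0 starting at
 * physical 0x0 and node 1 above it, a bottom-up allocation lands in node 0
 * memory even though the NUMA topology has not been parsed yet:
 *
 *	memblock_set_bottom_up(true);
 *	// allocates from the lowest free memory, i.e. node 0 on this layout
 *	p = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 *				   limit, NUMA_NO_NODE);
 *	memblock_set_bottom_up(false);	// restore the default top-down policy
 */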

#ifdef CONFIG_PPC_PSERIES

#define LPPACA_SIZE 0x400

static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
                                        int cpu)
{
        size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
        static unsigned long shared_lppaca_size;
        static void *shared_lppaca;
        void *ptr;

        if (!shared_lppaca) {
                memblock_set_bottom_up(true);

                /*
                 * See Documentation/powerpc/ultravisor.rst for more details.
                 *
                 * UV/HV data sharing is in PAGE_SIZE granularity. In order to
                 * minimize the number of pages shared, align the allocation to
                 * PAGE_SIZE.
                 */
                shared_lppaca =
                        memblock_alloc_try_nid(shared_lppaca_total_size,
                                               PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
                                               limit, NUMA_NO_NODE);
                if (!shared_lppaca)
                        panic("cannot allocate shared data");

                memblock_set_bottom_up(false);
                uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
                              shared_lppaca_total_size >> PAGE_SHIFT);
        }

        ptr = shared_lppaca + shared_lppaca_size;
        shared_lppaca_size += size;

        /*
         * This is very early in boot, so no harm done if the kernel crashes at
         * this point.
         */
        BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

        return ptr;
}
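
/*
 * Worked example (not part of this file): with 4K pages and, say,
 * nr_cpu_ids == 8, the pool is PAGE_ALIGN(8 * 0x400) = 0x2000 bytes,
 * i.e. exactly two pages, and a single uv_share_page() call exposes
 * every lppaca to the hypervisor. Because the pool is PAGE_SIZE
 * aligned, no unrelated early allocation shares those pages; sharing
 * an unaligned 1K lppaca would instead expose whatever else happens
 * to sit in its containing page.
 */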

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned,
 * and must not cross a 4kB boundary. A 1kB size and 1kB alignment will
 * satisfy these requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
        BUILD_BUG_ON(sizeof(struct lppaca) != 640);

        *lppaca = (struct lppaca) {
                .desc = cpu_to_be32(0xd397d781),        /* "LpPa" */
                .size = cpu_to_be16(LPPACA_SIZE),
                .fpregs_in_use = 1,
                .slb_count = cpu_to_be16(64),
                .vmxregs_in_use = 0,
                .page_ins = 0, };
}

static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
        struct lppaca *lp;

        BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);

        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                return NULL;

        if (is_secure_guest())
                lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
        else
                lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

        init_lppaca(lp);

        return lp;
}
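
/*
 * Worked example (not part of this file): why 1kB size plus 1kB alignment
 * implies no 4kB crossing. A 1kB-aligned address is 0x000, 0x400, 0x800 or
 * 0xc00 modulo 0x1000, and each of those offsets leaves at least 0x400
 * bytes before the next 4kB boundary. The worst case:
 *
 *	0x0c00 + 0x400 = 0x1000   // ends exactly on the boundary, no crossing
 */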
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64

/*
 * 3 persistent SLBs are allocated here.  The buffer will be zero
 * initially, hence all entries will be invalid until we actually
 * write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
        struct slb_shadow *s;

        if (cpu != boot_cpuid) {
                /*
                 * The boot CPU comes here before early_radix_enabled
                 * is parsed (e.g., for disable_radix), so always
                 * allocate for it; this is fixed up in free_unused_pacas.
                 */
                if (early_radix_enabled())
                        return NULL;
        }

        s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

        s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
        s->buffer_length = cpu_to_be32(sizeof(*s));

        return s;
}

#endif /* CONFIG_PPC_BOOK3S_64 */
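
/*
 * Illustrative sketch (not part of this file), modeled on the update
 * pattern in arch/powerpc/mm/book3s64/slb.c: writers of a shadow entry
 * clear the ESID first, so the hypervisor never observes a half-written
 * entry while restoring bolted SLBs:
 *
 *	WRITE_ONCE(s->save_area[index].esid, 0);	// invalidate entry first
 *	WRITE_ONCE(s->save_area[index].vsid, cpu_to_be64(vsid_data));
 *	WRITE_ONCE(s->save_area[index].esid, cpu_to_be64(esid_data));
 */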

#ifdef CONFIG_PPC_PSERIES
/**
 * new_rtas_args() - Allocates rtas args
 * @cpu:        CPU number
 * @limit:      Memory limit for this allocation
 *
 * Allocates a struct rtas_args and returns a pointer to it,
 * if not in hypervisor mode.
 *
 * Return:      Pointer to the allocated rtas_args, or
 *              NULL if the CPU is in hypervisor mode
 */
static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit)
{
        limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);

        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                return NULL;

        return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES,
                               limit, cpu);
}
#endif /* CONFIG_PPC_PSERIES */
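
/*
 * Worked example (not part of this file): the min_t() clamp above. RTAS
 * cannot be entered with arguments at or above RTAS_INSTANTIATE_MAX (see
 * asm/rtas.h for the exact bound), so the smaller limit always wins:
 *
 *	// e.g. a 2GB RMA-derived limit against a hypothetical 1GB
 *	// RTAS_INSTANTIATE_MAX yields limit == 1GB
 *	limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);
 */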

/* The Paca is an array with one entry per processor.  Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor.  The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors.  The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);
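
/*
 * Illustrative sketch (not part of this file): code elsewhere in the
 * kernel reaches another CPU's paca through this array, for example
 * (hypothetical use):
 *
 *	// valid once allocate_paca(cpu) has filled in this slot
 *	int hwid = paca_ptrs[cpu]->hw_cpu_id;
 */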

void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
        new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
        new_paca->kernel_pgd = swapper_pg_dir;
#endif
        new_paca->lock_token = 0x8000;
        new_paca->paca_index = cpu;
        new_paca->kernel_toc = kernel_toc_addr();
        new_paca->kernelbase = (unsigned long) _stext;
        /* Only set MSR:IR/DR when MMU is initialized */
        new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
        new_paca->hw_cpu_id = 0xffff;
        new_paca->kexec_state = KEXEC_STATE_NONE;
        new_paca->__current = &init_task;
        new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
        new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
        /* For now -- if we have threads this will be adjusted later */
        new_paca->tcd_ptr = &new_paca->tcd;
#endif

#ifdef CONFIG_PPC_PSERIES
        new_paca->rtas_args_reentrant = NULL;
#endif
}
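
/*
 * Illustrative note (not part of this file): data_offset is poisoned with
 * 0xfeeeeeeeeeeeeeee so that a per-cpu access issued before the real
 * offsets are set up resolves to a wild, easily recognized address and
 * faults, instead of silently reading some other CPU's data, e.g.:
 *
 *	// hypothetical premature access; faults on the poisoned offset
 *	this_cpu_read(some_counter);
 */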

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
        /* Setup r13 */
        local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
        /* On Book3E, initialize the TLB miss exception frames */
        mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
        /*
         * In HV mode, we setup both HPACA and PACA to avoid problems
         * if we do a GET_PACA() before the feature fixups have been
         * applied.
         *
         * Normally you should test against CPU_FTR_HVMODE, but CPU features
         * are not yet set up when we first reach here.
         */
        if (mfmsr() & MSR_HV)
                mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
        mtspr(SPRN_SPRG_PACA, local_paca);
}

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

void __init allocate_paca_ptrs(void)
{
        paca_nr_cpu_ids = nr_cpu_ids;

        paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
        if (!paca_ptrs)
                panic("Failed to allocate %d bytes for paca pointers\n",
                      paca_ptrs_size);

        memset(paca_ptrs, 0x88, paca_ptrs_size);
}
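
/*
 * Illustrative note (not part of this file): memblock_alloc_raw() returns
 * uninitialized memory, and the 0x88 fill poisons every slot so that
 * dereferencing a paca pointer before allocate_paca() has filled it in
 * faults on a recognizable address:
 *
 *	// each unallocated slot reads back as 0x8888888888888888
 *	paca_ptrs[cpu]->paca_index;	// hypothetical premature access, faults
 */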

void __init allocate_paca(int cpu)
{
        u64 limit;
        struct paca_struct *paca;

        BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We access pacas in real mode, and cannot take SLB faults
         * on them when in virtual mode, so allocate them accordingly.
         */
        limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
        limit = ppc64_rma_size;
#endif

        paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
                                limit, cpu);
        paca_ptrs[cpu] = paca;

        initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
        paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
#ifdef CONFIG_PPC_PSERIES
        paca->rtas_args_reentrant = new_rtas_args(cpu, limit);
#endif
        paca_struct_size += sizeof(struct paca_struct);
}
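
/*
 * Illustrative sketch (not part of this file): the expected calling
 * sequence during early boot, with the real call sites simplified:
 *
 *	allocate_paca_ptrs();		// size the array from nr_cpu_ids
 *	for_each_possible_cpu(cpu)
 *		allocate_paca(cpu);	// fill in each slot
 *	free_unused_pacas();		// trim once nr_cpu_ids is final
 */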

void __init free_unused_pacas(void)
{
        int new_ptrs_size;

        new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        if (new_ptrs_size < paca_ptrs_size)
                memblock_free(__pa(paca_ptrs) + new_ptrs_size,
                                        paca_ptrs_size - new_ptrs_size);

        paca_nr_cpu_ids = nr_cpu_ids;
        paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
        if (early_radix_enabled()) {
                /* Ugly fixup, see new_slb_shadow() */
                memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
                                sizeof(struct slb_shadow));
                paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
        }
#endif

        printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
                        paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
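
/*
 * Worked example (not part of this file): if the kernel was built with
 * NR_CPUS = 2048 but the platform reports only 8 CPUs, then on a 64-bit
 * kernel:
 *
 *	paca_ptrs_size = 8 * 2048 = 16384 bytes	  // initial allocation
 *	new_ptrs_size  = 8 * 8    = 64 bytes
 *	// the trailing 16320 bytes are handed back to memblock
 */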

void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
        mm_context_t *context = &mm->context;

#ifdef CONFIG_PPC_MM_SLICES
        VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
        memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
               LOW_SLICE_ARRAY_SZ);
        memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
               TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
        return;
#endif
}
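
/*
 * Illustrative note (not part of this file): the slice page-size arrays
 * are mirrored into the paca so low-level fault paths can find a slice's
 * page size from r13 alone, without dereferencing the mm_struct, e.g.
 * (hypothetical lookup into the packed per-slice entries):
 *
 *	u8 entry = get_paca()->mm_ctx_high_slices_psize[index];
 */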