linux/arch/powerpc/kernel/paca.c
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

static void *__init alloc_paca_data(unsigned long size, unsigned long align,
                                unsigned long limit, int cpu)
{
        unsigned long pa;
        int nid;

        /*
         * boot_cpuid paca is allocated very early before cpu_to_node is up.
         * Set bottom-up mode, because the boot CPU should be on node-0,
         * which will put its paca in the right place.
         */
        if (cpu == boot_cpuid) {
                nid = -1;
                memblock_set_bottom_up(true);
        } else {
                nid = early_cpu_to_node(cpu);
        }

        pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE);
        if (!pa) {
                pa = memblock_alloc_base(size, align, limit);
                if (!pa)
                        panic("cannot allocate paca data");
        }

        if (cpu == boot_cpuid)
                memblock_set_bottom_up(false);

        return __va(pa);
}
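
/*
 * A minimal usage sketch (illustrative only; the real callers are
 * new_lppaca(), new_slb_shadow() and allocate_paca() below):
 *
 *      s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
 *      memset(s, 0, sizeof(*s));
 *
 * The allocation is tried node-local first, then anywhere below
 * 'limit', and panics on total failure, so callers never see NULL --
 * but the memory is not zeroed for them.
 */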

#ifdef CONFIG_PPC_PSERIES

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned, and
 * must not cross a 4kB boundary. A 1kB size and 1kB alignment satisfy
 * these requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
        BUILD_BUG_ON(sizeof(struct lppaca) != 640);

        *lppaca = (struct lppaca) {
                .desc = cpu_to_be32(0xd397d781),        /* "LpPa" */
                .size = cpu_to_be16(0x400),
                .fpregs_in_use = 1,
                .slb_count = cpu_to_be16(64),
                .vmxregs_in_use = 0,
                .page_ins = 0,
        };
}

static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
        struct lppaca *lp;
        size_t size = 0x400;

        BUILD_BUG_ON(size < sizeof(struct lppaca));

        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                return NULL;

        lp = alloc_paca_data(size, 0x400, limit, cpu);
        init_lppaca(lp);

        return lp;
}
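
/*
 * Worked example of the size/alignment constraint above (a sketch, not
 * a functional change): with size == align == 0x400, an lppaca occupies
 * exactly one 1kB-aligned slot [A, A + 0x400) where A % 0x400 == 0.
 * Every 4kB boundary is also a multiple of 0x400, so a slot can end on
 * one but never straddle it; e.g. A = 0x1c00 covers 0x1c00..0x1fff and
 * stops just short of the 0x2000 boundary.
 */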
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64

/*
 * 3 persistent SLBs are allocated here.  The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
        struct slb_shadow *s;

        if (cpu != boot_cpuid) {
                /*
                 * The boot CPU comes here before early_radix_enabled()
                 * has been parsed from the command line (e.g., for
                 * disable_radix), so always allocate for it; this is
                 * fixed up in free_unused_pacas().
                 */
                if (early_radix_enabled())
                        return NULL;
        }

        s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
        memset(s, 0, sizeof(*s));

        s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
        s->buffer_length = cpu_to_be32(sizeof(*s));

        return s;
}
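
/*
 * Note (derived from the flow above): the boot CPU always gets a shadow
 * buffer because the radix decision is not yet known when it allocates.
 * If radix ends up enabled, free_unused_pacas() frees that buffer again
 * and clears slb_shadow_ptr.
 */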

#endif /* CONFIG_PPC_BOOK3S_64 */

/* The Paca is an array with one entry per processor.  Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor.  The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors.  The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
        new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
        new_paca->kernel_pgd = swapper_pg_dir;
#endif
        new_paca->lock_token = 0x8000;
        new_paca->paca_index = cpu;
        new_paca->kernel_toc = kernel_toc_addr();
        new_paca->kernelbase = (unsigned long) _stext;
        /* Only set MSR:IR/DR when MMU is initialized */
        new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
        new_paca->hw_cpu_id = 0xffff;
        new_paca->kexec_state = KEXEC_STATE_NONE;
        new_paca->__current = &init_task;
        /* Poison value; replaced once the per-cpu data offset is known */
        new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
        new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
        /* For now -- if we have threads this will be adjusted later */
        new_paca->tcd_ptr = &new_paca->tcd;
#endif
}

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
        /* Setup r13 */
        local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
        /* On Book3E, initialize the TLB miss exception frames */
        mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
        /*
         * In HV mode, we set up both HPACA and PACA to avoid problems
         * if we do a GET_PACA() before the feature fixups have been
         * applied.
         */
        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
        mtspr(SPRN_SPRG_PACA, local_paca);
}
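
/*
 * A minimal usage sketch (modelled on the early boot path in
 * setup_64.c; treat the exact call site as an assumption, not a
 * contract):
 *
 *      initialise_paca(&boot_paca, 0);
 *      setup_paca(&boot_paca);
 *
 * After this, local_paca (r13) and the SPRG-based GET_PACA() assembly
 * both resolve to the new paca, so per-CPU accesses work this early.
 */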

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

void __init allocate_paca_ptrs(void)
{
        paca_nr_cpu_ids = nr_cpu_ids;

        paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
        /* Poison the array so a not-yet-allocated entry faults loudly */
        memset(paca_ptrs, 0x88, paca_ptrs_size);
}

void __init allocate_paca(int cpu)
{
        u64 limit;
        struct paca_struct *paca;

        BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We access pacas in real mode, and cannot take SLB faults
         * on them when in virtual mode, so allocate them accordingly.
         */
        limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
        limit = ppc64_rma_size;
#endif

        paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
                                limit, cpu);
        paca_ptrs[cpu] = paca;
        memset(paca, 0, sizeof(struct paca_struct));

        initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
        paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
        paca_struct_size += sizeof(struct paca_struct);
}

void __init free_unused_pacas(void)
{
        int new_ptrs_size;

        new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
        if (new_ptrs_size < paca_ptrs_size)
                memblock_free(__pa(paca_ptrs) + new_ptrs_size,
                                        paca_ptrs_size - new_ptrs_size);

        paca_nr_cpu_ids = nr_cpu_ids;
        paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
        if (early_radix_enabled()) {
                /* Ugly fixup, see new_slb_shadow() */
                memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
                                sizeof(struct slb_shadow));
                paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
        }
#endif

        printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
                        paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
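
/*
 * Typical boot-time ordering (a sketch; the exact call sites live in
 * common setup code, not in this file):
 *
 *      allocate_paca_ptrs();           once nr_cpu_ids is first known
 *      allocate_paca(cpu);             for each CPU added to the maps
 *      ...
 *      free_unused_pacas();            once nr_cpu_ids is final
 */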

void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
        mm_context_t *context = &mm->context;

        get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
        VM_BUG_ON(!mm->context.slb_addr_limit);
        get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
        memcpy(&get_paca()->mm_ctx_low_slices_psize,
               &context->low_slices_psize, sizeof(context->low_slices_psize));
        memcpy(&get_paca()->mm_ctx_high_slices_psize,
               &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
#else /* CONFIG_PPC_MM_SLICES */
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
        return;
#endif
}
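/*
 * Note on usage (an inference from context, not stated in this file):
 * copy_mm_to_paca() mirrors mm context fields into the paca so that
 * low-level SLB handling can consult them without touching the
 * mm_struct; it is expected to run on the context-switch path with the
 * incoming mm.
 */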