linux/arch/arm64/kernel/alternative.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
#include <linux/stop_machine.h>

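/*
 * Each struct alt_instr in .altinstructions records its original and
 * replacement sequences as offsets relative to the offset field itself;
 * the helpers below turn those back into pointers. Entries are emitted
 * at the use site by the ALTERNATIVE() macro, roughly like this
 * (illustrative only; ARM64_SOME_CAP is a placeholder cpucap):
 *
 *      asm(ALTERNATIVE("nop", "isb", ARM64_SOME_CAP));
 */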
#define __ALT_PTR(a, f)         ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)         __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)         __ALT_PTR(a, alt_offset)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;

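/* Tracks which cpucap alternatives have already been applied. */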
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

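/* A contiguous range of struct alt_instr entries to process. */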
struct alt_region {
        struct alt_instr *begin;
        struct alt_instr *end;
};

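/* Returns true once the alternatives for @cpufeature have been patched in. */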
bool alternative_is_applied(u16 cpufeature)
{
        if (WARN_ON(cpufeature >= ARM64_NCAPS))
                return false;

        return test_bit(cpufeature, applied_alternatives);
}

/*
 * Check whether the branch target PC lies outside the alternative
 * (replacement) sequence; if it does, the branch offset must be rewritten
 * for the instruction's new location.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
        unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
        return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

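/* Round @x down to a multiple of @a, which must be a power of two. */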
#define align_down(x, a)        ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

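/*
 * Fetch one instruction from the replacement sequence and, if it is
 * PC-relative (an immediate branch or adrp), rewrite its offset so that
 * it remains correct when executed from the original location.
 */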
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
        u32 insn;

        insn = le32_to_cpu(*altinsnptr);

        if (aarch64_insn_is_branch_imm(insn)) {
                s32 offset = aarch64_get_branch_offset(insn);
                unsigned long target;

                target = (unsigned long)altinsnptr + offset;

                /*
                 * If we're branching inside the alternate sequence,
                 * do not rewrite the instruction, as it is already
                 * correct. Otherwise, generate the new instruction.
                 */
                if (branch_insn_requires_update(alt, target)) {
                        offset = target - (unsigned long)insnptr;
                        insn = aarch64_set_branch_offset(insn, offset);
                }
        } else if (aarch64_insn_is_adrp(insn)) {
                s32 orig_offset, new_offset;
                unsigned long target;

                /*
                 * If we're replacing an adrp instruction, which uses PC-relative
                 * immediate addressing, adjust the offset to reflect the new
                 * PC. adrp operates on 4K aligned addresses.
                 */
                orig_offset = aarch64_insn_adrp_get_offset(insn);
                target = align_down(altinsnptr, SZ_4K) + orig_offset;
                new_offset = target - align_down(insnptr, SZ_4K);
                insn = aarch64_insn_adrp_set_offset(insn, new_offset);
        } else if (aarch64_insn_uses_literal(insn)) {
                /*
                 * Disallow patching unhandled instructions using PC relative
                 * literal addresses
                 */
                BUG();
        }

        return insn;
}

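/*
 * Default patching callback: copy @nr_inst instructions from the
 * replacement sequence into @updptr, fixing up PC-relative instructions
 * via get_alt_insn() along the way.
 */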
static void patch_alternative(struct alt_instr *alt,
                              __le32 *origptr, __le32 *updptr, int nr_inst)
{
        __le32 *replptr;
        int i;

        replptr = ALT_REPL_PTR(alt);
        for (i = 0; i < nr_inst; i++) {
                u32 insn;

                insn = get_alt_insn(alt, origptr + i, replptr + i);
                updptr[i] = cpu_to_le32(insn);
        }
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
        u64 cur, d_size, ctr_el0;

        ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
        d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
                                                           CTR_DMINLINE_SHIFT);
        cur = start & ~(d_size - 1);
        do {
                /*
                 * We must clean+invalidate to the PoC in order to avoid
                 * Cortex-A53 errata 826319, 827319, 824069 and 819472
                 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
                 */
                asm volatile("dc civac, %0" : : "r" (cur) : "memory");
        } while (cur += d_size, cur < end);
}

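/*
 * Walk @region and patch each alternative whose cpucap is set in
 * @feature_mask and, for ordinary capabilities, detected on the CPUs.
 * ARM64_CB_PATCH entries carry a callback in place of a replacement
 * sequence and are applied unconditionally.
 */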
static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
                                 unsigned long *feature_mask)
{
        struct alt_instr *alt;
        __le32 *origptr, *updptr;
        alternative_cb_t alt_cb;

        for (alt = region->begin; alt < region->end; alt++) {
                int nr_inst;

                if (!test_bit(alt->cpufeature, feature_mask))
                        continue;

                /* Use ARM64_CB_PATCH as an unconditional patch */
                if (alt->cpufeature < ARM64_CB_PATCH &&
                    !cpus_have_cap(alt->cpufeature))
                        continue;

                if (alt->cpufeature == ARM64_CB_PATCH)
                        BUG_ON(alt->alt_len != 0);
                else
                        BUG_ON(alt->alt_len != alt->orig_len);

                pr_info_once("patching kernel code\n");

                origptr = ALT_ORIG_PTR(alt);
                updptr = is_module ? origptr : lm_alias(origptr);
                nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

                if (alt->cpufeature < ARM64_CB_PATCH)
                        alt_cb = patch_alternative;
                else
                        alt_cb = ALT_REPL_PTR(alt);

                alt_cb(alt, origptr, updptr, nr_inst);

                if (!is_module) {
                        clean_dcache_range_nopatch((u64)origptr,
                                                   (u64)(origptr + nr_inst));
                }
        }

        /*
         * The core module code takes care of cache maintenance in
         * flush_module_icache().
         */
        if (!is_module) {
                dsb(ish);
                icache_inval_all_pou();
                isb();

                /* Ignore ARM64_CB bit from feature mask */
                bitmap_or(applied_alternatives, applied_alternatives,
                          feature_mask, ARM64_NCAPS);
                bitmap_and(applied_alternatives, applied_alternatives,
                           cpu_hwcaps, ARM64_NCAPS);
        }
}

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
        struct alt_region region = {
                .begin  = (struct alt_instr *)__alt_instructions,
                .end    = (struct alt_instr *)__alt_instructions_end,
        };

        /* We always have a CPU 0 at this point (__init) */
        if (smp_processor_id()) {
                while (!all_alternatives_applied)
                        cpu_relax();
                isb();
        } else {
                DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

                bitmap_complement(remaining_capabilities, boot_capabilities,
                                  ARM64_NPATCHABLE);

                BUG_ON(all_alternatives_applied);
                __apply_alternatives(&region, false, remaining_capabilities);
                /* Barriers provided by the cache flushing */
                all_alternatives_applied = 1;
        }

        return 0;
}

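/*
 * Patch everything that was not already handled by the boot-time pass.
 * Runs under stop_machine() since live kernel text is being rewritten.
 */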
void __init apply_alternatives_all(void)
{
        /* better not try code patching on a live SMP system */
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
        struct alt_region region = {
                .begin  = (struct alt_instr *)__alt_instructions,
                .end    = (struct alt_instr *)__alt_instructions_end,
        };

        /* If called on non-boot cpu things could go wrong */
        WARN_ON(smp_processor_id() != 0);

        __apply_alternatives(&region, false, &boot_capabilities[0]);
}

#ifdef CONFIG_MODULES
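/*
 * Patch the alternatives inside a newly loaded module. Every capability
 * is offered here; __apply_alternatives() still filters on cpus_have_cap().
 * Cache maintenance is left to the module loader.
 */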
void apply_alternatives_module(void *start, size_t length)
{
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };
        DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

        bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

        __apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif