linux/arch/arm64/kernel/alternative.c
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>

#define __ALT_PTR(a, f)         ((u32 *)((void *)&(a)->f + (a)->f))
#define ALT_ORIG_PTR(a)         __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)         __ALT_PTR(a, alt_offset)
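
/*
 * For reference, each patch record decoded by the macros above looks
 * roughly like this (the authoritative definition lives in
 * asm/alternative.h; both offsets are self-relative, which is why
 * __ALT_PTR() adds a field's value to the field's own address):
 *
 *      struct alt_instr {
 *              s32 orig_offset;        offset to original instruction
 *              s32 alt_offset;         offset to replacement instruction
 *              u16 cpufeature;         cpufeature bit set for replacement
 *              u8  orig_len;           size of original instruction(s)
 *              u8  alt_len;            size of new instruction(s)
 *      };
 */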

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

struct alt_region {
        struct alt_instr *begin;
        struct alt_instr *end;
};

/*
 * Decide whether a branch target needs fixing up: returns true if the
 * target PC lies in kernel text and the branch must be re-encoded once
 * the instruction moves to the patch site, false if it lies within this
 * alternative's own replacement sequence and is therefore already
 * correct relative to the copied block.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
        unsigned long replptr;

        if (kernel_text_address(pc))
                return true;

        replptr = (unsigned long)ALT_REPL_PTR(alt);
        if (pc >= replptr && pc <= (replptr + alt->alt_len))
                return false;

        /*
         * Branching into *another* alternate sequence is doomed, and
         * we're not even trying to fix it up.
         */
        BUG();
}
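
/*
 * Note: the ordering of the checks above relies on the replacement
 * sequences (.altinstr_replacement) being linked outside the
 * _stext.._etext range, at least on this vintage of vmlinux.lds.S, so
 * kernel_text_address() is false for addresses inside a replacement
 * sequence and true for ordinary branch targets.
 */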

static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
{
        u32 insn;

        /* A64 instructions are always little-endian, whatever the data endianness */
        insn = le32_to_cpu(*altinsnptr);

        if (aarch64_insn_is_branch_imm(insn)) {
                s32 offset = aarch64_get_branch_offset(insn);
                unsigned long target;

                target = (unsigned long)altinsnptr + offset;

                /*
                 * If we're branching inside the alternate sequence,
                 * do not rewrite the instruction, as it is already
                 * correct. Otherwise, generate the new instruction.
                 */
                if (branch_insn_requires_update(alt, target)) {
                        offset = target - (unsigned long)insnptr;
                        insn = aarch64_set_branch_offset(insn, offset);
                }
        }

        return insn;
}
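
/*
 * Worked example with made-up addresses: say a replacement sequence at
 * 0x1000 starts with a "b" whose encoded offset is +0x200, i.e. it was
 * assembled to reach 0x1200. If 0x1200 is ordinary kernel text, copying
 * the instruction verbatim to a patch site at 0x3000 would silently
 * retarget the branch to 0x3200. get_alt_insn() therefore re-encodes
 * the offset as 0x1200 - 0x3000 = -0x1e00, so the relocated branch
 * still reaches the original absolute target.
 */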

static void __apply_alternatives(void *alt_region)
{
        struct alt_instr *alt;
        struct alt_region *region = alt_region;
        u32 *origptr, *replptr;

        for (alt = region->begin; alt < region->end; alt++) {
                u32 insn;
                int i, nr_inst;

                if (!cpus_have_cap(alt->cpufeature))
                        continue;

                BUG_ON(alt->alt_len != alt->orig_len);

                pr_info_once("patching kernel code\n");

                origptr = ALT_ORIG_PTR(alt);
                replptr = ALT_REPL_PTR(alt);
                nr_inst = alt->alt_len / sizeof(insn);

                for (i = 0; i < nr_inst; i++) {
                        insn = get_alt_insn(alt, origptr + i, replptr + i);
                        *(origptr + i) = cpu_to_le32(insn);
                }

                /* Make the patched range coherent between the I and D sides */
                flush_icache_range((uintptr_t)origptr,
                                   (uintptr_t)(origptr + nr_inst));
        }
}
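
/*
 * For context, patch sites are normally emitted with the ALTERNATIVE()
 * macro from asm/alternative.h, which records the original/replacement
 * pair in the .altinstructions and .altinstr_replacement sections
 * walked above. A minimal sketch, where ARM64_HAS_FOO stands in for a
 * real capability bit:
 *
 *      asm volatile(ALTERNATIVE("nop", "isb", ARM64_HAS_FOO));
 *
 * If CPU feature detection sets the capability, the loop in
 * __apply_alternatives() overwrites the "nop" at the call site with
 * "isb".
 */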

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here: CPU 0 does the patching while
 * every other CPU spins on a private flag, rather than trusting code
 * that may be rewritten under its feet.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
        static int patched = 0;
        struct alt_region region = {
                .begin  = __alt_instructions,
                .end    = __alt_instructions_end,
        };

        /* We always have a CPU 0 at this point (__init) */
        if (smp_processor_id()) {
                while (!READ_ONCE(patched))
                        cpu_relax();
                /* Discard any instructions prefetched before the patching */
                isb();
        } else {
                BUG_ON(patched);
                __apply_alternatives(&region);
                /* Barriers provided by the cache flushing */
                WRITE_ONCE(patched, 1);
        }

        return 0;
}

void __init apply_alternatives_all(void)
{
        /* better not try code patching on a live SMP system */
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
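
/*
 * Non-stop_machine() variant for code that no CPU is executing yet;
 * this is what the module loader is expected to use on a module's
 * .altinstructions section before the module code goes live (see
 * module_finalize() in arch/arm64/kernel/module.c).
 */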
void apply_alternatives(void *start, size_t length)
{
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };

        __apply_alternatives(&region);
}