linux/arch/arm64/kernel/patching.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

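/*
 * patch_lock serializes all writes through the text-poke fixmap so that
 * concurrent patchers cannot race on the FIX_TEXT_POKE0 slot.
 */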
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
        /* discarded with init text/data */
        return system_state < SYSTEM_RUNNING &&
                addr >= (unsigned long)__exittext_begin &&
                addr < (unsigned long)__exittext_end;
}

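/*
 * True when @addr lies in the kernel image proper (core kernel text) or in
 * .exit.text before it has been discarded.
 */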
static bool is_image_text(unsigned long addr)
{
        return core_kernel_text(addr) || is_exit_text(addr);
}

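/*
 * Map the page containing @addr at the given text-poke fixmap slot so it can
 * be written even though the kernel/module text mapping is read-only. When
 * neither the image nor STRICT_MODULE_RWX applies, the address is returned
 * unchanged and the write happens in place.
 */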
static void __kprobes *patch_map(void *addr, int fixmap)
{
        unsigned long uintaddr = (uintptr_t) addr;
        bool image = is_image_text(uintaddr);
        struct page *page;

        if (image)
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else
                return addr;

        BUG_ON(!page);
        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                        (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        __le32 val;

        ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}

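/*
 * Write one already-little-endian instruction word through the text-poke
 * fixmap, with patch_lock held so that users of FIX_TEXT_POKE0 are
 * serialized.
 */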
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
        void *waddr = addr;
        unsigned long flags = 0;
        int ret;

        raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

        patch_unmap(FIX_TEXT_POKE0);
        raw_spin_unlock_irqrestore(&patch_lock, flags);

        return ret;
}

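/* Convert to little-endian (if necessary) and write a single instruction. */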
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

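/*
 * Patch a single instruction and clean/invalidate the caches to the PoU,
 * but do not synchronize other CPUs; callers are responsible for any
 * cross-CPU synchronization the change requires.
 */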
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                caches_clean_inval_pou((uintptr_t)tp,
                                     (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}

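/*
 * Work description handed to the stop_machine() callback: the addresses to
 * patch, the new instruction words, and a counter used to synchronize the
 * patching CPU with the spinning secondaries.
 */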
struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};

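/*
 * stop_machine() callback: the first CPU to arrive does all of the patching,
 * then signals completion with an extra increment of cpu_count; every other
 * CPU spins until that signal and finally executes an ISB to discard any
 * stale instructions it may already have fetched.
 */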
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The first CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == 1) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /* Notify other processors with an additional increment. */
                atomic_inc(&pp->cpu_count);
        } else {
                while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                        cpu_relax();
                isb();
        }

        return ret;
}

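/*
 * Patch @cnt instructions under stop_machine() so that no other CPU can be
 * executing the affected text while it is rewritten.
 */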
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
                                       cpu_online_mask);
}

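/*
 * Illustrative (hypothetical) usage sketch, not part of the upstream file:
 * patching one known text location to a NOP. aarch64_insn_gen_nop() is
 * provided by <asm/insn.h>; "some_text_addr" is a placeholder for an
 * address the caller is entitled to patch.
 *
 *      void *addrs[] = { some_text_addr };
 *      u32 insns[] = { aarch64_insn_gen_nop() };
 *
 *      if (aarch64_insn_patch_text(addrs, insns, 1))
 *              pr_warn("text patching failed\n");
 */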