linux/arch/ia64/kernel/patch.c
/*
 * Instruction-patching support.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/unistd.h>

/*
 * This was adapted from code written by Tony Luck:
 *
 * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
 * like this:
 *
 * 6  6         5         4         3         2         1
 * 3210987654321098765432109876543210987654321098765432109876543210
 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
 *
 * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
 */
static u64
get_imm64 (u64 insn_addr)
{
        u64 *p = (u64 *) (insn_addr & -16);     /* mask out slot number */

        return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
                ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
                ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
                ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
                ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
                ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
                ((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
}

/* Patch instruction with "val" where "mask" has 1 bits. */
void
ia64_patch (u64 insn_addr, u64 mask, u64 val)
{
        u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
#       define insn_mask ((1UL << 41) - 1)
        unsigned long shift;

        b0 = b[0]; b1 = b[1];
        shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
        if (shift >= 64) {
                m1 = mask << (shift - 64);
                v1 = val << (shift - 64);
        } else {
                m0 = mask << shift; m1 = mask >> (64 - shift);
                v0 = val  << shift; v1 = val >> (64 - shift);
                b[0] = (b0 & ~m0) | (v0 & m0);
        }
        b[1] = (b1 & ~m1) | (v1 & m1);
}
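
/*
 * Worked example (illustrative): for slot 1, shift = 5 + 41*1 = 46, so a
 * 41-bit instruction straddles the two bundle words: its low 18 bits land
 * in b[0] (bits 46..63) and the remaining 23 bits in b[1].  For slot 2,
 * shift = 5 + 41*2 = 87 >= 64 and only b[1] is written; for slot 0 the
 * instruction fits entirely in b[0], and "mask >> 59" is zero for any
 * 41-bit mask, so b[1] is rewritten unchanged.
 */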

void
ia64_patch_imm64 (u64 insn_addr, u64 val)
{
        /* The assembler may generate offset pointing to either slot 1
           or slot 2 for a long (2-slot) instruction, occupying slots 1
           and 2.  */
        insn_addr &= -16UL;
        ia64_patch(insn_addr + 2,
                   0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                                     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                                     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                                     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
                                     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
        ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}
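
/*
 * A minimal sanity-check sketch (not part of the build): ia64_patch_imm64()
 * scatters a value exactly as get_imm64() gathers it, so patching a movl
 * bundle and reading it back must round-trip.  The helper name and the
 * writable scratch bundle are hypothetical.
 */
#if 0
static void __init check_imm64_roundtrip(u64 movl_addr, u64 val)
{
        ia64_patch_imm64(movl_addr, val);       /* scatter val into the bundle */
        BUG_ON(get_imm64(movl_addr) != val);    /* gather must give it back */
}
#endif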

void
ia64_patch_imm60 (u64 insn_addr, u64 val)
{
        /* The assembler may generate offset pointing to either slot 1
           or slot 2 for a long (2-slot) instruction, occupying slots 1
           and 2.  */
        insn_addr &= -16UL;
        ia64_patch(insn_addr + 2,
                   0x011ffffe000UL, (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                                     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
        ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
}
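
/*
 * Illustrative sketch (not built): the 60-bit immediate of a "brl" is an
 * IP-relative displacement counted in 16-byte bundles, so retargeting a
 * tagged brl looks like the computation in patch_brl_fsys_bubble_down()
 * below.  The helper is hypothetical and assumes the target lies above
 * the branch, so the unsigned division is safe.
 */
#if 0
static void __init patch_brl_target(u64 brl_ip, u64 target)
{
        ia64_patch_imm60(brl_ip, (u64) (target - (brl_ip & -16)) / 16);
}
#endif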

/*
 * We sometimes need to load the physical address of a kernel
 * object.  Often we can convert the virtual address to physical
 * at execution time, but sometimes (either for performance reasons
 * or during error recovery) we cannot do this.  Patch the marked
 * bundles to load the physical address.
 */
void __init
ia64_patch_vtop (unsigned long start, unsigned long end)
{
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;

                /* replace virtual address with corresponding physical address: */
                ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}
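
/*
 * Every routine in this file walks its patchlist the same way: each s32
 * entry is a self-relative offset, so the tagged instruction lives at
 * "(u64) entry + *entry".  A minimal sketch of that common walk (purely
 * illustrative, not part of the build; the helper name is made up):
 */
#if 0
static void __init patchlist_for_each(s32 *offp, s32 *end, void (*fn)(u64 ip))
{
        while (offp < end) {
                fn((u64) offp + *offp);
                ++offp;
        }
}
#endif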

/*
 * Disable the RSE workaround by turning the conditional branch
 * that we tagged in each place the workaround was used into an
 * unconditional branch.
 */
void __init
ia64_patch_rse (unsigned long start, unsigned long end)
{
        s32 *offp = (s32 *) start;
        u64 ip, *b;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;

                b = (u64 *)(ip & -16);
                b[1] &= ~0xf800000L;    /* clear the branch's qualifying predicate, making it unconditional */
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
        static int first_time = 1;
        int need_workaround;
        s32 *offp = (s32 *) start;
        u64 *wp;

        need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);

        if (first_time) {
                first_time = 0;
                if (need_workaround)
                        printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
        }
        if (need_workaround)
                return;

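        /*
         * Workaround not needed: rewrite each tagged two-bundle sequence
         * (tagged via asm/asmmacro.h) into a plain "br.ret.sptk.many b6"
         * followed by nops.
         */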
        while (offp < (s32 *) end) {
                wp = (u64 *) ia64_imva((char *) offp + *offp);
                wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
                wp[1] = 0x0084006880000200UL;
                wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
                wp[3] = 0x0004000000000200UL;
                ia64_fc(wp); ia64_fc(wp + 2);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
        .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
        .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};
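
/*
 * pv_fsys_data defaults to the native fsyscall entry points; a
 * paravirtualized platform may override it before ia64_patch_gate()
 * runs, so the gate page is patched with the platform's own table.
 */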

unsigned long * __init
paravirt_get_fsyscall_table(void)
{
        return pv_fsys_data.fsyscall_table;
}

char * __init
paravirt_get_fsys_bubble_down(void)
{
        return pv_fsys_data.fsys_bubble_down;
}

static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
        u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) ia64_imva((char *) offp + *offp);
                ia64_patch_imm64(ip, fsyscall_table);
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
        u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;
                ia64_patch_imm60((u64) ia64_imva((void *) ip),
                                 (u64) (fsys_bubble_down - (ip & -16)) / 16);
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

void __init
ia64_patch_gate (void)
{
#       define START(name)      paravirt_get_gate_patchlist(PV_GATE_START_##name)
#       define END(name)        paravirt_get_gate_patchlist(PV_GATE_END_##name)

        patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
        patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
        ia64_patch_vtop(START(VTOP), END(VTOP));
        ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
}

void ia64_patch_phys_stack_reg(unsigned long val)
{
        s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
        s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
        u64 ip, mask, imm;

        /* see instruction format A4: adds r1 = imm13, r3 */
        mask = (0x3fUL << 27) | (0x7f << 13);
        imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;
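        /*
         * Worked example: for val = 96 (0x60), imm7b = 0x60 lands in bits
         * 13..19 and imm6d = 96 >> 7 = 0 in bits 27..32, so the tagged
         * "adds rX = 0, rY" becomes "adds rX = 96, rY".  (val is assumed
         * to fit in the 13 patched bits; the sign bit is not touched.)
         */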

        while (offp < end) {
                ip = (u64) offp + *offp;
                ia64_patch(ip, mask, imm);
                ia64_fc((void *)ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}