/* linux/arch/powerpc/kernel/misc_64.S */
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * This file contains miscellaneous low-level functions.
   4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   5 *
   6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
   7 * and Paul Mackerras.
   8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
   9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
  10 */
  11
  12#include <linux/sys.h>
  13#include <asm/unistd.h>
  14#include <asm/errno.h>
  15#include <asm/processor.h>
  16#include <asm/page.h>
  17#include <asm/cache.h>
  18#include <asm/ppc_asm.h>
  19#include <asm/asm-offsets.h>
  20#include <asm/cputable.h>
  21#include <asm/thread_info.h>
  22#include <asm/kexec.h>
  23#include <asm/ptrace.h>
  24#include <asm/mmu.h>
  25#include <asm/export.h>
  26#include <asm/feature-fixups.h>
  27
  28        .text
  29
/*
 * void call_do_softirq(void *sp)
 *
 * r3 = base of the softirq stack area (sized THREAD_SIZE).
 * Builds a frame at the top of that stack whose back chain points at
 * the current r1, switches r1 onto it, runs __do_softirq, then unwinds
 * back through the back chain.
 */
   30_GLOBAL(call_do_softirq)
   31        mflr    r0
   32        std     r0,16(r1)       /* save LR in the caller's frame */
   33        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) /* store old r1 at top of new stack; r3 -> new frame */
   34        mr      r1,r3           /* switch to the softirq stack */
   35        bl      __do_softirq
   36        ld      r1,0(r1)        /* back chain -> original stack */
   37        ld      r0,16(r1)
   38        mtlr    r0
   39        blr
  40
/*
 * void call_do_irq(struct pt_regs *regs, void *sp)
 *
 * r3 = regs (passed through untouched as __do_irq's first argument),
 * r4 = base of the IRQ stack area.  Same stack-switch dance as
 * call_do_softirq above: frame at top of new stack, back chain to the
 * old r1, call out, unwind via the back chain.
 */
   41_GLOBAL(call_do_irq)
   42        mflr    r0
   43        std     r0,16(r1)       /* save LR in the caller's frame */
   44        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) /* store old r1 at top of new stack; r4 -> new frame */
   45        mr      r1,r4           /* switch to the IRQ stack */
   46        bl      __do_irq        /* __do_irq(regs) -- r3 still holds regs */
   47        ld      r1,0(r1)        /* back chain -> original stack */
   48        ld      r0,16(r1)
   49        mtlr    r0
   50        blr
  51
/*
 * TOC entry through which the cache-geometry structure (ppc64_caches)
 * is reached below via PPC64_CACHES@toc(r2).
 */
   52        .section        ".toc","aw"
   53PPC64_CACHES:
   54        .tc             ppc64_caches[TC],ppc64_caches
   55        .section        ".text"
  56
  57/*
  58 * Write any modified data cache blocks out to memory
  59 * and invalidate the corresponding instruction cache blocks.
  60 *
  61 * flush_icache_range(unsigned long start, unsigned long stop)
  62 *
  63 *   flush all bytes from start through stop-1 inclusive
  64 */
  65
   66_GLOBAL_TOC(flush_icache_range)
   67BEGIN_FTR_SECTION
   68        PURGE_PREFETCHED_INS    /* coherent icache: only prefetched instructions need purging */
   69        blr
   70END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
   71/*
   72 * Flush the data cache to memory 
   73 * 
   74 * Different systems have different cache line sizes
   75 * and in some cases i-cache and d-cache line sizes differ from
   76 * each other.
   77 */
   78        ld      r10,PPC64_CACHES@toc(r2)
   79        lwz     r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */
   80        addi    r5,r7,-1
   81        andc    r6,r3,r5                /* round low to line bdy */
   82        subf    r8,r6,r4                /* compute length */
   83        add     r8,r8,r5                /* ensure we get enough */
   84        lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of cache block size */
   85        srw.    r8,r8,r9                /* compute line count; '.' sets CR0 for beqlr */
   86        beqlr                           /* nothing to do? */
   87        mtctr   r8
   881:      dcbst   0,r6                    /* write dcache block back to memory */
   89        add     r6,r6,r7
   90        bdnz    1b
   91        sync                            /* make dcbst stores visible before icbi */
   92
   93/* Now invalidate the instruction cache */
   94        
   95        lwz     r7,ICACHEL1BLOCKSIZE(r10)       /* Get Icache block size */
   96        addi    r5,r7,-1
   97        andc    r6,r3,r5                /* round low to line bdy */
   98        subf    r8,r6,r4                /* compute length */
   99        add     r8,r8,r5
  100        lwz     r9,ICACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of Icache block size */
  101        srw.    r8,r8,r9                /* compute line count */
  102        beqlr                           /* nothing to do? */
  103        mtctr   r8
  1042:      icbi    0,r6                    /* invalidate icache block */
  105        add     r6,r6,r7
  106        bdnz    2b
  107        isync                           /* discard any prefetched (stale) instructions */
  108        blr
  109_ASM_NOKPROBE_SYMBOL(flush_icache_range)
  110EXPORT_SYMBOL(flush_icache_range)
 111
 112/*
 113 * Flush a particular page from the data cache to RAM.
 114 * Note: this is necessary because the instruction cache does *not*
 115 * snoop from the data cache.
 116 *
 117 *      void __flush_dcache_icache(void *page)
 118 */
  119_GLOBAL(__flush_dcache_icache)
/*
 * r3 = any address within the page to flush; it is aligned down to the
 * page boundary below.  Iterates a fixed blocks-per-page count from the
 * ppc64_caches structure rather than computing a length.
 */
  120/*
  121 * Flush the data cache to memory 
  122 * 
  123 * Different systems have different cache line sizes
  124 */
  125
  126BEGIN_FTR_SECTION
  127        PURGE_PREFETCHED_INS    /* coherent icache: nothing to write back/invalidate */
  128        blr
  129END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  130
  131/* Flush the dcache */
  132        ld      r7,PPC64_CACHES@toc(r2)
  133        clrrdi  r3,r3,PAGE_SHIFT                    /* Page align */
  134        lwz     r4,DCACHEL1BLOCKSPERPAGE(r7)    /* Get # dcache blocks per page */
  135        lwz     r5,DCACHEL1BLOCKSIZE(r7)        /* Get dcache block size */
  136        mr      r6,r3
  137        mtctr   r4
  1380:      dcbst   0,r6                    /* write dcache block back to memory */
  139        add     r6,r6,r5
  140        bdnz    0b
  141        sync                            /* order dcbst before icbi */
  142
  143/* Now invalidate the icache */ 
  144
  145        lwz     r4,ICACHEL1BLOCKSPERPAGE(r7)    /* Get # icache blocks per page */
  146        lwz     r5,ICACHEL1BLOCKSIZE(r7)        /* Get icache block size */
  147        mtctr   r4
  1481:      icbi    0,r3
  149        add     r3,r3,r5
  150        bdnz    1b
  151        isync                           /* discard prefetched (stale) instructions */
  152        blr
 153
/*
 * u64 __bswapdi2(u64 x)   (libgcc helper, exported)
 *
 * 64-bit byte swap: split r3 into high word (r8) and low word,
 * byte-reverse each 32-bit word with the rlwinm/rlwimi rotate-and-mask
 * sequence (r7 = reversed low word, r9 = reversed high word), then
 * recombine with the halves exchanged.
 */
  154_GLOBAL(__bswapdi2)
  155EXPORT_SYMBOL(__bswapdi2)
  156        srdi    r8,r3,32                /* r8 = high word of x */
  157        rlwinm  r7,r3,8,0xffffffff      /* rotate low word left 8 */
  158        rlwimi  r7,r3,24,0,7            /* insert byte into bits 0-7 */
  159        rlwinm  r9,r8,8,0xffffffff      /* same for the high word */
  160        rlwimi  r7,r3,24,16,23          /* insert byte into bits 16-23 */
  161        rlwimi  r9,r8,24,0,7
  162        rlwimi  r9,r8,24,16,23
  163        sldi    r7,r7,32                /* reversed low word -> high half */
  164        or      r3,r7,r9                /* | reversed high word in low half */
  165        blr
 166
 167
 168#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * rmci_on: set bit (0x100 << 32) in HID4.
 * NOTE(review): presumably the real-mode cache-inhibit control on
 * 970-class CPUs (name suggests "RMCI") -- confirm against the PPC970
 * user manual.  slbia + sync/isync serialise the HID4 change.
 */
  169_GLOBAL(rmci_on)
  170        sync
  171        isync
  172        li      r3,0x100
  173        rldicl  r3,r3,32,0              /* r3 = 0x100 << 32 */
  174        mfspr   r5,SPRN_HID4
  175        or      r5,r5,r3                /* set the bit */
  176        sync
  177        mtspr   SPRN_HID4,r5
  178        isync
  179        slbia
  180        isync
  181        sync
  182        blr
 183
/*
 * rmci_off: clear bit (0x100 << 32) in HID4 -- inverse of rmci_on.
 * Same serialising slbia/sync/isync sequence around the HID4 update.
 */
  184_GLOBAL(rmci_off)
  185        sync
  186        isync
  187        li      r3,0x100
  188        rldicl  r3,r3,32,0              /* r3 = 0x100 << 32 */
  189        mfspr   r5,SPRN_HID4
  190        andc    r5,r5,r3                /* clear the bit */
  191        sync
  192        mtspr   SPRN_HID4,r5
  193        isync
  194        slbia
  195        isync
  196        sync
  197        blr
 198#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 199
 200#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 201
 202/*
 203 * Do an IO access in real mode
 204 */
/*
 * u8 real_readb(volatile u8 *addr)    -- r3 = address, result in r3
 *
 * Turns data relocation (MSR_DR) off, sets the HID4 bit (0x100 << 32)
 * around the access -- NOTE(review): presumably the 970 real-mode
 * cache-inhibit control, verify -- performs the byte load, then
 * restores HID4 and the original MSR.
 */
  205_GLOBAL(real_readb)
  206        mfmsr   r7
  207        ori     r0,r7,MSR_DR            /* r0 = MSR with DR cleared */
  208        xori    r0,r0,MSR_DR
  209        sync
  210        mtmsrd  r0                      /* data translation off */
  211        sync
  212        isync
  213        mfspr   r6,SPRN_HID4            /* r6 = saved HID4 */
  214        rldicl  r5,r6,32,0
  215        ori     r5,r5,0x100
  216        rldicl  r5,r5,32,0              /* r5 = HID4 | (0x100 << 32) */
  217        sync
  218        mtspr   SPRN_HID4,r5
  219        isync
  220        slbia
  221        isync
  222        lbz     r3,0(r3)                /* the actual real-mode load */
  223        sync
  224        mtspr   SPRN_HID4,r6            /* restore HID4 */
  225        isync
  226        slbia
  227        isync
  228        mtmsrd  r7                      /* restore original MSR */
  229        sync
  230        isync
  231        blr
 232
 233        /*
 234 * Do an IO access in real mode
 235 */
 236_GLOBAL(real_writeb)
 237        mfmsr   r7
 238        ori     r0,r7,MSR_DR
 239        xori    r0,r0,MSR_DR
 240        sync
 241        mtmsrd  r0
 242        sync
 243        isync
 244        mfspr   r6,SPRN_HID4
 245        rldicl  r5,r6,32,0
 246        ori     r5,r5,0x100
 247        rldicl  r5,r5,32,0
 248        sync
 249        mtspr   SPRN_HID4,r5
 250        isync
 251        slbia
 252        isync
 253        stb     r3,0(r4)
 254        sync
 255        mtspr   SPRN_HID4,r6
 256        isync
 257        slbia
 258        isync
 259        mtmsrd  r7
 260        sync
 261        isync
 262        blr
 263#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
 264
 265#ifdef CONFIG_PPC_PASEMI
 266
/*
 * u8 real_205_readb(volatile u8 *addr)   -- r3 = address, result in r3
 *
 * PA Semi variant: turn MSR_DR off and use the cache-inhibited
 * indexed load (lbzcix) instead of HID4 games, then restore the MSR.
 */
  267_GLOBAL(real_205_readb)
  268        mfmsr   r7
  269        ori     r0,r7,MSR_DR            /* r0 = MSR with DR cleared */
  270        xori    r0,r0,MSR_DR
  271        sync
  272        mtmsrd  r0                      /* data translation off */
  273        sync
  274        isync
  275        LBZCIX(R3,R0,R3)                /* cache-inhibited byte load */
  276        isync
  277        mtmsrd  r7                      /* restore original MSR */
  278        sync
  279        isync
  280        blr
 281
/*
 * void real_205_writeb(u8 val, volatile u8 *addr) -- r3 = value, r4 = address
 *
 * Mirror of real_205_readb using the cache-inhibited indexed store
 * (stbcix) with data translation off.
 */
  282_GLOBAL(real_205_writeb)
  283        mfmsr   r7
  284        ori     r0,r7,MSR_DR            /* r0 = MSR with DR cleared */
  285        xori    r0,r0,MSR_DR
  286        sync
  287        mtmsrd  r0                      /* data translation off */
  288        sync
  289        isync
  290        STBCIX(R3,R0,R4)                /* cache-inhibited byte store */
  291        isync
  292        mtmsrd  r7                      /* restore original MSR */
  293        sync
  294        isync
  295        blr
 296
 297#endif /* CONFIG_PPC_PASEMI */
 298
 299
 300#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
 301/*
 302 * SCOM access functions for 970 (FX only for now)
 303 *
 304 * unsigned long scom970_read(unsigned int address);
 305 * void scom970_write(unsigned int address, unsigned long value);
 306 *
 307 * The address passed in is the 24 bits register address. This code
 308 * is 970 specific and will not check the status bits, so you should
 309 * know what you are doing.
 310 */
  311_GLOBAL(scom970_read)
  312        /* interrupts off */
  313        mfmsr   r4
  314        ori     r0,r4,MSR_EE            /* r0 = MSR with EE cleared */
  315        xori    r0,r0,MSR_EE
  316        mtmsrd  r0,1
  317
  318        /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
  319         * (including parity). On current CPUs they must be 0'd,
  320         * and finally or in RW bit
  321         */
  322        rlwinm  r3,r3,8,0,15
  323        ori     r3,r3,0x8000            /* RW (read) bit */
  324
  325        /* do the actual scom read */
  326        sync
  327        mtspr   SPRN_SCOMC,r3           /* issue the command */
  328        isync
  329        mfspr   r3,SPRN_SCOMD           /* fetch the data -- return value */
  330        isync
  331        mfspr   r0,SPRN_SCOMC           /* read status back (discarded) */
  332        isync
  333
  334        /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
  335         * that's the best we can do). Not implemented yet as we don't use
  336         * the scom on any of the bogus CPUs yet, but may have to be done
  337         * ultimately
  338         */
  339
  340        /* restore interrupts */
  341        mtmsrd  r4,1
  342        blr
  343
  344
  345_GLOBAL(scom970_write)
  346        /* interrupts off */
  347        mfmsr   r5
  348        ori     r0,r5,MSR_EE            /* r0 = MSR with EE cleared */
  349        xori    r0,r0,MSR_EE
  350        mtmsrd  r0,1
  351
  352        /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
  353         * (including parity). On current CPUs they must be 0'd.
  354         */
  355
  356        rlwinm  r3,r3,8,0,15            /* no RW bit -> write command */
  357
  358        sync
  359        mtspr   SPRN_SCOMD,r4      /* write data */
  360        isync
  361        mtspr   SPRN_SCOMC,r3      /* write command */
  362        isync
  363        mfspr   3,SPRN_SCOMC            /* (3) == r3; status read back, discarded */
  364        isync
  365
  366        /* restore interrupts */
  367        mtmsrd  r5,1
  368        blr
 369#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
 370
 371/* kexec_wait(phys_cpu)
 372 *
 373 * wait for the flag to change, indicating this kernel is going away but
 374 * the slave code for the next one is at addresses 0 to 100.
 375 *
 376 * This is used by all slaves, even those that did not find a matching
 377 * paca in the secondary startup code.
 378 *
 379 * Physical (hardware) cpu id should be in r3.
 380 */
  381_GLOBAL(kexec_wait)
  382        bl      1f
  3831:      mflr    r5
  384        addi    r5,r5,kexec_flag-1b    /* r5 = runtime address of kexec_flag (PIC trick) */
  385
  38699:     HMT_LOW                         /* lower thread priority while spinning */
  387#ifdef CONFIG_KEXEC_CORE        /* use no memory without kexec */
  388        lwz     r4,0(r5)
  389        cmpwi   0,r4,0                  /* spin until kexec_flag becomes non-zero */
  390        beq     99b
  391#ifdef CONFIG_PPC_BOOK3S_64
  392        li      r10,0x60
  393        mfmsr   r11
  394        clrrdi  r11,r11,1       /* Clear MSR_LE */
  395        mtsrr0  r10
  396        mtsrr1  r11
  397        rfid                            /* enter the new kernel's slave path at 0x60, big-endian */
  398#else
  399        /* Create TLB entry in book3e_secondary_core_init */
  400        li      r4,0
  401        ba      0x60                    /* absolute branch to the slave entry */
  402#endif
  403#endif
  404
  405/* this can be in text because we won't change it until we are
  406 * running in real anyways
  407 */
  408kexec_flag:
  409        .long   0
  410
 410
 411
 412#ifdef CONFIG_KEXEC_CORE
  413#ifdef CONFIG_PPC_BOOK3E
  414/*
  415 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
  416 * for a core to identity map v:0 to p:0.  This current implementation
  417 * assumes that 1G is enough for kexec.
  418 */
  419kexec_create_tlb:
  420        /*
  421         * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
  422         * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
  423         */
  424        PPC_TLBILX_ALL(0,R0)
  425        sync
  426        isync
  427
  428        mfspr   r10,SPRN_TLB1CFG
  429        andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
  430        subi    r10,r10,1       /* Last entry: no conflict with kernel text */
  431        lis     r9,MAS0_TLBSEL(1)@h
  432        rlwimi  r9,r10,16,4,15          /* Setup MAS0 = TLBSEL | ESEL(r9) */
  433
  434/* Set up a temp identity mapping v:0 to p:0 and return to it. */
  435#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
  436#define M_IF_NEEDED     MAS2_M
  437#else
  438#define M_IF_NEEDED     0
  439#endif
  440        mtspr   SPRN_MAS0,r9
  441
  442        lis     r9,(MAS1_VALID|MAS1_IPROT)@h
  443        ori     r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
  444        mtspr   SPRN_MAS1,r9            /* valid, protected, 1GB page size */
  445
  446        LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
  447        mtspr   SPRN_MAS2,r9            /* EPN = 0 (+ memory-coherence bit if SMP) */
  448
  449        LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
  450        mtspr   SPRN_MAS3,r9            /* RPN = 0, supervisor RWX */
  451        li      r9,0
  452        mtspr   SPRN_MAS7,r9            /* upper physical address bits = 0 */
  453
  454        tlbwe
  455        isync
  456        blr
  457#endif
 458
 459/* kexec_smp_wait(void)
 460 *
 461 * call with interrupts off
 462 * note: this is a terminal routine, it does not save lr
 463 *
 464 * get phys id from paca
 465 * switch to real mode
 466 * mark the paca as no longer used
 467 * join other cpus in kexec_wait(phys_id)
 468 */
  469_GLOBAL(kexec_smp_wait)
  470        lhz     r3,PACAHWCPUID(r13)     /* phys cpu id -> r3 for kexec_wait */
  471        bl      real_mode               /* MMU off (r3 preserved, see real_mode) */
  472
  473        li      r4,KEXEC_STATE_REAL_MODE
  474        stb     r4,PACAKEXECSTATE(r13)  /* advertise we reached real mode */
  475        SYNC
  476
  477        b       kexec_wait              /* terminal: never returns */
 478
 479/*
 480 * switch to real mode (turn mmu off)
 481 * we use the early kernel trick that the hardware ignores bits
 482 * 0 and 1 (big endian) of the effective address in real mode
 483 *
 484 * don't overwrite r3 here, it is live for kexec_wait above.
 485 */
real_mode:      /* assume normal blr return */
  487#ifdef CONFIG_PPC_BOOK3E
  488        /* Create an identity mapping. */
  489        b       kexec_create_tlb
  490#else
  4911:      li      r9,MSR_RI
  492        li      r10,MSR_DR|MSR_IR
  493        mflr    r11             /* return address to SRR0 */
  494        mfmsr   r12
  495        andc    r9,r12,r9               /* r9  = MSR with RI cleared */
  496        andc    r10,r12,r10             /* r10 = MSR with IR/DR cleared */
  497
  498        mtmsrd  r9,1                    /* clear RI before taking the rfid */
  499        mtspr   SPRN_SRR1,r10
  500        mtspr   SPRN_SRR0,r11
  501        rfid                            /* "return" to LR with the MMU off */
  502#endif
 503
 504/*
 505 * kexec_sequence(newstack, start, image, control, clear_all(),
 506                  copy_with_mmu_off)
 507 *
 508 * does the grungy work with stack switching and real mode switches
 509 * also does simple calls to other code
 510 */
 511
  512_GLOBAL(kexec_sequence)
  513        mflr    r0
  514        std     r0,16(r1)
  515
  516        /* switch stacks to newstack -- &kexec_stack.stack */
  517        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
  518        mr      r1,r3
  519
  520        li      r0,0
  521        std     r0,16(r1)               /* terminate the LR save chain */
  522
  523BEGIN_FTR_SECTION
  524        /*
  525         * This is the best time to turn AMR/IAMR off.
  526         * key 0 is used in radix for supervisor<->user
  527         * protection, but on hash key 0 is reserved
  528         * ideally we want to enter with a clean state.
  529         * NOTE, we rely on r0 being 0 from above.
  530         */
  531        mtspr   SPRN_IAMR,r0
  532BEGIN_FTR_SECTION_NESTED(42)
  533        mtspr   SPRN_AMOR,r0
  534END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
  535END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  536
  537        /* save regs for local vars on new stack.
  538         * yes, we won't go back, but ...
  539         */
  540        std     r31,-8(r1)
  541        std     r30,-16(r1)
  542        std     r29,-24(r1)
  543        std     r28,-32(r1)
  544        std     r27,-40(r1)
  545        std     r26,-48(r1)
  546        std     r25,-56(r1)
  547
  548        stdu    r1,-STACK_FRAME_OVERHEAD-64(r1)
  549
  550        /* save args into preserved regs */
  551        mr      r31,r3                  /* newstack (both) */
  552        mr      r30,r4                  /* start (real) */
  553        mr      r29,r5                  /* image (virt) */
  554        mr      r28,r6                  /* control, unused */
  555        mr      r27,r7                  /* clear_all() fn desc */
  556        mr      r26,r8                  /* copy_with_mmu_off */
  557        lhz     r25,PACAHWCPUID(r13)    /* get our phys cpu from paca */
  558
  559        /* disable interrupts, we are overwriting kernel data next */
  560#ifdef CONFIG_PPC_BOOK3E
  561        wrteei  0
  562#else
  563        mfmsr   r3
  564        rlwinm  r3,r3,0,17,15           /* clear MSR_EE (bit 48) */
  565        mtmsrd  r3,1
  566#endif
  567
  568        /* We need to turn the MMU off unless we are in hash mode
  569         * under a hypervisor
  570         */
  571        cmpdi   r26,0
  572        beq     1f
  573        bl      real_mode
  5741:
  575        /* copy dest pages, flush whole dest image */
  576        mr      r3,r29
  577        bl      kexec_copy_flush        /* (image) */
  578
  579        /* turn off mmu now if not done earlier */
  580        cmpdi   r26,0
  581        bne     1f
  582        bl      real_mode
  583
  584        /* copy  0x100 bytes starting at start to 0 */
  5851:      li      r3,0
  586        mr      r4,r30          /* start, aka phys mem offset */
  587        li      r5,0x100
  588        li      r6,0
  589        bl      copy_and_flush  /* (dest, src, copy limit, start offset) */
  5901:      /* assume normal blr return */
  591
  592        /* release other cpus to the new kernel secondary start at 0x60 */
  593        mflr    r5                      /* LR == address of 1b above */
  594        li      r6,1
  595        stw     r6,kexec_flag-1b(5)     /* (5) == r5; set kexec_flag, PIC-relative */
  596
  597        cmpdi   r27,0
  598        beq     1f
  599
  600        /* clear out hardware hash page table and tlb */
  601#ifdef PPC64_ELF_ABI_v1
  602        ld      r12,0(r27)              /* deref function descriptor */
  603#else
  604        mr      r12,r27
  605#endif
  606        mtctr   r12
  607        bctrl                           /* mmu_hash_ops.hpte_clear_all(void); */
  608
  609/*
  610 *   kexec image calling is:
  611 *      the first 0x100 bytes of the entry point are copied to 0
  612 *
  613 *      all slaves branch to slave = 0x60 (absolute)
  614 *              slave(phys_cpu_id);
  615 *
  616 *      master goes to start = entry point
  617 *              start(phys_cpu_id, start, 0);
  618 *
  619 *
  620 *   a wrapper is needed to call existing kernels, here is an approximate
  621 *   description of one method:
  622 *
  623 * v2: (2.6.10)
  624 *   start will be near the boot_block (maybe 0x100 bytes before it?)
  625 *   it will have a 0x60, which will b to boot_block, where it will wait
  626 *   and 0 will store phys into struct boot-block and load r3 from there,
  627 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
  628 *
  629 * v1: (2.6.9)
  630 *    boot block will have all cpus scanning device tree to see if they
  631 *    are the boot cpu ?????
  632 *    other device tree differences (prop sizes, va vs pa, etc)...
  633 */
  6341:      mr      r3,r25  # my phys cpu
  635        mr      r4,r30  # start, aka phys mem offset
  636        mtlr    4               /* (4) == r4: branch target is image start */
  637        li      r5,0
  638        blr     /* image->start(physid, image->start, 0); */
 639#endif /* CONFIG_KEXEC_CORE */
 640