/* linux/arch/powerpc/kernel/misc_64.S */
   1/*
   2 * This file contains miscellaneous low-level functions.
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *
   5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
   6 * and Paul Mackerras.
   7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
   8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * as published by the Free Software Foundation; either version
  13 * 2 of the License, or (at your option) any later version.
  14 *
  15 */
  16
  17#include <linux/sys.h>
  18#include <asm/unistd.h>
  19#include <asm/errno.h>
  20#include <asm/processor.h>
  21#include <asm/page.h>
  22#include <asm/cache.h>
  23#include <asm/ppc_asm.h>
  24#include <asm/asm-offsets.h>
  25#include <asm/cputable.h>
  26#include <asm/thread_info.h>
  27#include <asm/kexec.h>
  28#include <asm/ptrace.h>
  29#include <asm/mmu.h>
  30#include <asm/export.h>
  31
  32        .text
  33
/*
 * void call_do_softirq(void *sp)
 *
 * Run __do_softirq() on a separate stack.  r3 holds the top of the
 * softirq stack.  LR is saved in the caller's frame at 16(r1); stdu
 * pushes a frame near the top of the new stack whose back-chain word
 * points at the old r1, then r1 is switched to the new stack.  After
 * the call, r1 is restored through the back-chain at 0(r1) and LR is
 * reloaded from the original frame.
 */
   34_GLOBAL(call_do_softirq)
   35        mflr    r0
   36        std     r0,16(r1)
   37        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
   38        mr      r1,r3
   39        bl      __do_softirq
   40        ld      r1,0(r1)
   41        ld      r0,16(r1)
   42        mtlr    r0
   43        blr
  44
/*
 * void call_do_irq(struct pt_regs *regs, void *sp)
 *
 * Same stack-switch pattern as call_do_softirq above, but the new
 * stack top is in r4; r3 is left untouched and passed straight
 * through to __do_irq.
 */
   45_GLOBAL(call_do_irq)
   46        mflr    r0
   47        std     r0,16(r1)
   48        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
   49        mr      r1,r4
   50        bl      __do_irq
   51        ld      r1,0(r1)
   52        ld      r0,16(r1)
   53        mtlr    r0
   54        blr
  55
/*
 * TOC entry for the kernel's ppc64_caches structure; the cache-flush
 * routines load its address via PPC64_CACHES@toc(r2).
 */
   56        .section        ".toc","aw"
   57PPC64_CACHES:
   58        .tc             ppc64_caches[TC],ppc64_caches
   59        .section        ".text"
  60
  61/*
  62 * Write any modified data cache blocks out to memory
  63 * and invalidate the corresponding instruction cache blocks.
  64 *
  65 * flush_icache_range(unsigned long start, unsigned long stop)
  66 *
  67 *   flush all bytes from start through stop-1 inclusive
  68 */
  69
  70_GLOBAL_TOC(flush_icache_range)
  71BEGIN_FTR_SECTION
  72        PURGE_PREFETCHED_INS
  73        blr
  74END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  75/*
  76 * Flush the data cache to memory 
  77 * 
  78 * Different systems have different cache line sizes
  79 * and in some cases i-cache and d-cache line sizes differ from
  80 * each other.
  81 */
  82        ld      r10,PPC64_CACHES@toc(r2)
  83        lwz     r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */
  84        addi    r5,r7,-1
  85        andc    r6,r3,r5                /* round low to line bdy */
  86        subf    r8,r6,r4                /* compute length */
  87        add     r8,r8,r5                /* ensure we get enough */
  88        lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of cache block size */
  89        srw.    r8,r8,r9                /* compute line count */
  90        beqlr                           /* nothing to do? */
  91        mtctr   r8
  921:      dcbst   0,r6
  93        add     r6,r6,r7
  94        bdnz    1b
  95        sync
  96
  97/* Now invalidate the instruction cache */
  98        
  99        lwz     r7,ICACHEL1BLOCKSIZE(r10)       /* Get Icache block size */
 100        addi    r5,r7,-1
 101        andc    r6,r3,r5                /* round low to line bdy */
 102        subf    r8,r6,r4                /* compute length */
 103        add     r8,r8,r5
 104        lwz     r9,ICACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of Icache block size */
 105        srw.    r8,r8,r9                /* compute line count */
 106        beqlr                           /* nothing to do? */
 107        mtctr   r8
 1082:      icbi    0,r6
 109        add     r6,r6,r7
 110        bdnz    2b
 111        isync
 112        blr
 113_ASM_NOKPROBE_SYMBOL(flush_icache_range)
 114EXPORT_SYMBOL(flush_icache_range)
 115
 116/*
 117 * Like above, but only do the D-cache.
 118 *
 119 * flush_dcache_range(unsigned long start, unsigned long stop)
 120 *
 121 *    flush all bytes from start to stop-1 inclusive
 122 */
/*
 * Walk the range a d-cache block at a time, dcbf-ing each block.
 * The sync/isync pairs before and after the loop order the flushes
 * against surrounding accesses.  Note the line count here is computed
 * with the 64-bit srd. shift.
 */
  123_GLOBAL_TOC(flush_dcache_range)
  124        ld      r10,PPC64_CACHES@toc(r2)
  125        lwz     r7,DCACHEL1BLOCKSIZE(r10)       /* Get dcache block size */
  126        addi    r5,r7,-1
  127        andc    r6,r3,r5                /* round low to line bdy */
  128        subf    r8,r6,r4                /* compute length */
  129        add     r8,r8,r5                /* ensure we get enough */
  130        lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
  131        srd.    r8,r8,r9                /* compute line count */
  132        beqlr                           /* nothing to do? */
  133        sync
  134        isync
  135        mtctr   r8
  1360:      dcbf    0,r6
  137        add     r6,r6,r7
  138        bdnz    0b
  139        sync
  140        isync
  141        blr
  142EXPORT_SYMBOL(flush_dcache_range)
 143
 144/*
 145 * Flush a particular page from the data cache to RAM.
 146 * Note: this is necessary because the instruction cache does *not*
 147 * snoop from the data cache.
 148 *
 149 *      void __flush_dcache_icache(void *page)
 150 */
/*
 * r3 = any address within the page; it is aligned down to the page
 * boundary, then every d-cache block in the page is written back
 * (dcbst) and every i-cache block invalidated (icbi).  Block counts
 * and sizes come from the ppc64_caches structure via the TOC.
 */
  151_GLOBAL(__flush_dcache_icache)
  152/*
  153 * Flush the data cache to memory 
  154 * 
  155 * Different systems have different cache line sizes
  156 */
  157
  158BEGIN_FTR_SECTION
  159        PURGE_PREFETCHED_INS
  160        blr
  161END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  162
  163/* Flush the dcache */
  164        ld      r7,PPC64_CACHES@toc(r2)
  165        clrrdi  r3,r3,PAGE_SHIFT                    /* Page align */
  166        lwz     r4,DCACHEL1BLOCKSPERPAGE(r7)    /* Get # dcache blocks per page */
  167        lwz     r5,DCACHEL1BLOCKSIZE(r7)        /* Get dcache block size */
  168        mr      r6,r3
  169        mtctr   r4
  1700:      dcbst   0,r6
  171        add     r6,r6,r5
  172        bdnz    0b
  173        sync
  174
  175/* Now invalidate the icache */ 
  176
  177        lwz     r4,ICACHEL1BLOCKSPERPAGE(r7)    /* Get # icache blocks per page */
  178        lwz     r5,ICACHEL1BLOCKSIZE(r7)        /* Get icache block size */
  179        mtctr   r4
  1801:      icbi    0,r3
  181        add     r3,r3,r5
  182        bdnz    1b
  183        isync
  184        blr
 185
/*
 * u64 __bswapdi2(u64 v) — byte-reverse the 64-bit value in r3
 * (libgcc helper).  Each 32-bit half is byte-reversed with the
 * classic rotate-and-insert idiom (rlwinm rotates left 8, the two
 * rlwimi insert the remaining bytes rotated left 24); r7 holds the
 * reversed low half, r9 the reversed high half.  The reversed low
 * half becomes the new high half, giving the full 64-bit swap.
 */
  186_GLOBAL(__bswapdi2)
  187EXPORT_SYMBOL(__bswapdi2)
  188        srdi    r8,r3,32
  189        rlwinm  r7,r3,8,0xffffffff
  190        rlwimi  r7,r3,24,0,7
  191        rlwinm  r9,r8,8,0xffffffff
  192        rlwimi  r7,r3,24,16,23
  193        rlwimi  r9,r8,24,0,7
  194        rlwimi  r9,r8,24,16,23
  195        sldi    r7,r7,32
  196        or      r3,r7,r9
  197        blr
 198
 199
 200#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * Set bit 0x100<<32 in HID4 (presumably the 970's real-mode
 * cache-inhibit control — TODO confirm against the 970FX HID4
 * layout; real_readb below sets the same bit), then slbia to flush
 * stale SLB translations.  The sync/isync bracketing orders the
 * HID4 update against surrounding accesses.
 */
  201_GLOBAL(rmci_on)
  202        sync
  203        isync
  204        li      r3,0x100
  205        rldicl  r3,r3,32,0
  206        mfspr   r5,SPRN_HID4
  207        or      r5,r5,r3
  208        sync
  209        mtspr   SPRN_HID4,r5
  210        isync
  211        slbia
  212        isync
  213        sync
  214        blr
 215
/*
 * Inverse of rmci_on: clear the same HID4 bit (andc instead of or)
 * with identical sync/isync/slbia bracketing.
 */
  216_GLOBAL(rmci_off)
  217        sync
  218        isync
  219        li      r3,0x100
  220        rldicl  r3,r3,32,0
  221        mfspr   r5,SPRN_HID4
  222        andc    r5,r5,r3
  223        sync
  224        mtspr   SPRN_HID4,r5
  225        isync
  226        slbia
  227        isync
  228        sync
  229        blr
 230#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 231
 232#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 233
 234/*
 235 * Do an IO access in real mode
 236 */
/*
 * u8 real_readb(volatile u8 *addr)
 *
 * Read one byte with data relocation (MSR_DR) turned off: save MSR
 * in r7, clear MSR_DR, set HID4 bit 0x100<<32 (saving the old HID4
 * in r6) and slbia, perform the lbz, then restore HID4, slbia again
 * and restore the original MSR.  Result returned in r3.
 */
  237_GLOBAL(real_readb)
  238        mfmsr   r7
  239        ori     r0,r7,MSR_DR
  240        xori    r0,r0,MSR_DR
  241        sync
  242        mtmsrd  r0
  243        sync
  244        isync
  245        mfspr   r6,SPRN_HID4
  246        rldicl  r5,r6,32,0
  247        ori     r5,r5,0x100
  248        rldicl  r5,r5,32,0
  249        sync
  250        mtspr   SPRN_HID4,r5
  251        isync
  252        slbia
  253        isync
  254        lbz     r3,0(r3)
  255        sync
  256        mtspr   SPRN_HID4,r6
  257        isync
  258        slbia
  259        isync
  260        mtmsrd  r7
  261        sync
  262        isync
  263        blr
 264
  265/*
  266 * Do an IO access in real mode
  267 */
/*
 * void real_writeb(u8 val, volatile u8 *addr)
 *
 * Mirror of real_readb above: r3 = byte value, r4 = address.  Same
 * MSR_DR-off / HID4 / slbia bracketing, with stb instead of lbz.
 */
  268_GLOBAL(real_writeb)
  269        mfmsr   r7
  270        ori     r0,r7,MSR_DR
  271        xori    r0,r0,MSR_DR
  272        sync
  273        mtmsrd  r0
  274        sync
  275        isync
  276        mfspr   r6,SPRN_HID4
  277        rldicl  r5,r6,32,0
  278        ori     r5,r5,0x100
  279        rldicl  r5,r5,32,0
  280        sync
  281        mtspr   SPRN_HID4,r5
  282        isync
  283        slbia
  284        isync
  285        stb     r3,0(r4)
  286        sync
  287        mtspr   SPRN_HID4,r6
  288        isync
  289        slbia
  290        isync
  291        mtmsrd  r7
  292        sync
  293        isync
  294        blr
 295#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
 296
 297#ifdef CONFIG_PPC_PASEMI
 298
/*
 * u8 real_205_readb(volatile u8 *addr)
 *
 * PA Semi variant: clear MSR_DR and use the cache-inhibited load
 * LBZCIX (r3 = address in, byte out) instead of the HID4 dance,
 * then restore the saved MSR from r7.
 */
  299_GLOBAL(real_205_readb)
  300        mfmsr   r7
  301        ori     r0,r7,MSR_DR
  302        xori    r0,r0,MSR_DR
  303        sync
  304        mtmsrd  r0
  305        sync
  306        isync
  307        LBZCIX(R3,R0,R3)
  308        isync
  309        mtmsrd  r7
  310        sync
  311        isync
  312        blr
 313
/*
 * void real_205_writeb(u8 val, volatile u8 *addr)
 *
 * Mirror of real_205_readb: r3 = byte value, r4 = address; uses the
 * cache-inhibited store STBCIX with MSR_DR cleared.
 */
  314_GLOBAL(real_205_writeb)
  315        mfmsr   r7
  316        ori     r0,r7,MSR_DR
  317        xori    r0,r0,MSR_DR
  318        sync
  319        mtmsrd  r0
  320        sync
  321        isync
  322        STBCIX(R3,R0,R4)
  323        isync
  324        mtmsrd  r7
  325        sync
  326        isync
  327        blr
 328
 329#endif /* CONFIG_PPC_PASEMI */
 330
 331
 332#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
 333/*
 334 * SCOM access functions for 970 (FX only for now)
 335 *
 336 * unsigned long scom970_read(unsigned int address);
 337 * void scom970_write(unsigned int address, unsigned long value);
 338 *
 339 * The address passed in is the 24 bits register address. This code
 340 * is 970 specific and will not check the status bits, so you should
 341 * know what you are doing.
 342 */
/*
 * With external interrupts disabled (MSR saved in r4), write the
 * shifted SCOM address plus the read (RW) bit to SCOMC, read the
 * data from SCOMD, then read SCOMC once more to drain the status
 * (discarded into r0).  Result returned in r3.
 */
  343_GLOBAL(scom970_read)
  344        /* interrupts off */
  345        mfmsr   r4
  346        ori     r0,r4,MSR_EE
  347        xori    r0,r0,MSR_EE
  348        mtmsrd  r0,1
  349
  350        /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
  351         * (including parity). On current CPUs they must be 0'd,
  352         * and finally or in RW bit
  353         */
  354        rlwinm  r3,r3,8,0,15
  355        ori     r3,r3,0x8000
  356
  357        /* do the actual scom read */
  358        sync
  359        mtspr   SPRN_SCOMC,r3
  360        isync
  361        mfspr   r3,SPRN_SCOMD
  362        isync
  363        mfspr   r0,SPRN_SCOMC
  364        isync
  365
  366        /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
  367         * that's the best we can do). Not implemented yet as we don't use
  368         * the scom on any of the bogus CPUs yet, but may have to be done
  369         * ultimately
  370         */
  371
  372        /* restore interrupts */
  373        mtmsrd  r4,1
  374        blr
 375
 376
 377_GLOBAL(scom970_write)
 378        /* interrupts off */
 379        mfmsr   r5
 380        ori     r0,r5,MSR_EE
 381        xori    r0,r0,MSR_EE
 382        mtmsrd  r0,1
 383
 384        /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
 385         * (including parity). On current CPUs they must be 0'd.
 386         */
 387
 388        rlwinm  r3,r3,8,0,15
 389
 390        sync
 391        mtspr   SPRN_SCOMD,r4      /* write data */
 392        isync
 393        mtspr   SPRN_SCOMC,r3      /* write command */
 394        isync
 395        mfspr   3,SPRN_SCOMC
 396        isync
 397
 398        /* restore interrupts */
 399        mtmsrd  r5,1
 400        blr
 401#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
 402
 403/* kexec_wait(phys_cpu)
 404 *
 405 * wait for the flag to change, indicating this kernel is going away but
 406 * the slave code for the next one is at addresses 0 to 100.
 407 *
 408 * This is used by all slaves, even those that did not find a matching
 409 * paca in the secondary startup code.
 410 *
 411 * Physical (hardware) cpu id should be in r3.
 412 */
/*
 * The bl 1f / mflr r5 pair computes the address of kexec_flag
 * PC-relatively (this code may run at a different address than it
 * was linked at).  CPUs then spin at low SMT priority (HMT_LOW)
 * polling the flag; once it is non-zero they enter the new kernel's
 * slave entry at absolute address 0x60 — on Book3S via rfid with
 * MSR_LE cleared, on Book3E via an absolute branch (ba) after the
 * identity TLB mapping was set up earlier.
 */
  413_GLOBAL(kexec_wait)
  414        bl      1f
  4151:      mflr    r5
  416        addi    r5,r5,kexec_flag-1b
  417
  41899:     HMT_LOW
  419#ifdef CONFIG_KEXEC_CORE        /* use no memory without kexec */
  420        lwz     r4,0(r5)
  421        cmpwi   0,r4,0
  422        beq     99b
  423#ifdef CONFIG_PPC_BOOK3S_64
  424        li      r10,0x60
  425        mfmsr   r11
  426        clrrdi  r11,r11,1       /* Clear MSR_LE */
  427        mtsrr0  r10
  428        mtsrr1  r11
  429        rfid
  430#else
  431        /* Create TLB entry in book3e_secondary_core_init */
  432        li      r4,0
  433        ba      0x60
  434#endif
  435#endif
  436
  437/* this can be in text because we won't change it until we are
  438 * running in real anyways
  439 */
kexec_flag:
  441        .long   0
 442
 443
 444#ifdef CONFIG_KEXEC_CORE
 445#ifdef CONFIG_PPC_BOOK3E
 446/*
 447 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 448 * for a core to identity map v:0 to p:0.  This current implementation
 449 * assumes that 1G is enough for kexec.
 450 */
/*
 * Book3E only: build a 1GB IPROT TLB1 entry mapping EA 0 -> RA 0
 * (SR/SW/SX permissions) in the last TLB1 slot, after invalidating
 * all non-IPROT entries.  Called from real_mode below in place of a
 * Book3S MMU-off transition.  Clobbers r9, r10.
 */
kexec_create_tlb:
  452        /*
  453         * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
  454         * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
  455         */
  456        PPC_TLBILX_ALL(0,R0)
  457        sync
  458        isync
  459
  460        mfspr   r10,SPRN_TLB1CFG
  461        andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
  462        subi    r10,r10,1       /* Last entry: no conflict with kernel text */
  463        lis     r9,MAS0_TLBSEL(1)@h
  464        rlwimi  r9,r10,16,4,15          /* Setup MAS0 = TLBSEL | ESEL(r9) */
  465
  466/* Set up a temp identity mapping v:0 to p:0 and return to it. */
  467#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
  468#define M_IF_NEEDED     MAS2_M
  469#else
  470#define M_IF_NEEDED     0
  471#endif
  472        mtspr   SPRN_MAS0,r9
  473
  474        lis     r9,(MAS1_VALID|MAS1_IPROT)@h
  475        ori     r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
  476        mtspr   SPRN_MAS1,r9
  477
  478        LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
  479        mtspr   SPRN_MAS2,r9
  480
  481        LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
  482        mtspr   SPRN_MAS3,r9
  483        li      r9,0
  484        mtspr   SPRN_MAS7,r9
  485
  486        tlbwe
  487        isync
  488        blr
 489#endif
 490
 491/* kexec_smp_wait(void)
 492 *
 493 * call with interrupts off
 494 * note: this is a terminal routine, it does not save lr
 495 *
 496 * get phys id from paca
 497 * switch to real mode
 498 * mark the paca as no longer used
 499 * join other cpus in kexec_wait(phys_id)
 500 */
/*
 * Terminal routine (does not save LR): fetch our hardware CPU id
 * from the paca into r3, drop to real mode, mark this paca's kexec
 * state as KEXEC_STATE_REAL_MODE, and join the other CPUs spinning
 * in kexec_wait.
 */
  501_GLOBAL(kexec_smp_wait)
  502        lhz     r3,PACAHWCPUID(r13)
  503        bl      real_mode
  504
  505        li      r4,KEXEC_STATE_REAL_MODE
  506        stb     r4,PACAKEXECSTATE(r13)
  507        SYNC
  508
  509        b       kexec_wait
 510
 511/*
 512 * switch to real mode (turn mmu off)
 513 * we use the early kernel trick that the hardware ignores bits
 514 * 0 and 1 (big endian) of the effective address in real mode
 515 *
 516 * don't overwrite r3 here, it is live for kexec_wait above.
 517 */
/*
 * Return to the caller (LR, via SRR0) with MSR_DR, MSR_IR and MSR_RI
 * cleared — i.e. "blr" into real mode.  On Book3E there is no real
 * mode, so branch to kexec_create_tlb to set up an identity mapping
 * instead.  Preserves r3 (live for kexec_wait).  Clobbers r9-r12.
 */
real_mode:      /* assume normal blr return */
  519#ifdef CONFIG_PPC_BOOK3E
  520        /* Create an identity mapping. */
  521        b       kexec_create_tlb
  522#else
  5231:      li      r9,MSR_RI
  524        li      r10,MSR_DR|MSR_IR
  525        mflr    r11             /* return address to SRR0 */
  526        mfmsr   r12
  527        andc    r9,r12,r9
  528        andc    r10,r12,r10
  529
  530        mtmsrd  r9,1
  531        mtspr   SPRN_SRR1,r10
  532        mtspr   SPRN_SRR0,r11
  533        rfid
  534#endif
 535
 536/*
 537 * kexec_sequence(newstack, start, image, control, clear_all(),
 538                  copy_with_mmu_off)
 539 *
 540 * does the grungy work with stack switching and real mode switches
 541 * also does simple calls to other code
 542 */
 543
/*
 * kexec_sequence(newstack, start, image, control, clear_all,
 *                copy_with_mmu_off)
 * r3..r8 as named below.  Never returns: switches to the kexec
 * stack, disables interrupts, drops to real mode (before or after
 * copying the image, depending on copy_with_mmu_off), copies the
 * new kernel's first 0x100 bytes to address 0, releases the
 * secondaries by setting kexec_flag, optionally calls the
 * clear_all() MMU-teardown hook, and finally blr's into the new
 * kernel's entry point with LR loaded from r30.
 */
  544_GLOBAL(kexec_sequence)
  545        mflr    r0
  546        std     r0,16(r1)
  547
  548        /* switch stacks to newstack -- &kexec_stack.stack */
  549        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
  550        mr      r1,r3
  551
  552        li      r0,0
  553        std     r0,16(r1)
  554
  555BEGIN_FTR_SECTION
  556        /*
  557         * This is the best time to turn AMR/IAMR off.
  558         * key 0 is used in radix for supervisor<->user
  559         * protection, but on hash key 0 is reserved
  560         * ideally we want to enter with a clean state.
  561         * NOTE, we rely on r0 being 0 from above.
  562         */
  563        mtspr   SPRN_IAMR,r0
  564BEGIN_FTR_SECTION_NESTED(42)
  565        mtspr   SPRN_AMOR,r0
  566END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
  567END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  568
  569        /* save regs for local vars on new stack.
  570         * yes, we won't go back, but ...
  571         */
  572        std     r31,-8(r1)
  573        std     r30,-16(r1)
  574        std     r29,-24(r1)
  575        std     r28,-32(r1)
  576        std     r27,-40(r1)
  577        std     r26,-48(r1)
  578        std     r25,-56(r1)
  579
  580        stdu    r1,-STACK_FRAME_OVERHEAD-64(r1)
  581
  582        /* save args into preserved regs */
  583        mr      r31,r3                  /* newstack (both) */
  584        mr      r30,r4                  /* start (real) */
  585        mr      r29,r5                  /* image (virt) */
  586        mr      r28,r6                  /* control, unused */
  587        mr      r27,r7                  /* clear_all() fn desc */
  588        mr      r26,r8                  /* copy_with_mmu_off */
  589        lhz     r25,PACAHWCPUID(r13)    /* get our phys cpu from paca */
  590
  591        /* disable interrupts, we are overwriting kernel data next */
  592#ifdef CONFIG_PPC_BOOK3E
  593        wrteei  0
  594#else
  595        mfmsr   r3
  596        rlwinm  r3,r3,0,17,15
  597        mtmsrd  r3,1
  598#endif
  599
  600        /* We need to turn the MMU off unless we are in hash mode
  601         * under a hypervisor
  602         */
  603        cmpdi   r26,0
  604        beq     1f
  605        bl      real_mode
  6061:
  607        /* copy dest pages, flush whole dest image */
  608        mr      r3,r29
  609        bl      kexec_copy_flush        /* (image) */
  610
  611        /* turn off mmu now if not done earlier */
  612        cmpdi   r26,0
  613        bne     1f
  614        bl      real_mode
  615
  616        /* copy  0x100 bytes starting at start to 0 */
  6171:      li      r3,0
  618        mr      r4,r30          /* start, aka phys mem offset */
  619        li      r5,0x100
  620        li      r6,0
  621        bl      copy_and_flush  /* (dest, src, copy limit, start offset) */
  6221:      /* assume normal blr return */
  623
  624        /* release other cpus to the new kernel secondary start at 0x60 */
  625        mflr    r5
        /* NOTE: bare "5" below is GPR r5 (address of kexec_flag from the
         * mflr above), not an immediate */
  626        li      r6,1
  627        stw     r6,kexec_flag-1b(5)
  628
  629        cmpdi   r27,0
  630        beq     1f
  631
  632        /* clear out hardware hash page table and tlb */
  633#ifdef PPC64_ELF_ABI_v1
  634        ld      r12,0(r27)              /* deref function descriptor */
  635#else
  636        mr      r12,r27
  637#endif
  638        mtctr   r12
  639        bctrl                           /* mmu_hash_ops.hpte_clear_all(void); */
  640
  641/*
  642 *   kexec image calling is:
  643 *      the first 0x100 bytes of the entry point are copied to 0
  644 *
  645 *      all slaves branch to slave = 0x60 (absolute)
  646 *              slave(phys_cpu_id);
  647 *
  648 *      master goes to start = entry point
  649 *              start(phys_cpu_id, start, 0);
  650 *
  651 *
  652 *   a wrapper is needed to call existing kernels, here is an approximate
  653 *   description of one method:
  654 *
  655 * v2: (2.6.10)
  656 *   start will be near the boot_block (maybe 0x100 bytes before it?)
  657 *   it will have a 0x60, which will b to boot_block, where it will wait
  658 *   and 0 will store phys into struct boot-block and load r3 from there,
  659 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
  660 *
  661 * v1: (2.6.9)
  662 *    boot block will have all cpus scanning device tree to see if they
  663 *    are the boot cpu ?????
  664 *    other device tree differences (prop sizes, va vs pa, etc)...
  665 */
  6661:      mr      r3,r25  # my phys cpu
  667        mr      r4,r30  # start, aka phys mem offset
  668        mtlr    4       # bare "4" == r4: LR = entry point
  669        li      r5,0
  670        blr     /* image->start(physid, image->start, 0); */
 671#endif /* CONFIG_KEXEC_CORE */
 672