/* uboot/arch/arm/lib/memset.S */
   1/*
   2 *  linux/arch/arm/lib/memset.S
   3 *
   4 *  Copyright (C) 1995-2000 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 *  ASM optimised string functions
  11 */
  12#include <linux/linkage.h>
  13#include <asm/assembler.h>
  14
  15        .text
  16        .align  5
  17
  18        .syntax unified
  19#if defined(CONFIG_SYS_THUMB_BUILD) && !defined(MEMSET_NO_THUMB_BUILD)
  20        .thumb
  21        .thumb_func
  22#endif
/*
 * void *memset(void *s, int c, size_t n)
 *
 * AAPCS in:  r0 = s (destination), r1 = c (fill value; only the low
 *            byte is meaningful), r2 = n (byte count).
 * Out:       r0 = s, unchanged.
 * Clobbers:  r1, r2, r3, ip, flags.  r8 / r4-r8 and lr are used by the
 *            bulk loops but are saved and restored around that use.
 *
 * Strategy: the running destination pointer lives in ip so r0 survives
 * as the return value.  An unaligned head is written byte-wise (label 6),
 * then the fill byte is replicated to a full word and stored in large
 * multi-register bursts, with the remainder drained by testing individual
 * bits of the residual count.
 */
   23ENTRY(memset)
   24        ands    r3, r0, #3              @ 1 unaligned?
   25        mov     ip, r0                  @ preserve r0 as return value
   26        bne     6f                      @ 1
   27/*
   28 * we know that the pointer in ip is aligned to a word boundary.
   29 */
        @ Replicate the fill byte into all four bytes of r1, and copy it
        @ into r3 so stores can burst at least two registers at a time.
   301:      orr     r1, r1, r1, lsl #8
   31        orr     r1, r1, r1, lsl #16
   32        mov     r3, r1
   33        cmp     r2, #16
   34        blt     4f
   35
   36#if ! CALGN(1)+0
   37
   38/*
   39 * We need 2 extra registers for this loop - use r8 and the LR
   40 */
   41        stmfd   sp!, {r8, lr}
   42        mov     r8, r1
   43        mov     lr, r1
   44
        @ Bulk loop: four 4-register stores cover 64 bytes per pass.  The
        @ "ge" condition makes the stores no-ops on the final iteration
        @ when the count underflows below zero.
   452:      subs    r2, r2, #64
   46        stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
   47        stmiage ip!, {r1, r3, r8, lr}
   48        stmiage ip!, {r1, r3, r8, lr}
   49        stmiage ip!, {r1, r3, r8, lr}
   50        bgt     2b
   51        ldmfdeq sp!, {r8, pc}           @ Now <64 bytes to go.
   52/*
   53 * No need to correct the count; we're only testing bits from now on
   54 */
        @ r2 is now (remaining - 64), i.e. negative, but its low six bits
        @ still equal the true remainder, so bit tests stay valid: drain a
        @ 32-byte chunk (bit 5), then a 16-byte chunk (bit 4).
   55        tst     r2, #32
   56        stmiane ip!, {r1, r3, r8, lr}
   57        stmiane ip!, {r1, r3, r8, lr}
   58        tst     r2, #16
   59        stmiane ip!, {r1, r3, r8, lr}
   60        ldmfd   sp!, {r8, lr}
   61
   62#else
   63
   64/*
   65 * This version aligns the destination pointer in order to write
   66 * whole cache lines at once.
   67 */
   68
        @ Spread the fill word across r4-r8 and lr so a single stmia can
        @ write a whole 32-byte cache line.
   69        stmfd   sp!, {r4-r8, lr}
   70        mov     r4, r1
   71        mov     r5, r1
   72        mov     r6, r1
   73        mov     r7, r1
   74        mov     r8, r1
   75        mov     lr, r1
   76
        @ Only bother aligning when more than 96 bytes remain AND ip is
        @ not already on a 32-byte boundary; otherwise go straight to the
        @ bulk loop at 3.
   77        cmp     r2, #96
   78        tstgt   ip, #31
   79        ble     3f
   80
        @ r8 = bytes up to the next 32-byte boundary (a multiple of 4,
        @ since ip is word-aligned here).  Shifting it left by 28 parks
        @ bit 4 in the carry flag and bit 3 in the sign flag, so the two
        @ conditional stores below write 16 and/or 8 alignment bytes;
        @ bit 2 (a final 4 bytes) is tested explicitly afterwards.
   81        and     r8, ip, #31
   82        rsb     r8, r8, #32
   83        sub     r2, r2, r8
   84        movs    r8, r8, lsl #(32 - 4)
   85        stmiacs ip!, {r4, r5, r6, r7}
   86        stmiami ip!, {r4, r5}
   87        tst     r8, #(1 << 30)
   88        mov     r8, r1                  @ restore r8 as a fill register (mov leaves flags intact)
   89        strne   r1, [ip], #4
   90
        @ Cache-line loop: two 8-register stores = 64 bytes per pass;
        @ same underflow/"ge" trick as the non-CALGN loop above.
   913:      subs    r2, r2, #64
   92        stmiage ip!, {r1, r3-r8, lr}
   93        stmiage ip!, {r1, r3-r8, lr}
   94        bgt     3b
   95        ldmfdeq sp!, {r4-r8, pc}
   96
        @ Residue: bits of r2 remain valid (see note above) — drain a
        @ 32-byte chunk, then a 16-byte chunk.
   97        tst     r2, #32
   98        stmiane ip!, {r1, r3-r8, lr}
   99        tst     r2, #16
  100        stmiane ip!, {r4-r7}
  101        ldmfd   sp!, {r4-r8, lr}
  102
  103#endif
  104
        @ Word-sized tail: 8 bytes if bit 3 set, then 4 bytes if bit 2 set.
  1054:      tst     r2, #8
  106        stmiane ip!, {r1, r3}
  107        tst     r2, #4
  108        strne   r1, [ip], #4
  109/*
  110 * When we get here, we've got less than 4 bytes to zero.  We
  111 * may have an unaligned pointer as well.
  112 */
        @ Byte tail.  Also reached from 6f with r2 = n - 4 (negative) when
        @ n < 4: since 4 is a multiple of 4, bits 1..0 of r2 still equal
        @ n & 3, so these tests store exactly the right number of bytes.
  1135:      tst     r2, #2
  114        strbne  r1, [ip], #1
  115        strbne  r1, [ip], #1
  116        tst     r2, #1
  117        strbne  r1, [ip], #1
  118        ret     lr
  119
        @ Unaligned head: r3 = s & 3 (1, 2 or 3).  Store (4 - r3) single
        @ bytes to reach a word boundary — r3 == 1 takes all three strb's,
        @ r3 == 2 the last two, r3 == 3 only the unconditional one — then
        @ fix up the count and rejoin the aligned path at 1b.
  1206:      subs    r2, r2, #4              @ 1 do we have enough
  121        blt     5b                      @ 1 bytes to align with?
  122        cmp     r3, #2                  @ 1
  123        strblt  r1, [ip], #1            @ 1
  124        strble  r1, [ip], #1            @ 1
  125        strb    r1, [ip], #1            @ 1
  126        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
  127        b       1b
  128ENDPROC(memset)
 129