linux/arch/frv/lib/atomic-ops.S
/* atomic-ops.S: kernel atomic operations
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/spr-regs.h>

        .text
        .balign 4

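###############################################################################
#
# All of the routines below use the same conditional-store retry loop: ICC3.Z
# is set, CKEQ copies it into CC7, LD.P fetches the old value while the paired
# ORCR transfers CC7 into CC3, the new value is computed, and CST.P stores it
# only while CC3 is still true.  If the store goes through, the paired CORCC
# clears ICC3.Z and the closing BEQ falls through; if the sequence is
# disturbed, the conditional store is cancelled, ICC3.Z stays set and the BEQ
# loops back to retry (see Documentation/frv/atomic-ops.txt for the full
# explanation).
#
# The C-level sketches accompanying each routine are illustrative only and
# assume the usual FRV calling convention (arguments arriving in gr8, gr9,
# gr10, ... and the result returned in gr8); roughly:
#
#	do {
#		old = *v;
#		new = <operation>(old, argument);
#	} while (the conditional store of new to *v was cancelled);
#
###############################################################################
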
###############################################################################
#
# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
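#
# Illustrative C-level sketch only (see the note above); returns the old
# value:
#
#	old = *v;
#	*v = old & ~mask;
#	return old;
#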
        .globl          atomic_test_and_ANDNOT_mask
        .type           atomic_test_and_ANDNOT_mask,@function
atomic_test_and_ANDNOT_mask:
        not.p           gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        and             gr8,gr10,gr11
        cst.p           gr11,@(gr9,gr0)         ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask

###############################################################################
#
# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
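#
# Illustrative C-level sketch only (see the note above); returns the old
# value:
#
#	old = *v;
#	*v = old | mask;
#	return old;
#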
        .globl          atomic_test_and_OR_mask
        .type           atomic_test_and_OR_mask,@function
atomic_test_and_OR_mask:
        or.p            gr8,gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        or              gr8,gr10,gr11
        cst.p           gr11,@(gr9,gr0)         ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           atomic_test_and_OR_mask, .-atomic_test_and_OR_mask

###############################################################################
#
# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
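#
# Illustrative C-level sketch only (see the note above); returns the old
# value:
#
#	old = *v;
#	*v = old ^ mask;
#	return old;
#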
        .globl          atomic_test_and_XOR_mask
        .type           atomic_test_and_XOR_mask,@function
atomic_test_and_XOR_mask:
        or.p            gr8,gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        xor             gr8,gr10,gr11
        cst.p           gr11,@(gr9,gr0)         ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask

###############################################################################
#
# int atomic_add_return(int i, atomic_t *v)
#
###############################################################################
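#
# Illustrative C-level sketch only (see the note above); unlike the mask
# routines, this returns the new value:
#
#	new = *v + i;
#	*v = new;
#	return new;
#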
        .globl          atomic_add_return
        .type           atomic_add_return,@function
atomic_add_return:
        or.p            gr8,gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        add             gr8,gr10,gr8
        cst.p           gr8,@(gr9,gr0)          ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           atomic_add_return, .-atomic_add_return

###############################################################################
#
# int atomic_sub_return(int i, atomic_t *v)
#
###############################################################################
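#
# Illustrative C-level sketch only (see the note above); returns the new
# value:
#
#	new = *v - i;
#	*v = new;
#	return new;
#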
        .globl          atomic_sub_return
        .type           atomic_sub_return,@function
atomic_sub_return:
        or.p            gr8,gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        sub             gr8,gr10,gr8
        cst.p           gr8,@(gr9,gr0)          ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           atomic_sub_return, .-atomic_sub_return

###############################################################################
#
# uint32_t __xchg_32(uint32_t i, uint32_t *v)
#
###############################################################################
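#
# Illustrative C-level sketch only (see the note above); returns the old
# value:
#
#	old = *v;
#	*v = i;
#	return old;
#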
        .globl          __xchg_32
        .type           __xchg_32,@function
__xchg_32:
        or.p            gr8,gr8,gr10
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr9,gr0),gr8                  /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        cst.p           gr10,@(gr9,gr0)         ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           __xchg_32, .-__xchg_32

###############################################################################
#
# uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new)
#
###############################################################################
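#
# Illustrative C-level sketch only (see the note above); the store is skipped
# and the old value is returned unchanged if it doesn't match 'test':
#
#	old = *v;
#	if (old == test)
#		*v = new;
#	return old;
#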
        .globl          __cmpxchg_32
        .type           __cmpxchg_32,@function
__cmpxchg_32:
        or.p            gr8,gr8,gr11
0:
        orcc            gr0,gr0,gr0,icc3                /* set ICC3.Z */
        ckeq            icc3,cc7
        ld.p            @(gr11,gr0),gr8                 /* LD.P/ORCR must be atomic */
        orcr            cc7,cc7,cc3                     /* set CC3 to true */
        subcc           gr8,gr9,gr7,icc0                /* compare the old value with 'test' */
        bnelr           icc0,#0                         /* return the old value if they differ */
        cst.p           gr10,@(gr11,gr0)        ,cc3,#1
        corcc           gr29,gr29,gr0           ,cc3,#1 /* clear ICC3.Z if store happens */
        beq             icc3,#0,0b
        bralr

        .size           __cmpxchg_32, .-__cmpxchg_32