/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>                  /* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)


/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to, e.g. by a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner, so it should have virtually no cost.
 */

#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        asm volatile (                                           \
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
                "       MOV D1Ar1,#0\n"                          \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
                "       .previous\n"                             \
                "       .section __ex_table,\"a\"\n"             \
                TENTRY                                           \
                "       .previous\n"                             \
                : "=r" (to), "=r" (from), "=r" (ret)             \
                : "0" (to), "1" (from), "2" (ret)                \
                : "D1Ar1", "memory")


#define __asm_copy_to_user_1(to, from, ret)     \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")
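
/* Illustrative only: roughly what __asm_copy_to_user_1() expands to,
 * showing how COPY, FIXUP and TENTRY slot into __asm_copy_user_cont()
 * and how the __ex_table entry ties the faulting store to its fixup.
 * The double SETB is the "double write" trick described above.
 *
 *              GETB D1Ar1,[%1++]       ! fetch one byte from the source
 *              SETB [%0],D1Ar1         ! first write; the fault may be lazy
 *      2:      SETB [%0++],D1Ar1       ! second write forces any fault
 *      1:
 *              .section .fixup,"ax"
 *              MOV  D1Ar1,#0
 *      3:      ADD  %2,%2,#1           ! one more byte left uncopied
 *              MOVT D1Ar1,#HI(1b)
 *              JUMP D1Ar1,#LO(1b)      ! resume at 1: after the fixup
 *              .previous
 *              .section __ex_table,"a"
 *              .long 2b,3b             ! fault at 2: is handled at 3:
 *              .previous
 */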

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "4:     SETW [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "6:     SETW [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "8:     SETW [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "10:    SETB [%0++],D1Ar1\n",           \
                "11:    ADD  %2,%2,#1\n",               \
                "       .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
        __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
        asm volatile (                                  \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %2,%2,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*
 *      optimized copying loop using RAPF when 64 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. If an error occurs we will rewind
 *                      so that the original non-optimized code will fill up
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline, which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETL at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fixup code.
 *              Since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/getting as it thinks that the first one has already
 *              been done.
 *
 */
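/* Structure of the loop below: each pass through $Lloop moves 64 bytes
 * as two MGETL/MSETL pairs of 32 bytes each, decrementing n by 32 after
 * each store, while RAPF is kept pointing one 64-byte cacheline ahead of
 * the source. The tail after the loop copies a final 64 bytes, re-issuing
 * the last 8 bytes with the plain SETL at label 29 (the "extra SETL"
 * mentioned above) so that a fault on the very last unit is still caught.
 * The 32 bit variant further down is built the same way from MGETD/MSETD
 * pairs of 16 bytes with a trailing SETD.
 */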
#define __asm_copy_user_64bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "23:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "25:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "27:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #8\n"                                   \
                "29:\n"                                                 \
                "SETL   [%0++], D0.7, D1.7\n"                           \
                "SUB    %3, %3, #32\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #8\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind 'to' by only 8
 *              bytes (the extra SETL).
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it as well. The amount to rewind
 *              equals the number of faulty writes in the MSETL, which is:
 *              [4 - (LSM_STEP-1)]*8
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #3\n"                             \
                "LSL    D1Ar1, D1Ar1, #3\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #8\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

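/* Illustrative only: a C model of the rewind arithmetic implemented by
 * the FIXUP fragment above (and by its 32 bit sibling further down),
 * assuming 'lsm_step' holds bits 10:8 of TXSTATUS and 'unit' is the
 * transfer size (8 for the M{G,S}ETL loop, 4 for the M{G,S}ETD loop).
 * The function names are hypothetical and exist only for this sketch.
 */
#if 0   /* documentation sketch, never compiled */
static unsigned long rapf_src_rewind(unsigned long lsm_step,
                                     unsigned long unit)
{
        if (lsm_step == 0)              /* fault at the 4th (last) op */
                lsm_step = 4;
        /* bytes of the 4-op MSET that never reached memory */
        return (4 - (lsm_step - 1)) * unit;
}

static unsigned long rapf_n_adjust(unsigned long lsm_step,
                                   unsigned long unit)
{
        if (lsm_step == 0)
                lsm_step = 4;
        /* bytes that did reach memory before the fault */
        return (lsm_step - 1) * unit;
}
/* e.g. a fault with LSM_STEP == 2 in the 64 bit loop rewinds 'from'
 * by (4 - 1) * 8 == 24 bytes and charges (2 - 1) * 8 == 8 bytes to n.
 */
#endif
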
/*
 *      optimized copying loop using RAPF when 32 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. If an error occurs we will rewind
 *                      so that the original non-optimized code will fill up
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline, which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETD at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fixup code.
 *              Since we're using M{S,G}ETD, a fault might happen at
 *              any address in the middle of M{S,G}ETD, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/getting as it thinks that the first one has already
 *              been done.
 *
 */
#define __asm_copy_user_32bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "23:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "25:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "27:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "29:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "30:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "31:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "32:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "33:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "34:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "35:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "36:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #4\n"                                   \
                "37:\n"                                                 \
                "SETD   [%0++], D0.7\n"                                 \
                "SUB    %3, %3, #16\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #4\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,3b\n"                                 \
                "       .long 30b,3b\n"                                 \
                "       .long 31b,3b\n"                                 \
                "       .long 32b,3b\n"                                 \
                "       .long 33b,3b\n"                                 \
                "       .long 34b,3b\n"                                 \
                "       .long 35b,3b\n"                                 \
                "       .long 36b,3b\n"                                 \
                "       .long 37b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind 'to' by only 4
 *              bytes (the extra SETD).
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it as well. The amount to rewind
 *              equals the number of faulty writes in the MSETD, which is:
 *              [4 - (LSM_STEP-1)]*4
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETD
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*4
 */
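/* For example: a fault with LSM_STEP == 3 rewinds 'from' by
 * (4 - 2) * 4 == 8 bytes and charges (3 - 1) * 4 == 8 bytes to n; the
 * C sketch after the 64 bit rewind helper above gives the general rule.
 */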
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #2\n"                             \
                "LSL    D1Ar1, D1Ar1, #2\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #4\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

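/*
 * Copy a block of kernel memory to user space.  Returns the number of
 * bytes that could not be copied (0 on success).
 */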
unsigned long __copy_user(void __user *pdst, const void *psrc,
                          unsigned long n)
{
        register char __user *dst asm ("A0.2") = pdst;
        register const char *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_to_user_1(dst, src, retn);
                n--;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_to_user_1(dst, src, retn);
                        n--;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_to_user_2(dst, src, retn);
                n -= 2;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_to_user_2(dst, src, retn);
                        n -= 2;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* copy user using 64 bit rapf copy */
                        __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cu");
                }
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
        if (n >= RAPF_MIN_BUF_SIZE) {
                /* copy user using 32 bit rapf copy */
                __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
#endif

        while (n >= 16) {
                __asm_copy_to_user_16(dst, src, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_copy_to_user_4(dst, src, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_copy_to_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_to_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_to_user_3(dst, src, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__copy_user);
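
/* Illustrative only: a typical call pattern for __copy_user(). 'ubuf',
 * 'kbuf' and 'len' are hypothetical names; real callers normally go
 * through the copy_to_user() wrapper in asm-metag/uaccess.h.
 */
#if 0   /* documentation sketch, never compiled */
static long example_send_to_user(void __user *ubuf, const void *kbuf,
                                 unsigned long len)
{
        unsigned long uncopied = __copy_user(ubuf, kbuf, len);

        if (uncopied)           /* 'uncopied' bytes were not written */
                return -EFAULT;
        return 0;
}
#endif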

#define __asm_copy_from_user_1(to, from, ret) \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n"        \
                "       SETB [%0++],D1Ar1\n",   \
                "       .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
        __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)           \
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 4b,5b\n")

#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_4x_cont(to, from, ret,     \
                "       GETW D1Ar1,[%1++]\n"            \
                "4:     SETW [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_from_user_6(to, from, ret) \
        __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret) \
        __asm_copy_from_user_6x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 6b,7b\n")

#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_4x_cont(to, from, ret,     \
                "       GETD D1Ar1,[%1++]\n"            \
                "4:     SETD [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_from_user_8(to, from, ret) \
        __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret) \
        __asm_copy_from_user_8x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 6b,7b\n")

#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_8x_cont(to, from, ret,     \
                "       GETW D1Ar1,[%1++]\n"            \
                "6:     SETW [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_from_user_10(to, from, ret) \
        __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret)          \
        __asm_copy_from_user_10x_cont(to, from, ret,    \
                "       GETB D1Ar1,[%1++]\n"            \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 8b,9b\n")

#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_8x_cont(to, from, ret,     \
                "       GETD D1Ar1,[%1++]\n"            \
                "6:     SETD [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_from_user_12(to, from, ret) \
        __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret) \
        __asm_copy_from_user_12x_cont(to, from, ret,    \
                "       GETB D1Ar1,[%1++]\n"            \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 8b,9b\n")

#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_12x_cont(to, from, ret,    \
                "       GETW D1Ar1,[%1++]\n"            \
                "8:     SETW [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_from_user_14(to, from, ret) \
        __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret) \
        __asm_copy_from_user_14x_cont(to, from, ret,    \
                "       GETB D1Ar1,[%1++]\n"            \
                "10:    SETB [%0++],D1Ar1\n",           \
                "11:    ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 10b,11b\n")

#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_from_user_12x_cont(to, from, ret,    \
                "       GETD D1Ar1,[%1++]\n"            \
                "8:     SETD [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_from_user_16(to, from, ret) \
        __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "       MOV D1Ar1,#0\n"                 \
                "       MOV D0Ar2,#0\n"                 \
                "3:     ADD  %2,%2,#8\n"                \
                "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=a" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 8 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              succeeded, the kernel buffer is in the right state and needs
 *              no correction.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #8\n")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 4 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              succeeded, the kernel buffer is in the right state and needs
 *              no correction.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #4\n")


/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return value is the number of bytes that were
   inaccessible.  */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                                  unsigned long n)
{
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_from_user_1(dst, src, retn);
                n--;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

        /* We only need one check after the alignment adjustments,
           because if both adjustments were done, either both or
           neither reference had an exception.  */
        if (retn != 0)
                goto copy_exception_bytes;

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* Copy using fast 64bit rapf */
                        __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cuz");
                }
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

        if (n >= RAPF_MIN_BUF_SIZE) {
                /* Copy using fast 32bit rapf */
                __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
                                                n, "32cuz");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
#endif

        while (n >= 4) {
                __asm_copy_from_user_4(dst, src, retn);
                n -= 4;

                if (retn)
                        goto copy_exception_bytes;
        }

        /* If we get here, there were no memory read faults.  */
        switch (n) {
                /* These copies are at least "naturally aligned" (so we don't
                   have to check each byte), due to the src alignment code.
                   The *_3 case *will* get the correct count for retn.  */
        case 0:
                /* This case deliberately left in (if you have doubts check the
                   generated assembly code).  */
                break;
        case 1:
                __asm_copy_from_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_from_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_from_user_3(dst, src, retn);
                break;
        }

        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;

 copy_exception_bytes:
        /* We already have "retn" bytes cleared, and need to clear the
           remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
           memset is preferred here, since this isn't speed-critical code and
           we'd rather have this be a leaf function than call memset.  */
        {
                char *endp;
                for (endp = dst + n; dst < endp; dst++)
                        *dst = 0;
        }

        return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
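
/* Illustrative only: how the zeroing semantics look from the caller's
 * side. 'kbuf', 'ubuf' and 'len' are hypothetical names; real callers
 * normally use the copy_from_user() wrapper.
 */
#if 0   /* documentation sketch, never compiled */
static long example_fetch_from_user(void *kbuf, const void __user *ubuf,
                                    unsigned long len)
{
        unsigned long bad = __copy_user_zeroing(kbuf, ubuf, len);

        /* Even on failure all 'len' bytes of kbuf are defined: the
         * bytes past the fault point have been zero-filled, so kbuf
         * never leaks stale kernel data. */
        if (bad)
                return -EFAULT;
        return 0;
}
#endif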

#define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \
                "       MOV  D0Ar2,#0\n"                \
                "       MOV  D1Ar1,#0\n"                \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %1,%1,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (ret) \
                : "0" (to), "1" (ret)   \
                : "D1Ar1", "D0Ar2", "memory")

/* Zero userspace.  */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
        asm volatile (                                  \
                "       MOV D1Ar1,#0\n"                 \
                        CLEAR                           \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                        FIXUP                           \
                "       MOVT    D1Ar1,#HI(1b)\n"        \
                "       JUMP    D1Ar1,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                        TENTRY                          \
                "       .previous"                      \
                : "=r" (to), "=r" (ret)                 \
                : "0" (to), "1" (ret)                   \
                : "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETW [%0],D1Ar1\n"      \
                "2:     SETW [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#2\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
        __asm_clear(to, ret,                    \
                 "2:    SETW [%0++],D1Ar1\n"    \
                 "      SETB [%0],D1Ar1\n"      \
                 "3:    SETB [%0++],D1Ar1\n",   \
                 "4:    ADD  %1,%1,#2\n"        \
                 "5:    ADD  %1,%1,#1\n",       \
                 "      .long 2b,4b\n"          \
                 "      .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear(to, ret,                            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "3:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
        __asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_4x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "5:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
        __asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_8x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "7:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
        __asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_12x_cont(to, ret,                   \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "9:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
        __asm_clear_16x_cont(to, ret, "", "", "")

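/*
 * Zero a block of user memory.  Returns the number of bytes that could
 * not be cleared (0 on success).
 */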
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
        register char __user *dst asm ("D0Re0") = pto;
        register unsigned long n asm ("D1Re0") = pn;
        register unsigned long retn asm ("D0Ar6") = 0;

        if ((unsigned long) dst & 1) {
                __asm_clear_1(dst, retn);
                n--;
        }

        if ((unsigned long) dst & 2) {
                __asm_clear_2(dst, retn);
                n -= 2;
        }

        /* 64 bit clear loop */
        if (!((__force unsigned long) dst & 7)) {
                while (n >= 8) {
                        __asm_clear_8x64(dst, retn);
                        n -= 8;
                }
        }

        while (n >= 16) {
                __asm_clear_16(dst, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_clear_4(dst, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_clear_1(dst, retn);
                break;
        case 2:
                __asm_clear_2(dst, retn);
                break;
        case 3:
                __asm_clear_3(dst, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__do_clear_user);
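/* Each accessor below issues its GET twice, mirroring the "double write"
 * note at the top of this file: the first access may not fault until the
 * memory pipe is forced, so the second access (covered by the __ex_table
 * entry at label 1) makes sure the fault is taken here.
 */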
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
        register unsigned char x asm ("D0Re0") = 0;
        asm volatile (
                "       GETB %0,[%2]\n"
                "1:\n"
                "       GETB %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
        register unsigned short x asm ("D0Re0") = 0;
        asm volatile (
                "       GETW %0,[%2]\n"
                "1:\n"
                "       GETW %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
        register unsigned int x asm ("D0Re0") = 0;
        asm volatile (
                "       GETD %0,[%2]\n"
                "1:\n"
                "       GETD %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

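/* Behavioural model of the three getters above (a sketch only; the asm
 * is the authoritative version): each returns the loaded value, or zero
 * with *err set to -EFAULT if the user address faults.  For the 32-bit
 * case, roughly:
 *
 *      unsigned int model_get_user_d(const unsigned int __user *addr,
 *                                    long *err)
 *      {
 *              unsigned int x = 0;
 *
 *              if (read_would_fault(addr))     // hypothetical probe
 *                      *err = -EFAULT;
 *              else
 *                      x = *(const unsigned int __force *)addr;
 *              return x;
 *      }
 */
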
long __put_user_asm_b(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETB [%2],%1\n"
                "1:\n"
                "       SETB [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETW [%2],%1\n"
                "1:\n"
                "       SETW [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETD [%2],%1\n"
                "1:\n"
                "       SETD [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETL [%2],%1,%t1\n"
                "1:\n"
                "       SETL [%2],%1,%t1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

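/* Illustrative sketch of how a put_user() style macro might dispatch on
 * operand size to the helpers above (the real macro in the uaccess
 * headers differs in detail; the names "size", "val" and "val64" here
 * are hypothetical):
 *
 *      switch (size) {
 *      case 1: err = __put_user_asm_b(val, addr); break;
 *      case 2: err = __put_user_asm_w(val, addr); break;
 *      case 4: err = __put_user_asm_d(val, addr); break;
 *      case 8: err = __put_user_asm_l(val64, addr); break;
 *      default: err = -EFAULT; break;  // unsupported size
 *      }
 */
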
long strnlen_user(const char __user *src, long count)
{
        long res;

        if (!access_ok(VERIFY_READ, src, 0))
                return 0;

        asm volatile (" MOV     D0Ar4, %1\n"
                      " MOV     D0Ar6, %2\n"
                      "0:\n"
                      " SUBS    D0FrT, D0Ar6, #0\n"
                      " SUB     D0Ar6, D0Ar6, #1\n"
                      " BLE     2f\n"
                      " GETB    D0FrT, [D0Ar4+#1++]\n"
                      "1:\n"
                      " TST     D0FrT, #255\n"
                      " BNE     0b\n"
                      "2:\n"
                      " SUB     %0, %2, D0Ar6\n"
                      "3:\n"
                      " .section .fixup,\"ax\"\n"
                      "4:\n"
                      " MOV     %0, #0\n"
                      " MOVT    D0FrT,#HI(3b)\n"
                      " JUMP    D0FrT,#LO(3b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 1b,4b\n"
                      " .previous\n"
                      : "=r" (res)
                      : "r" (src), "r" (count)
                      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

        return res;
}
EXPORT_SYMBOL(strnlen_user);

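/* Reference model of the assembly above (a sketch; the asm is the
 * authoritative version).  For a non-negative count it returns the
 * string length including the terminating NUL, count + 1 if no NUL is
 * found within count bytes, and 0 if the user access faults:
 *
 *      long model_strnlen_user(const char *src, long count)
 *      {
 *              long i;
 *
 *              for (i = 0; i < count; i++)
 *                      if (src[i] == '\0')
 *                              return i + 1;   // length includes the NUL
 *              return count + 1;               // no terminator in range
 *      }
 */
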
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res;

        if (count == 0)
                return 0;

        /*
         * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
         * So do we.
         *
         * This code is deduced from:
         *
         *      char tmp2;
         *      long tmp1, tmp3;
         *      tmp1 = count;
         *      while ((*dst++ = (tmp2 = *src++)) != 0
         *             && --tmp1)
         *        ;
         *
         *      res = count - tmp1;
         *
         * with tweaks.
         */

        asm volatile (" MOV  %0,%3\n"
                      "1:\n"
                      " GETB D0FrT,[%2++]\n"
                      "2:\n"
                      " CMP  D0FrT,#0\n"
                      " SETB [%1++],D0FrT\n"
                      " BEQ  3f\n"
                      " SUBS %0,%0,#1\n"
                      " BNZ  1b\n"
                      "3:\n"
                      " SUB  %0,%3,%0\n"
                      "4:\n"
                      " .section .fixup,\"ax\"\n"
                      "5:\n"
                      " MOV  %0,%7\n"
                      " MOVT    D0FrT,#HI(4b)\n"
                      " JUMP    D0FrT,#LO(4b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 2b,5b\n"
                      " .previous"
                      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
                      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
                      : "D0FrT", "memory", "cc");

        return res;
}
EXPORT_SYMBOL(__strncpy_from_user);

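/* __strncpy_from_user returns the number of bytes copied excluding the
 * trailing NUL, count if the destination fills before a NUL is seen, or
 * -EFAULT if reading the user string faults.  A minimal sketch of the
 * checked wrapper a caller would use (illustrative only; the real
 * definition lives in the uaccess headers):
 *
 *      long example_strncpy_from_user(char *dst, const char __user *src,
 *                                     long count)
 *      {
 *              if (!access_ok(VERIFY_READ, src, 1))
 *                      return -EFAULT;
 *              return __strncpy_from_user(dst, src, count);
 *      }
 */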