linux/arch/arm/mm/copypage-xsc3.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
        int tmp;

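        /*
         * Each pass copies 64 bytes (two 32-byte cache lines),
         * invalidating each destination line just before it is
         * rewritten so stale data is never written back.  The body at
         * 1: prefetches 64 bytes ahead of the reads; subs/bgt runs it
         * PAGE_SIZE / 64 - 1 times, and the final beq 2b copies the
         * last block without prefetching beyond the source page.
         */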
        asm volatile ("\
        pld     [%1, #0]                        \n\
        pld     [%1, #32]                       \n\
1:      pld     [%1, #64]                       \n\
        pld     [%1, #96]                       \n\
                                                \n\
2:      ldrd    r2, r3, [%1], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
        strd    r2, r3, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
        strd    r2, r3, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        subs    %2, %2, #1                      \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        bgt     1b                              \n\
        beq     2b                              "
        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
        : "2" (PAGE_SIZE / 64 - 1)
        : "r2", "r3", "r4", "r5");
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kto = kmap_atomic(to);
        kfrom = kmap_atomic(from);
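        /*
         * The XScale D-cache is virtually indexed, so flush the user
         * mapping of the source page to make sure the kernel alias at
         * kfrom sees the current data.
         */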
        flush_cache_page(vma, vaddr, page_to_pfn(from));
        xsc3_mc_copy_user_page(kto, kfrom);
        kunmap_atomic(kfrom);
        kunmap_atomic(kto);
}

/*
 * XSC3 optimised clear_user_highpage
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *ptr, *kaddr = kmap_atomic(page);
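        /*
         * Zero one 32-byte cache line per iteration: r2/r3 hold the
         * zero pair for the four strd stores, and each line is
         * invalidated first so its stale contents are never written
         * back.  r1 counts down PAGE_SIZE / 32 lines.
         */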
        asm volatile ("\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
1:      mcr     p15, 0, %0, c7, c6, 1           @ invalidate line\n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        subs    r1, r1, #1                      \n\
        bne     1b"
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3");
        kunmap_atomic(kaddr);
}

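/*
 * Referenced from this CPU's proc_info entry; setup_processor() copies
 * the table into cpu_user at boot, which is why it can live in
 * __initdata.
 */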
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
        .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
        .cpu_copy_user_highpage  = xsc3_mc_copy_user_highpage,
};