linux/arch/parisc/include/asm/cacheflush.h
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
        flush_kernel_dcache_page_addr(page_address(page));
}
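
/*
 * Usage sketch (illustrative only, not part of this header): code that
 * writes to a page through its kernel mapping and then exposes it to a
 * differently-aliased user mapping should write the dirty lines back
 * first, since PA-RISC caches are virtually indexed.  "page" here is a
 * hypothetical page obtained elsewhere:
 *
 *	void *kaddr = page_address(page);
 *
 *	memset(kaddr, 0, PAGE_SIZE);
 *	flush_kernel_dcache_page(page);
 */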

#define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size))
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed.  The invalidate side therefore only writes
 * back pages with a deferred flush pending, then flushes the range. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;

        flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        void *cursor = vaddr;

        /* Write back any page whose deferred dcache flush is still
         * pending (PG_dcache_dirty set) before flushing the range. */
        for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
                struct page *page = vmalloc_to_page(cursor);

                if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
                        flush_kernel_dcache_page(page);
        }
        flush_kernel_dcache_range_asm(start, start + size);
}
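
/*
 * Usage sketch (illustrative only): filesystems and drivers doing I/O
 * to a vmalloc()ed or vmap()ed buffer pair these helpers around the
 * device access; "buf" and "len" below are hypothetical:
 *
 *	flush_kernel_vmap_range(buf, len);
 *	... let the device read or write buf ...
 *	invalidate_kernel_vmap_range(buf, len);
 *
 * The first call writes dirty lines back before the device looks at
 * memory; the second discards stale lines before the CPU reads back
 * what the device wrote.
 */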

#define flush_cache_vmap(start, end)            flush_cache_all()
#define flush_cache_vunmap(start, end)          flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)     do {            \
        flush_kernel_dcache_page(page);                 \
        flush_kernel_icache_page(page_address(page));   \
} while (0)

#define flush_icache_range(s,e)         do {            \
        flush_kernel_dcache_range_asm(s,e);             \
        flush_kernel_icache_range_asm(s,e);             \
} while (0)
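
/*
 * Usage sketch (illustrative only): anything that generates or patches
 * kernel instructions must make the i-cache coherent with the d-cache
 * before jumping to them.  "code_buf" and "code_len" are hypothetical:
 *
 *	unsigned long addr = (unsigned long)code_buf;
 *
 *	flush_icache_range(addr, addr + code_len);
 */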

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
        flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
} while (0)
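
/*
 * Usage sketch (illustrative only): this is the pattern used when one
 * process pokes another's memory (e.g. ptrace via access_process_vm()).
 * "page", "vma", "addr", "buf" and "len" are assumed to have been set
 * up by a get_user_pages()-style lookup:
 *
 *	void *maddr = kmap(page);
 *
 *	copy_to_user_page(vma, page, addr,
 *			  maddr + offset_in_page(addr), buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */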

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
/* Write back the user-space alias of an anonymous page before the
 * kernel reads or writes its contents; anonymous pages have no
 * address_space, so flush_dcache_page() can't find their mappings. */
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        if (PageAnon(page)) {
                flush_tlb_page(vma, vmaddr);
                preempt_disable();
                flush_dcache_page_asm(page_to_phys(page), vmaddr);
                preempt_enable();
        }
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
        kunmap_parisc(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
        pagefault_disable();
        return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
        kunmap_parisc(addr);
        pagefault_enable();
}

#define kmap_atomic_prot(page, prot)    kmap_atomic(page)
#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)
#endif
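
/*
 * Usage sketch (illustrative only): the kmap interfaces above are used
 * through the generic highmem wrappers, e.g. to zero a page from atomic
 * context ("page" is hypothetical):
 *
 *	void *kaddr = kmap_atomic(page);
 *
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(kaddr);
 *
 * On pa8800/pa8900 the unmap side goes through kunmap_parisc(), which
 * flushes the kernel mapping so user-space aliases stay coherent.
 */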

#endif /* _PARISC_CACHEFLUSH_H */