/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		range_start;
	unsigned long		range_end;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
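
/*
 * Sketch of the usual call sequence, as driven by the core mm code
 * (the identifiers below are illustrative, not defined in this file):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);
 *	...	tlb_remove_tlb_entry(tlb, ptep, addr) for each cleared PTE
 *	tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 */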

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	/* get_cpu_var() disables preemption; tlb_finish_mmu() re-enables it */
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
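
/*
 * Note that no range flush happens in tlb_finish_mmu(): in the
 * non-fullmm case, tlb_end_vma() below has already flushed each vma's
 * accumulated range, so only the full-mm flush remains.
 */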

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
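
/*
 * Worked example (hypothetical addresses, 4K pages): removing entries
 * at 0x8000 and then 0xa000 leaves range_start = 0x8000 and
 * range_end = 0xb000, so tlb_end_vma() issues a single
 * flush_tlb_range() covering 0x8000-0xb000.
 */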

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		/* reset to an "empty" range: start above any user address */
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->range_end > 0)
		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
}
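
/*
 * For a full-mm teardown (fullmm != 0) the per-vma work above is
 * skipped entirely; the single flush_tlb_mm() in tlb_finish_mmu()
 * covers the whole address space.
 */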

/*
 * Pages are freed immediately rather than batched: tlb_remove_page()
 * releases the page and its swap cache entry, while the pte/pmd table
 * pages go back through pte_free()/pmd_free().
 */
#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

/* nothing to do on task migration */
#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif