/*
 * include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * nmi_uaccess_okay() tells whether user memory can safely be accessed from
 * NMI context at this moment; architectures where this is not always true
 * provide their own implementation, the generic fallback says it always is.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it; otherwise the page could be observed (or worse,
 * changed) after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather.
 *    Finish issues the final TLB invalidation and frees all queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA.
 *    The defaults flush at tlb_end_vma() so the gathered range does not
 *    grow across the holes between VMAs.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing; __tlb_remove_page() assumes PAGE_SIZE. Both return a boolean
 *    indicating that the queue is full and tlb_flush_mmu() is required.
 *    tlb_remove_page() and tlb_remove_page_size() make that call themselves.
 *
 *  - tlb_change_page_size(); call before __tlb_remove_page*() to set the
 *    current page size, may imply a tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly(); the latter only does the TLB
 *    invalidate and resets the gathered range, the former additionally frees
 *    the batched pages.
 *
 *  - mmu_gather::fullmm; set when the entire address space is torn down,
 *    which allows a number of optimizations (no per-VMA flushes, a single
 *    final flush).
 *
 *  - mmu_gather::need_flush_all; can be set by arch code to force a flush of
 *    the entire TLB irrespective of the gathered range.
 *
 * An architecture may provide its own tlb_flush(); the default uses
 * flush_tlb_range() (or flush_tlb_mm() with MMU_GATHER_NO_RANGE) driven by
 * mmu_gather::start / ::end, ::freed_tables and the cleared_* level bits,
 * see also tlb_get_unmap_shift() / tlb_get_unmap_size().
 *
 * Opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE: track the page size of the gathered pages and force
 *  a flush when it changes; exposes mmu_gather::page_size.
 *
 *  MMU_GATHER_TABLE_FREE: provides tlb_remove_table(), to be used instead of
 *  tlb_remove_page() for freeing page directories (__p*_free_tlb()).
 *
 *  MMU_GATHER_RCU_TABLE_FREE: like MMU_GATHER_TABLE_FREE, but frees the
 *  tables with (semi) RCU semantics; useful when the architecture does not
 *  use IPIs for remote TLB invalidation and therefore does not naturally
 *  serialize against lockless page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE: use when the architecture has no efficient
 *  flush_tlb_range(); any non-empty gather results in a full flush_tlb_mm().
 *
 *  MMU_GATHER_NO_GATHER: the mmu_gather does not batch pages itself, the
 *  architecture frees them directly (as s390 does).
 */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to free page
 * table pages like ordinary pages, so tlb_remove_table() simply degrades
 * to tlb_remove_page().
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLB invalidate when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif /* CONFIG_MMU_GATHER_NO_GATHER */

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};
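
/*
 * Typical use of the mmu_gather machinery, as a sketch only: the
 * tlb_gather_mmu() / tlb_finish_mmu() prototypes live outside this header
 * and the real call sites are the zap/unmap paths in mm/; loop structure
 * is simplified here.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each vma in [start, end):
 *		tlb_start_vma(&tlb, vma);
 *		for each present pte:
 *			ptep_get_and_clear_full(...);
 *			tlb_remove_tlb_entry(&tlb, pte, addr);
 *			tlb_remove_page(&tlb, page);
 *		tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * The gather batches the pages and the flush range so the expensive TLB
 * invalidation can be issued once per batch instead of once per page.
 */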

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if (tlb->fullmm)
	 * is true.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range(), use that, and
 * fall back to flush_tlb_mm() for full-mm or "flush everything" requests.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif /* tlb_flush */

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}

/*
 * tlb_start_vma() / tlb_end_vma() bracket the unmapping of a single VMA.
 * The generic versions are no-ops for a full-mm teardown; otherwise they
 * record the VMA flags for tlb_flush() and flush caches / the TLB at the
 * VMA boundaries. Architectures may provide their own implementations.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
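
/*
 * Illustrative only: roughly how the unmap path in mm/memory.c
 * (zap_pte_range()) pairs the PTE clear with the bookkeeping above; "page"
 * is what vm_normal_page() found for the PTE, and the surrounding loop and
 * error handling are omitted here.
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);	// grow flush range, mark PTE level
 *	if (__tlb_remove_page(tlb, page))	// page batch now full?
 *		force_flush = 1;		// caller runs tlb_flush_mmu()
 */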

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * The pXX_free_tlb() helpers free a page-table page of the given level.
 * Besides growing the flush range they set mmu_gather::freed_tables and the
 * cleared_* bit of the level *above* the table being freed, so that the
 * eventual tlb_flush() is wide enough to also shoot down any intermediate
 * (page-walk cache) entries the hardware may have cached for that table.
 * The actual page is handed to the architecture's __pXX_free_tlb(), which
 * typically feeds it into tlb_remove_table().
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
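
/*
 * Illustrative only: roughly how the core mm (the free_pgtables() path in
 * mm/memory.c) uses pte_free_tlb() when tearing down a PTE page; locking
 * and accounting details are omitted here.
 *
 *	static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 *				   unsigned long addr)
 *	{
 *		pgtable_t token = pmd_pgtable(*pmd);
 *
 *		pmd_clear(pmd);			// 1) unhook the table
 *		pte_free_tlb(tlb, token, addr);	// 2) queue it; TLB is flushed
 *		mm_dec_nr_ptes(tlb->mm);	//    before 3) the actual free
 *	}
 */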

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */