1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_GENERIC__TLB_H
12#define _ASM_GENERIC__TLB_H
13
14#include <linux/mmu_notifier.h>
15#include <linux/swap.h>
16#include <linux/hugetlb_inline.h>
17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h>
19
20
21
22
23
24
/*
 * Generic fallback: assume user-space accesses from NMI context are
 * always safe; architectures that cannot guarantee this override it.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
28
29#ifdef CONFIG_MMU
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176#ifdef CONFIG_MMU_GATHER_TABLE_FREE
177
/*
 * Batch of page-table pages queued for deferred freeing.  The batch
 * itself occupies a single page: @tables fills the space remaining
 * after the header (see MAX_TABLE_BATCH).
 */
struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head rcu;	/* defers the actual free past an RCU GP */
#endif
	unsigned int nr;	/* entries used in @tables */
	void *tables[];		/* C99 flexible array member (was the GNU [0] form) */
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
190
191#else
192
193
194
195
196
197#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
198
199#endif
200
201#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
202
203
204
205
/*
 * Default policy: after gathering page-table pages for RCU freeing the
 * TLB must still be invalidated; architectures may override to say no.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

/* The override only makes sense when RCU table freeing is in use. */
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
215
216#endif
217
218
219#ifndef CONFIG_MMU_GATHER_NO_GATHER
220
221
222
223
/* Size of the inline page array embedded in struct mmu_gather. */
#define MMU_GATHER_BUNDLE 8

/*
 * One batch of pages awaiting free; batches past the embedded one each
 * occupy a full page, with @pages filling the remainder of that page.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;	/* singly-linked list of batches */
	unsigned int		nr;	/* entries used in @pages */
	unsigned int		max;	/* capacity of @pages */
	struct page		*pages[];	/* C99 flexible array member (was the GNU [0] form) */
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Cap the number of batches per gather — bounds the pages pinned by one
 * mmu_gather (presumably to avoid soft lockups / excessive memory held
 * during huge unmaps; confirm against tlb core).
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
246#endif
247
248
249
250
251
/*
 * struct mmu_gather - accumulates TLB-invalidation and page-freeing
 * state while page-table mappings are torn down, so flushes and frees
 * can be batched.  Comments describe usage visible in this header;
 * arch/core code may rely on more.
 */
struct mmu_gather {
	struct mm_struct *mm;		/* the address space being torn down */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch *batch;	/* pending page-table pages to free */
#endif

	/* Virtual range accumulated via __tlb_adjust_range(). */
	unsigned long start;
	unsigned long end;

	/*
	 * fullmm: the entire mm is going away; range tracking is disabled
	 * (see __tlb_reset_range()) and per-VMA hooks are skipped.
	 */
	unsigned int fullmm : 1;

	/* Force a full-mm flush in tlb_flush() regardless of range. */
	unsigned int need_flush_all : 1;

	/*
	 * Set by the p*_free_tlb() macros when page-table pages themselves
	 * were freed (not just leaf entries cleared); keeps
	 * tlb_flush_mmu_tlbonly() from skipping the flush.
	 */
	unsigned int freed_tables : 1;

	/*
	 * Which levels had entries cleared; tlb_get_unmap_shift() uses
	 * these to pick the finest granule that needs invalidating.
	 */
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
	unsigned int cleared_p4ds : 1;

	/*
	 * VMA properties captured by tlb_update_vma_flags(); the default
	 * tlb_flush() rebuilds VM_EXEC/VM_HUGETLB from them.
	 */
	unsigned int vma_exec : 1;
	unsigned int vma_huge : 1;

	/* NOTE(review): appears to count batches against
	 * MAX_GATHER_BATCH_COUNT; maintained outside this header. */
	unsigned int batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch	local;		/* embedded first batch ... */
	struct page		*__pages[MMU_GATHER_BUNDLE]; /* ... and its storage */

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;	/* last page size seen; see tlb_change_page_size() */
#endif
#endif
};
304
305void tlb_flush_mmu(struct mmu_gather *tlb);
306
307static inline void __tlb_adjust_range(struct mmu_gather *tlb,
308 unsigned long address,
309 unsigned int range_size)
310{
311 tlb->start = min(tlb->start, address);
312 tlb->end = max(tlb->end, address + range_size);
313}
314
315static inline void __tlb_reset_range(struct mmu_gather *tlb)
316{
317 if (tlb->fullmm) {
318 tlb->start = tlb->end = ~0;
319 } else {
320 tlb->start = TASK_SIZE;
321 tlb->end = 0;
322 }
323 tlb->freed_tables = 0;
324 tlb->cleared_ptes = 0;
325 tlb->cleared_pmds = 0;
326 tlb->cleared_puds = 0;
327 tlb->cleared_p4ds = 0;
328
329
330
331
332
333}
334
335#ifdef CONFIG_MMU_GATHER_NO_RANGE
336
337#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
338#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
339#endif
340
341
342
343
344
345
346
347
348
349static inline void tlb_flush(struct mmu_gather *tlb)
350{
351 if (tlb->end)
352 flush_tlb_mm(tlb->mm);
353}
354
/* Range tracking is off, so there are no per-VMA flags to record. */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
/* No per-VMA flush either: the final flush covers the whole mm. */
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
360
361#else
362
363#ifndef tlb_flush
364
365#if defined(tlb_start_vma) || defined(tlb_end_vma)
366#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
367#endif
368
369
370
371
372
373
static inline void tlb_flush(struct mmu_gather *tlb)
{
	/*
	 * Full-mm teardown (or an explicit need_flush_all) flushes the
	 * whole mm; otherwise only the accumulated range is flushed, and
	 * only if there is one.
	 */
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		/*
		 * Build a minimal on-stack vma carrying the flags captured
		 * by tlb_update_vma_flags() — presumably the only VMA bits
		 * arch flush_tlb_range() implementations look at.
		 */
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
388
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * Record the two VMA properties the default tlb_flush() needs in
	 * order to reconstruct a stand-in vma: hugetlb-ness and VM_EXEC.
	 * Only the most recent VMA's flags are kept; NOTE(review):
	 * presumably a flush happens between VMAs with differing flags
	 * (see tlb_end_vma()) — confirm in the tlb core.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
406
407#else
408
/* Arch supplies its own tlb_flush(); no VMA flags need recording. */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
411
412#endif
413
414#endif
415
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Invalidate the TLB for the accumulated range without freeing
	 * any gathered pages.  Nothing cleared or freed since the last
	 * reset means nothing to do.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	/* Propagate the invalidation to secondary MMUs (mmu notifiers). */
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
430
/* Queue @page for freeing; a full batch forces an immediate flush+free. */
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	bool batch_full = __tlb_remove_page_size(tlb, page, page_size);

	if (batch_full)
		tlb_flush_mmu(tlb);
}
437
/*
 * Queue a single PAGE_SIZE page; returns true when the batch is full
 * and a flush is required before more pages can be queued (mirrors
 * __tlb_remove_page_size()'s contract as used by tlb_remove_page_size()).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
442
443
444
445
446
447static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
448{
449 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
450}
451
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	/*
	 * Architectures that track the gathered page size can only batch
	 * one size at a time: flush pending work when the size changes,
	 * unless a full-mm flush is coming anyway.
	 */
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
464
465static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
466{
467 if (tlb->cleared_ptes)
468 return PAGE_SHIFT;
469 if (tlb->cleared_pmds)
470 return PMD_SHIFT;
471 if (tlb->cleared_puds)
472 return PUD_SHIFT;
473 if (tlb->cleared_p4ds)
474 return P4D_SHIFT;
475
476 return PAGE_SHIFT;
477}
478
/* Byte size corresponding to the finest cleared granule. */
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	unsigned long shift = tlb_get_unmap_shift(tlb);

	return 1UL << shift;
}
483
484
485
486
487
488
/*
 * tlb_start_vma() - begin gathering for one VMA: capture its flags for
 * the eventual flush and write back any dirty cache lines for the
 * range.  A full-mm teardown skips the per-VMA work entirely.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif
499
#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Flush now so this VMA's accumulated range is not later merged
	 * with ranges from other VMAs — the captured vma_exec/vma_huge
	 * flags used by the default tlb_flush() apply only to this VMA.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif
515
516
517
518
519
520static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
521 unsigned long address, unsigned long size)
522{
523 __tlb_adjust_range(tlb, address, size);
524 tlb->cleared_ptes = 1;
525}
526
527static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
528 unsigned long address, unsigned long size)
529{
530 __tlb_adjust_range(tlb, address, size);
531 tlb->cleared_pmds = 1;
532}
533
534static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
535 unsigned long address, unsigned long size)
536{
537 __tlb_adjust_range(tlb, address, size);
538 tlb->cleared_puds = 1;
539}
540
541static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
542 unsigned long address, unsigned long size)
543{
544 __tlb_adjust_range(tlb, address, size);
545 tlb->cleared_p4ds = 1;
546}
547
/* Arch hook invoked for each unmapped PTE; default is a no-op. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - remember an unmapped PTE for later TLB
 * invalidation: widens the tracked range, marks the PTE level cleared,
 * and gives the architecture a chance to act on the entry itself.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
564
/*
 * Hugetlb variant: record the clear at the level matching the huge page
 * size (PMD or PUD); any other size only gets the arch hook.
 * NOTE(review): sizes that are neither PMD_SIZE nor PUD_SIZE do not
 * widen the flush range here — presumably covered elsewhere; confirm
 * for architectures with other huge page sizes.
 */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz == PMD_SIZE)				\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else if (_sz == PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
574
575
576
577
578
/* Arch hook for an unmapped huge PMD; default is a no-op. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/* Record the unmap of a PMD-sized (HPAGE_PMD_SIZE) huge page. */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
588
589
590
591
592
/* Arch hook for an unmapped huge PUD; default is a no-op. */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

/* Record the unmap of a PUD-sized (HPAGE_PUD_SIZE) huge page. */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address)		\
	do {							\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);	\
	} while (0)
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/*
 * pte_free_tlb() - free a PTE page-table page.  Freeing the table means
 * the PMD entry pointing at it was cleared, so the flush is recorded
 * one level up (pmd); freed_tables ensures tlb_flush_mmu_tlbonly()
 * cannot skip the flush.
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
629
/*
 * pmd_free_tlb() - free a PMD page-table page; the PUD entry pointing
 * at it was cleared, so the flush is recorded one level up (pud).
 */
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
638
/*
 * pud_free_tlb() - free a PUD page-table page; the P4D entry pointing
 * at it was cleared, so the flush is recorded one level up (p4d).
 */
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
647
/*
 * p4d_free_tlb() - free a P4D page-table page.  There is no
 * cleared-pgd bit in struct mmu_gather, so only the range is widened
 * here via __tlb_adjust_range() (no tlb_flush_pgd_range() exists).
 */
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
656
657#endif
658
659#endif
660