1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_GENERIC__TLB_H
12#define _ASM_GENERIC__TLB_H
13
14#include <linux/mmu_notifier.h>
15#include <linux/swap.h>
16#include <asm/pgalloc.h>
17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h>
19
20
21
22
23
24
/*
 * Architectures may override nmi_uaccess_okay() to report whether
 * user-space accesses are currently safe (e.g. from NMI context).
 * The default assumes they always are.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
28
29#ifdef CONFIG_MMU
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152#ifdef CONFIG_HAVE_RCU_TABLE_FREE
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181struct mmu_table_batch {
182 struct rcu_head rcu;
183 unsigned int nr;
184 void *tables[0];
185};
186
/* How many table pointers fit in one page alongside the batch header. */
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

/* Queue @table for deferred freeing; defined out of line in the mm core. */
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
191
192#endif
193
194#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
195
196
197
198
/* Number of on-stack page slots in mmu_gather::__pages (the "local" batch). */
#define MMU_GATHER_BUNDLE 8

/*
 * One batch of pages gathered for freeing after the TLB flush; batches
 * are chained via @next.
 *
 * NOTE(review): pages[0] is the GNU zero-length-array extension. It is
 * deliberately NOT a C99 flexible array member here because this struct
 * is embedded by value as mmu_gather::local, which a FAM would make
 * pedantically invalid.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch *next;	/* next batch in the chain */
	unsigned int nr;		/* used slots in pages[] */
	unsigned int max;		/* capacity of pages[] for this batch */
	struct page *pages[0];		/* trailing variable-length page array */
};
207
/* How many page pointers fit in one page alongside the batch header. */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Cap on the number of batches chained per gather.
 * NOTE(review): 10000UL appears to bound the pages freed per flush
 * (a latency/soft-lockup limit) — confirm against the mmu_gather
 * implementation before relying on the exact figure.
 */
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)

/*
 * Queue @page for freeing; a true return tells the caller to flush now
 * (see tlb_remove_page_size()).
 */
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
221#endif
222
223
224
225
226
/*
 * struct mmu_gather - state accumulated while tearing down mappings,
 * so that TLB invalidation and page freeing can be batched.
 */
struct mmu_gather {
	struct mm_struct *mm;	/* address space being operated on */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* page-table pages pending deferred free */
#endif

	/* accumulated virtual range to flush; grown by __tlb_adjust_range() */
	unsigned long start;
	unsigned long end;

	/*
	 * Tearing down the entire address space; __tlb_reset_range() then
	 * pins start/end to ~0 and several helpers skip per-vma work.
	 */
	unsigned int fullmm : 1;

	/*
	 * Forces the default tlb_flush() to do flush_tlb_mm() even when
	 * only a partial range was gathered.
	 */
	unsigned int need_flush_all : 1;

	/*
	 * Set by the p?d_free_tlb() helpers when page-table pages were
	 * freed (consumed by arch flush code — not visible in this header).
	 */
	unsigned int freed_tables : 1;

	/*
	 * Which page-table levels had entries cleared; the finest set level
	 * determines tlb_get_unmap_shift().
	 */
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
	unsigned int cleared_p4ds : 1;

	/*
	 * VM_EXEC/VM_HUGETLB bits cached from the vma by
	 * tlb_update_vma_flags(); the default tlb_flush() rebuilds a
	 * minimal on-stack vma from them.
	 */
	unsigned int vma_exec : 1;
	unsigned int vma_huge : 1;

	unsigned int batch_count;	/* batches chained so far (cf. MAX_GATHER_BATCH_COUNT) */

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* embedded first batch ... */
	struct page *__pages[MMU_GATHER_BUNDLE];	/* ... and its page slots */

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;	/* page size of entries gathered so far */
#endif
#endif
};
279
/* Out-of-line entry points: set up, flush, and tear down a gather. */
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
285
286static inline void __tlb_adjust_range(struct mmu_gather *tlb,
287 unsigned long address,
288 unsigned int range_size)
289{
290 tlb->start = min(tlb->start, address);
291 tlb->end = max(tlb->end, address + range_size);
292}
293
/* Reset the accumulated range and per-level bookkeeping for a fresh round. */
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		/* Full-mm teardown: keep the range pinned to "everything". */
		tlb->start = tlb->end = ~0;
	} else {
		/* Empty range: start > end until __tlb_adjust_range() grows it. */
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;

	/*
	 * NOTE(review): vma_exec/vma_huge are deliberately left alone here —
	 * presumably they must stay valid across a mid-vma flush; confirm
	 * against callers before "fixing" this.
	 */
}
313
314#ifdef CONFIG_MMU_GATHER_NO_RANGE
315
316#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
317#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
318#endif
319
320
321
322
323
324
325
326
327
328static inline void tlb_flush(struct mmu_gather *tlb)
329{
330 if (tlb->end)
331 flush_tlb_mm(tlb->mm);
332}
333
/* No range tracking: nothing needs to be cached from the vma. */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
/* No per-vma range flushes either; tlb_flush() handles the whole mm. */
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
339
340#else
341
342#ifndef tlb_flush
343
344#if defined(tlb_start_vma) || defined(tlb_end_vma)
345#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
346#endif
347
348
349
350
351
352
/*
 * Default flush: a full-mm (or forced) teardown flushes the whole mm;
 * otherwise flush only the gathered range. A minimal on-stack vma is
 * reconstructed carrying the VM_EXEC/VM_HUGETLB bits that
 * flush_tlb_range() implementations may inspect, since the real vma
 * may already be gone.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
367
/*
 * Cache the two vm_flags bits the default tlb_flush() needs so it can
 * rebuild a stand-in vma after the original is unreachable.
 *
 * NOTE(review): if vmas with different flags are batched without a
 * flush in between, the last one wins — presumably tlb_end_vma()
 * flushing per vma prevents that; confirm with callers.
 */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
385
386#else
387
/*
 * The architecture supplies its own tlb_flush(), so no vma flags need
 * to be cached; this is a no-op.
 */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
390
391#endif
392
393#endif
394
395static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
396{
397 if (!tlb->end)
398 return;
399
400 tlb_flush(tlb);
401 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
402 __tlb_reset_range(tlb);
403}
404
/*
 * Queue @page for freeing after the TLB flush.
 * NOTE(review): a true return from __tlb_remove_page_size() presumably
 * means the gather is full, hence the immediate flush — its definition
 * is not in this header.
 */
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}
411
/* Shorthand for queueing a normal (PAGE_SIZE) page. */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
416
417
418
419
420
421static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
422{
423 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
424}
425
/*
 * Record the page size of entries about to be gathered. Mixing sizes in
 * one gather is not supported: switching sizes flushes pending work
 * first (skipped for a full-mm teardown, which flushes everything anyway).
 * No-op unless CONFIG_HAVE_MMU_GATHER_PAGE_SIZE.
 */
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
438
439static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
440{
441 if (tlb->cleared_ptes)
442 return PAGE_SHIFT;
443 if (tlb->cleared_pmds)
444 return PMD_SHIFT;
445 if (tlb->cleared_puds)
446 return PUD_SHIFT;
447 if (tlb->cleared_p4ds)
448 return P4D_SHIFT;
449
450 return PAGE_SHIFT;
451}
452
/* Byte size of the granule reported by tlb_get_unmap_shift(). */
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	unsigned long shift = tlb_get_unmap_shift(tlb);

	return 1UL << shift;
}
457
458
459
460
461
462
/*
 * Begin unmapping a vma: cache its flags for the eventual flush and
 * flush CPU caches for its range. Skipped on full-mm teardown, which
 * flushes everything at the end.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif
473
/*
 * Finish unmapping a vma: flush the gathered range now, while the
 * cached vma_exec/vma_huge bits still describe this vma — the next vma
 * may carry different flags. Skipped on full-mm teardown.
 */
#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_flush_mmu_tlbonly(tlb);
}
#endif
489
/* Architectures may hook per-PTE invalidation; the default is a no-op. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - note that a PTE at @address was cleared:
 * grow the flush range to cover it and mark PTE-level clearing.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
507
/*
 * Like tlb_remove_tlb_entry() but for a hugetlb entry of hstate @h:
 * the range grows by huge_page_size(h) and the matching cleared_*
 * level (PMD or PUD) is recorded.
 */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
518
519
520
521
522
/* Architectures may hook huge-PMD invalidation; the default is a no-op. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/* Note that a huge-PMD mapping at @address was cleared. */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
533
534
535
536
537
/* Architectures may hook huge-PUD invalidation; the default is a no-op. */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

/* Note that a huge-PUD mapping at @address was cleared. */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/*
 * Free a PTE page-table page. The entry pointing at it lives at PMD
 * level, hence cleared_pmds; freed_tables records that page-table
 * pages were freed (consumed by arch flush code elsewhere).
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
576
/*
 * Free a PMD page-table page; the entry pointing at it lives at PUD
 * level, hence cleared_puds.
 */
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
586
/*
 * Free a PUD page-table page; the entry pointing at it lives at P4D
 * level, hence cleared_p4ds. Not available under the 4-level
 * folding hack.
 */
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
598
/*
 * Free a P4D page-table page. No cleared_* bit exists above P4D level,
 * so only freed_tables is recorded. Not available under the 5-level
 * folding hack. (The parameter is named "pudp" for historical reasons.)
 */
#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
609
610#endif
611
612#endif
613