#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Convert a count of pages to a size in kilobytes. */
#define K(x) ((x) << (PAGE_SHIFT-10))
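
/*
 * Architecture-specific show_mem(): print a single line of global page
 * counters, then one line per populated zone with its total free memory
 * and the size of the largest free block.
 */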
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1UL;

		if (!populated_zone(zone))
			continue;

		/* Walk the free lists to total up the free memory. */
		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %lukB)\n",
		       zone_to_nid(zone), zone->name, K(total),
		       largest_order != -1UL ? K(1UL) << largest_order : 0);
	}
}
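
/*
 * shatter_huge_page() - ensure a given kernel address is mapped by
 * small pages.
 *
 * If the address is currently mapped by a kernel huge page, replace the
 * huge PTE with a preallocated level-2 page table of small PTEs, update
 * any other page directories that carry a copy of the entry, and flush
 * the stale TLB entries on all cpus.  Call this before changing the
 * protections of part of a kernel huge-page mapping.
 */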
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* zeroed only to silence a compiler warning */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd in the system and update its copy of the pmd. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush finishes to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
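
/*
 * pgd_list is a list of every page directory in the system, protected
 * by pgd_lock.  It lets kernel page-table updates (see
 * shatter_huge_page()) be pushed into every process's copy of the
 * kernel mappings.  pgd_lock is always held with interrupts disabled,
 * either via spin_lock_irqsave() or because the caller has already
 * disabled them.
 */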
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START * sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.  It never does
	 * for the swapper, and new page tables should always start with
	 * an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	/* Copy the kernel mappings from the swapper page directory. */
	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}

#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
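
/*
 * Allocate the pages backing a user second-level page table.  A single
 * "page table" can span 1 << order physical pages; every page after the
 * first gets its own page count and NR_PAGETABLE accounting so the
 * pages can later be freed individually.
 */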
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
			       int order)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
	int i;

	p = alloc_pages(flags, order);
	if (p == NULL)
		return NULL;

	if (!pgtable_page_ctor(p)) {
		__free_pages(p, order);
		return NULL;
	}

	/*
	 * Make every page have a page_count() of one, not just the first,
	 * and account each of them as a page-table page.  We don't use
	 * __GFP_COMP since a compound page doesn't work correctly with
	 * tlb_remove_page().
	 */
	for (i = 1; i < (1 << order); ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}

	return p;
}
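
/*
 * Free a page table allocated by pgtable_alloc_one().  The first page
 * carries the pgtable_page_ctor() state; the remaining pages of the
 * allocation were given individual page counts, so each is freed
 * separately.
 */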
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < (1 << order); ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

/*
 * Like pgtable_free(), but defer the page frees to the mmu_gather
 * batching so they happen only after the TLB flush.
 */
void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
			unsigned long address, int order)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < (1 << order); ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__
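
/*
 * On tilepro the 64-bit PTE cannot be updated atomically, so clear the
 * "accessed" bit by reading and writing only the byte that contains it;
 * asynchronous updates to the rest of the PTE are then never lost.
 */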
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
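
/*
 * Likewise, clear the "writable" bit by storing only the high word of
 * the PTE, so that updates to the low word (which holds the "accessed"
 * bit, among others) cannot be lost.
 */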
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif /* !__tilegx__ */
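
/*
 * Return a pointer to the PTE that corresponds to the given virtual
 * address in the given page table.  A NULL mm means the kernel page
 * table (swapper_pg_dir); virt_to_kpte() is the preferred interface for
 * that case.  The returned pointer may be to a huge-page entry at the
 * PUD or PMD level rather than a bottom-level PTE, and a bottom-level
 * pointer may refer to an entry that is not present.
 */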
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	if (pud_huge_page(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, addr);
}
EXPORT_SYMBOL(virt_to_pte);

pte_t *virt_to_kpte(unsigned long kaddr)
{
	BUG_ON(kaddr < PAGE_OFFSET);
	return virt_to_pte(NULL, kaddr);
}
EXPORT_SYMBOL(virt_to_kpte);

/* Encode the given cpu as the remote home-cache location (lotar) in a pgprot. */
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

/* Recover the cpu number from the lotar encoded in a pgprot. */
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
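
/*
 * Convert a kernel virtual address to a client physical address plus a
 * PTE that carries only the page's home-cache attributes.
 */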
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/*
	 * Note that this is not writing a page table entry; the pte only
	 * records the home-cache attributes of the page.
	 */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0;
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
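
/*
 * Write a PTE.  On tilegx this is a single 64-bit store; on 32-bit
 * tilepro it takes two 32-bit stores, ordered so that the word holding
 * the "present" and "migrating" bits is written last when making an
 * entry valid and first when making it invalid, so the hardware never
 * sees a half-formed but present PTE.
 */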
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}
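
/*
 * set_pte() wraps __set_pte() and also fixes up the home-cache
 * attributes: a PTE that maps a valid page takes its caching mode from
 * that page's current home, while an out-of-range PFN must already
 * specify a mode or we panic.
 */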
void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {

		/* The PTE maps memory; make sure its home is consistent. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* A PTE with an out-of-range PFN must supply a mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached != 0;
}
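
/*
 * Mark this mm as using priority caching, and notify the hypervisor the
 * first time this happens.
 */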
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1UL;
		hv_set_caching(-1UL);
	}
}
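
/*
 * Re-validate the priority_cached flag: scan the vmas and clear the
 * flag if no mapping with the cached-priority bit remains.  We only
 * try-lock mmap_sem since this can run during a context switch; if we
 * can't get it, leave the flag set and recheck on a later switch.
 */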
static unsigned long update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}
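
/* Set the cache-priority state correctly for the mm we are switching to. */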
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * The next mm doesn't use priority caching; only call the
		 * hypervisor if we need to switch it off.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}

#if CHIP_HAS_MMIO()
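
/*
 * Map a physical MMIO range into kernel virtual space.  The mapping is
 * created in MMIO mode and homed at whatever lotar is encoded in the
 * caller-supplied "home" pgprot.
 */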
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size. */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/* Mappings have to be page-aligned. */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/* Allocate the VA range and install the page-table entries. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		free_vm_area(area);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
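
/* Unmap an MMIO VA mapping created by ioremap_prot(). */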
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* An alternative, x86-style implementation, currently compiled out. */
	struct vm_struct *p, *o;

	/*
	 * Look up the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel; reuse of
	 * the virtual address is prevented by leaving it in the global
	 * lists until we're done with it.
	 */
	p = find_vm_area((void *)addr);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it. */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */