/*
 * Page-table management for the TILE architecture: shattering kernel
 * huge pages, pgd allocation and tracking, user page-table allocation,
 * low-level PTE accessors, and MMIO ioremap/iounmap support.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
/* Convert a count of pages into the corresponding number of KB. */
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush is required,
 * but the update must be propagated to every pgd in the system and
 * followed by a remote TLB flush so that all cpus notice the change.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush finishes to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
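
/*
 * Callers are expected to shatter a covering kernel huge page before
 * rewriting individual small PTEs within its range (for example when
 * changing caching attributes on part of the lowmem direct map).
 */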

/*
 * List of all pgd's, needed so that kernel page-table updates (such
 * as the huge-page shattering above) can be propagated to every
 * top-level page table in the system.  The list is protected by
 * pgd_lock, and pgds are added and removed as address spaces are
 * created and destroyed.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}
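
/*
 * pgd_cache itself is created elsewhere in the arch code (typically
 * at boot via pgtable_cache_init()).  Each new pgd starts as a copy
 * of the kernel entries in swapper_pg_dir and remains on pgd_list
 * until it is freed, so kernel page-table updates can always find it.
 */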

/* Number of small pages in a full L2 (bottom-level) user page table. */
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
			       int order)
{
	gfp_t flags = GFP_KERNEL|__GFP_ZERO;
	struct page *p;
	int i;

	p = alloc_pages(flags, order);
	if (p == NULL)
		return NULL;

	if (!pgtable_page_ctor(p)) {
		__free_pages(p, order);
		return NULL;
	}

	/*
	 * Make every page have a page_count() of one, not just the
	 * first, and account each as a page-table page.  __GFP_COMP is
	 * not used, since compound pages don't interact well with
	 * tlb_remove_page().
	 */
	for (i = 1; i < (1 << order); ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}

	return p;
}

/*
 * Free the page table immediately (e.g. when __pte_alloc() raced with
 * another thread that already installed a pte page).
 */
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < (1 << order); ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}
232
233void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
234 unsigned long address, int order)
235{
236 int i;
237
238 pgtable_page_dtor(pte);
239 tlb_remove_page(tlb, pte);
240
241 for (i = 1; i < order; ++i) {
242 tlb_remove_page(tlb, pte + i);
243 dec_zone_page_state(pte + i, NR_PAGETABLE);
244 }
245}
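
/*
 * __pgtable_free_tlb() backs the arch's __pte_free_tlb() hook, which
 * the generic mmu_gather code invokes while tearing down page tables.
 * Each sub-page is passed to tlb_remove_page() individually because
 * pgtable_alloc_one() gave every page its own reference count rather
 * than using a compound page.
 */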

#ifndef __tilegx__

/*
 * Clear the "accessed" bit with a narrow 8-bit read-modify-write.
 * This is not atomic against a concurrent hypervisor update of the
 * PTE, but touching only the byte that holds the bit keeps the window
 * of vulnerability small.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}

/*
 * This routine is safe against concurrent hypervisor PTE updates: the
 * hypervisor writes only the low word (where the "accessed" and
 * "dirty" bits live), while this routine writes only the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

/*
 * Return a pointer to the PTE that corresponds to the given address
 * in the given page table.  A NULL mm means to use the kernel page
 * table (swapper_pg_dir).  The returned pointer may refer to a huge
 * PTE at the PUD or PMD level rather than to a bottom-level PTE, so
 * callers must be prepared to handle any level.  Returns NULL if no
 * entry is present at the level where the walk stopped.
 */
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	if (pud_huge_page(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, addr);
}
EXPORT_SYMBOL(virt_to_pte);

pte_t *virt_to_kpte(unsigned long kaddr)
{
	BUG_ON(kaddr < PAGE_OFFSET);
	return virt_to_pte(NULL, kaddr);
}
EXPORT_SYMBOL(virt_to_kpte);
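
/*
 * Example (hypothetical caller): inspect the caching mode of a kernel
 * page, e.g. to see whether it is homed on a single tile's cache:
 *
 *	pte_t *ptep = virt_to_kpte((unsigned long)ptr);
 *	if (ptep && hv_pte_get_mode(*ptep) == HV_PTE_MODE_CACHE_TILE_L3)
 *		... the page is homed on one remote tile's L3 ...
 */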

/* Rewrite a tile-L3 cached pgprot to be homed on the given cpu's cache. */
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

/* Return the cpu whose cache a tile-L3 cached pgprot is homed on. */
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
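
/*
 * The lotar packs the (x, y) mesh coordinates of the home tile.  For
 * example, on an 8 x 8 mesh (smp_width == 8), cpu 10 has
 * x = 10 % 8 = 2 and y = 10 / 8 = 1, and the reverse mapping
 * 2 + 1 * 8 == 10 recovers the cpu number.
 */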

/*
 * Convert a kernel VA to a CPA (client physical address, the
 * hypervisor's view of physical memory) plus a PTE that encodes the
 * page's home.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0;
}
EXPORT_SYMBOL(va_to_cpa_and_pte);

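/*
 * On 32-bit TILE chips a PTE is a pair of 32-bit words, and the
 * "present" and "migrating" bits live in the low word (the #error
 * below enforces this).  __set_pte() therefore writes the low word
 * last when installing a PTE and first when clearing one, so another
 * cpu walking the table can never see a PTE that claims to be valid
 * while its high word is still stale.
 */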
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif
}

void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE maps coherent memory, so set its home. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home from the backing struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* Callers mapping out-of-range PFNs must set a mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached != 0;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1UL;
		hv_set_caching(-1UL);
	}
}

/*
 * Validate and return the priority_cached flag.  If it is zero there
 * is nothing to scan, since it is set non-zero as soon as a priority
 * mapping is first considered.
 *
 * We only try-acquire mmap_sem, since this can run while servicing a
 * context switch; if the semaphore is contended we simply leave
 * priority_cached set and retry on a later switch.  That is safe: the
 * cache merely stays marked for priority a little longer than needed.
 */
static unsigned long update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see
		 * if we need hv_set_caching(), or can assume it's
		 * already zero from the previous mm.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}
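
/*
 * check_mm_caching() is intended to run on context switch (e.g. from
 * the arch's switch_mm()), keeping the hypervisor's priority-caching
 * state consistent with whichever mm is about to run.
 */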

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size. */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write MMIO mapping, homed per the caller's lotar. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/* Mappings have to be page-aligned. */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/* Allocate a kernel VA range, then install the MMIO pages in it. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		free_vm_area(area);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
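
/*
 * Hypothetical driver-style usage (the register offset is illustrative):
 *
 *	void __iomem *regs = ioremap_prot(phys, size, home);
 *	if (regs) {
 *		writel(1, regs + 0x10);
 *		iounmap(regs);
 *	}
 */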

#if !defined(CONFIG_PCI) || !defined(CONFIG_TILEGX)
/*
 * A real ioremap() is only provided along with tilegx PCI support; in
 * any other configuration there is no way to map MMIO space, so plain
 * ioremap() simply fails.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	return NULL;
}
EXPORT_SYMBOL(ioremap);

#endif


/* Unmap an MMIO VA mapping created by ioremap_prot() or ioremap(). */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/*
	 * x86 uses this more elaborate flow instead of a plain
	 * vunmap(); it is kept here, disabled, for reference.
	 */
	struct vm_struct *p, *o;

	/*
	 * Look up the vm area unlocked, assuming the caller ensures
	 * there isn't another iounmap for the same address running in
	 * parallel.  Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 */
	p = find_vm_area((void *)addr);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it. */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */