/*
 * This code maintains the "home" for each page in the system: the
 * cpu (or set of cpus) in whose cache the page's lines live.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"

/*
 * The "noallocl2" boot option suppresses allocation in the local L2
 * cache for remotely-homed pages; see the hv_pte_set_no_alloc_l2()
 * call in pte_set_home() below.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

/*
 * Account for every cpu that a subsequent hypervisor flush will
 * interrupt, whether via a cache flush, a TLB flush, or a remote-ASID
 * flush, by bumping that cpu's irq_hv_flush_count statistic.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper around hv_flush_remote() does several things:
 *
 *  - Provides a return-value-checking panic path, since there is
 *    never a good reason for hv_flush_remote() to fail.
 *  - Accepts a PFN rather than a full physical address, since a PFN
 *    is what Linux generally passes around anyway.
 *  - Canonicalizes the arguments so that a zero length or control
 *    word yields a NULL cpumask.
 *  - Records the cpus being interrupted via hv_flush_update().
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();	/* make prior memory writes visible before flushing */

	/*
	 * Canonicalize the arguments: a zero length or control word
	 * means the corresponding cpumask is passed down as NULL.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	/* The hypervisor rejected the flush; report the arguments and die. */
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf, asids, asidcount, rc);
	panic("Unsafe to continue.");
}

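/*
 * Flush and invalidate ("finv") the page at the given kernel VA from
 * its home cache, using the local-cache primitive when the home is
 * this cpu and the remote primitive otherwise.
 */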
static void homecache_finv_page_va(void *va, int home)
{
	int cpu = get_cpu();
	if (home == cpu) {
		/* The page is homed locally: flush it directly. */
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		/* Hash-for-home: use the hfh flavor of remote finv. */
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		/* Homed on a single remote cpu. */
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
	put_cpu();
}

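/*
 * Finv a page via a temporary kernel mapping.  We map the page at a
 * per-cpu fixmap slot with the requested home, finv it through that
 * mapping, then clear the PTE and flush the stale TLB entry.
 */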
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}

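/*
 * Finv a page using its permanent kernel mapping when the page is
 * lowmem and already homed as requested; otherwise fall back to a
 * temporary mapping.
 */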
static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}

static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

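/*
 * Finv a page from all caches that may hold it.  Pages homed
 * incoherently (or immutably) may be cached on any cpu, so they must
 * be flushed from every cacheable cpu; other pages only need their
 * home flushed.
 */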
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		/* Incoherent pages may be cached anywhere: flush every cpu. */
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force a temporary mapping, e.g. while the page migrates. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}

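/* Evict everything in the L2 caches of the given cpus. */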
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Set the caching attributes of a PTE to match the given home. */
pte_t pte_set_home(pte_t pte, int home)
{
	/* A "file PTE" (nonlinear mapping) encodes no physical page. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* MMIO mappings have no cache home; pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages should carry the non-coherent (NC)
	 * attribute.  If we see NC on a PTE whose page is no longer
	 * immutable, clear it and complain rather than caching the
	 * page incoherently.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * An immutable page can be cached anywhere without
		 * coherence concerns, since it is never written.
		 * It must never be mapped writable.
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "no L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else if (hash_default) {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		} else {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		}
		pte = hv_pte_set_nc(pte);
		break;

	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;

	default:
		/* Home the page on a specific cpu's L3 cache. */
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached". */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);

/*
 * The routines below are the "static" versions of the normal dynamic
 * homecaching routines: they set the home cache of a kernel page once,
 * and require a full-chip cache/TLB flush, so they are only suitable
 * for infrequent use.
 */

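/* Report the caching home of a given page. */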
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		/* Highmem pages are always hash-for-home. */
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);

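/*
 * Rehome a run of 2^order lowmem kernel pages: evict all cached
 * copies and stale TLB entries chip-wide, then rewrite the kernel
 * PTEs with the new home.  The pages must be unmapped and have no
 * extra references.
 */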
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
EXPORT_SYMBOL(homecache_change_page_home);

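/*
 * Allocate pages with a specific caching home.  __GFP_HIGHMEM is
 * disallowed, since highmem pages cannot have their home changed.
 */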
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

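/*
 * Usage sketch (illustrative only, not part of this file): allocate
 * a page homed on cpu 0, use it, then free it; the free path below
 * first rehomes the page to hash-for-home so the page allocator's
 * free pool stays consistently homed.
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0, 0);
 *	if (page) {
 *		void *p = page_address(page);
 *		memset(p, 0, PAGE_SIZE);
 *		__homecache_free_pages(page, 0);
 *	}
 */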
void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, false);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);