#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

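/* IPI callback: execute a FENCE.I on the local hart. */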
static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

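/*
 * Flush the instruction cache on every hart, either through a single SBI
 * call or, when no SBI implementation is available, by sending an IPI so
 * that each CPU executes a FENCE.I locally.
 */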
void flush_icache_all(void)
{
	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Perform an icache flush for the given MM context.  RISC-V has no
 * instruction-cache shoot-down instruction, so harts currently running
 * this context are asked, via SBI or IPI, to execute a FENCE.I.  All other
 * harts are only marked stale here and perform a deferred local flush
 * before this context next runs on them, which avoids flooding a large
 * machine with IPIs on behalf of processes that only ever run on one hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's icache now, and mark it as already flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the icache of other harts concurrently running this context,
	 * and mark them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * Only the local hart needs flushing, so no SBI call or IPI
		 * is sent.  A full barrier is still needed to order this
		 * hart's writes to icache_stale_mask against the deferred
		 * flush that runs when this context is next scheduled on
		 * another hart.
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
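/*
 * Flush the icache for a page being mapped by a PTE.  PG_dcache_clean
 * records that the page has already been flushed, so repeated mappings of
 * an unchanged page only trigger a global icache flush once.
 */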
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */