1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#ifndef __ISP_MMU_H__
24#define __ISP_MMU_H__
25
26#include <linux/types.h>
27#include <linux/mutex.h>
28#include <linux/slab.h>
29
30
31
32
33
/* ISP pages are 4 KiB: 12-bit in-page offset. */
#define ISP_PAGE_OFFSET 12
#define ISP_PAGE_SIZE BIT(ISP_PAGE_OFFSET)
#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1))

/* Bits [31:22] of an ISP virtual address index the L1 page table. */
#define ISP_L1PT_OFFSET 22
#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1))

/* Bits [21:12] index the L2 page table. */
#define ISP_L2PT_OFFSET 12
#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

/* Both page-table levels hold 1024 entries (10 index bits each). */
#define ISP_L1PT_PTES 1024
#define ISP_L2PT_PTES 1024

#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \
				>> ISP_L1PT_OFFSET)

#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \
				>> ISP_L2PT_OFFSET)

/* Round @x up to the next page boundary. */
#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE - 1)) \
				& ISP_PAGE_MASK)

/*
 * Rebuild an ISP virtual address from its L1 index, L2 index and
 * in-page offset.
 *
 * This must be an expression macro: the previous do { ... } while (0)
 * wrapper turned the OR-expression into a statement with no effect,
 * so the macro could never produce a value.
 */
#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
	(((l1_idx) << ISP_L1PT_OFFSET) | \
	 ((l2_idx) << ISP_L2PT_OFFSET) | \
	 (offset))

/* Conversions between a byte count and a number of ISP pages. */
#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET)
#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\
				>> ISP_PAGE_OFFSET)
#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET)
66
struct isp_mmu;

/*
 * Per-hardware-variant MMU descriptor.
 *
 * Each ISP MMU flavour supplies one of these; the generic MMU code
 * drives the hardware exclusively through its fields and callbacks.
 */
struct isp_mmu_client {
	/* Human-readable name of this MMU implementation. */
	char *name;
	/* Bitmask selecting the "valid" bit(s) within a PTE
	 * (consumed by ISP_PTE_VALID() below). */
	unsigned int pte_valid_mask;
	/* PTE value written for not-present entries. */
	unsigned int null_pte;

	/*
	 * NOTE(review): despite the "get" name this callback takes the
	 * page-directory physical base and returns an unsigned int --
	 * presumably the hardware/PTE encoding of that base; confirm
	 * against the concrete client implementations.
	 */
	unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);

	/*
	 * TLB maintenance hooks. Both are optional: the inline wrappers
	 * at the bottom of this header check them for NULL before use.
	 */
	void (*tlb_flush_range)(struct isp_mmu *mmu,
			unsigned int addr, unsigned int size);
	void (*tlb_flush_all)(struct isp_mmu *mmu);
	/* Convert a physical page address to its PTE encoding ... */
	unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
				    phys_addr_t phys);
	/* ... and a PTE back to the physical address it maps. */
	phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
				   unsigned int pte);

};
109
/*
 * Runtime state of one ISP MMU instance: the L1 page-table handle plus
 * a per-L1-slot reference count of the L2 tables, all updates serialized
 * by pt_mutex.
 */
struct isp_mmu {
	struct isp_mmu_client *driver;	/* hardware-specific ops/parameters */
	unsigned int l1_pte;		/* PTE encoding of the L1 page table */
	int l2_pgt_refcount[ISP_L1PT_PTES];	/* live entries per L2 table */
	phys_addr_t base_address;	/* base of the ISP address space --
					 * TODO confirm exact semantics in
					 * isp_mmu.c */

	struct mutex pt_mutex;		/* protects page tables and refcounts */
};
118
119
/* Valid-bit mask of the PTE format, as declared by the client driver. */
#define ISP_PTE_VALID_MASK(mmu)	\
	((mmu)->driver->pte_valid_mask)

/* Nonzero if @pte has its valid bit(s) set. */
#define ISP_PTE_VALID(mmu, pte)	\
	((pte) & ISP_PTE_VALID_MASK(mmu))

/* Sentinel page address meaning "no physical page". */
#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
#define PAGE_VALID(page) ((page) != NULL_PAGE)
128
129
130
131
/*
 * isp_mmu_init - initialize @mmu to be driven by @driver.
 * Returns 0 on success, negative on failure -- exact error codes are
 * defined by the implementation in isp_mmu.c (not visible here).
 */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);

/*
 * isp_mmu_exit - tear down @mmu and release its page-table resources.
 */
void isp_mmu_exit(struct isp_mmu *mmu);

/*
 * isp_mmu_map - map @pgnr pages of physical memory starting at @phys
 * into the ISP virtual address space at @isp_virt.
 * Returns 0 on success, negative on failure (see isp_mmu.c).
 */
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr);

/*
 * isp_mmu_unmap - remove the mapping of @pgnr pages at @isp_virt.
 */
void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr);
153
154static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
155{
156 if (mmu->driver && mmu->driver->tlb_flush_all)
157 mmu->driver->tlb_flush_all(mmu);
158}
159
160#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
161
162static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
163 unsigned int start, unsigned int size)
164{
165 if (mmu->driver && mmu->driver->tlb_flush_range)
166 mmu->driver->tlb_flush_range(mmu, start, size);
167}
168
169#endif
170