1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#ifndef __HMM_BO_H__
22#define __HMM_BO_H__
23
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "mmu/isp_mmu.h"
#include "hmm/hmm_common.h"
#include "ia_css_types.h"
32
/*
 * Bail-out helpers for a NULL struct hmm_bo_device pointer: log the
 * error and return @exp (or plain return for the _void variant).
 * Built on the check_null_* macros, presumably from hmm_common.h.
 */
#define check_bodev_null_return(bdev, exp) \
	check_null_return(bdev, exp, \
			  "NULL hmm_bo_device.\n")

#define check_bodev_null_return_void(bdev) \
	check_null_return_void(bdev, \
			       "NULL hmm_bo_device.\n")
40
/*
 * Status-bit guards for a buffer object:
 *  - _yes_goto: jump to @label unless ALL bits of @_status are set in
 *    bo->status (i.e. require the state before proceeding);
 *  - _no_goto:  jump to @label if ALL bits of @_status are set
 *    (i.e. forbid the state).
 * Note @_status is evaluated twice (plus once stringified), so pass a
 * side-effect-free expression.
 */
#define check_bo_status_yes_goto(bo, _status, label) \
	var_not_equal_goto((bo->status & (_status)), (_status), \
			   label, \
			   "HMM buffer status not contain %s.\n", \
			   #_status)

#define check_bo_status_no_goto(bo, _status, label) \
	var_equal_goto((bo->status & (_status)), (_status), \
		       label, \
		       "HMM buffer status contains %s.\n", \
		       #_status)
52
/*
 * Convert an embedded member (rb_node / list_head) back to its
 * enclosing struct hmm_buffer_object.
 */
#define rbtree_node_to_hmm_bo(root_node) \
	container_of((root_node), struct hmm_buffer_object, node)

#define list_to_hmm_bo(list_ptr) \
	list_entry((list_ptr), struct hmm_buffer_object, list)
58
/*
 * Convert an embedded &struct kref back to its enclosing
 * &struct hmm_buffer_object.
 *
 * Use container_of() directly: list_entry() is merely an alias for
 * container_of(), and @kref_ptr is a struct kref, not a list head, so
 * spelling this as a list operation is misleading (and inconsistent
 * with rbtree_node_to_hmm_bo() above, which uses container_of()).
 */
#define kref_to_hmm_bo(kref_ptr) \
	container_of((kref_ptr), struct hmm_buffer_object, kref)
61
/*
 * Bail-out helpers for a NULL struct hmm_buffer_object pointer: log
 * the error and return @exp (or plain return for the _void variant).
 */
#define check_bo_null_return(bo, exp) \
	check_null_return(bo, exp, "NULL hmm buffer object.\n")

#define check_bo_null_return_void(bo) \
	check_null_return_void(bo, "NULL hmm buffer object.\n")
67
/* page-allocation order limits (2^order pages per chunk) used by the
 * page allocator, presumably -- confirm against the implementation */
#define HMM_MAX_ORDER		3
#define HMM_MIN_ORDER		0

/* ISP virtual address space managed by hmm:
 * [ISP_VM_START, ISP_VM_START + ISP_VM_SIZE) */
#define ISP_VM_START	0x0
#define ISP_VM_SIZE	(0x7FFFFFFF)
/* NULL value for ISP-side pointers */
#define ISP_PTR_NULL	NULL

/* set in hmm_bo_device.flag once the device has been initialized;
 * see hmm_bo_device_inited() */
#define HMM_BO_DEVICE_INITED	0x1
76
/*
 * Source of a buffer object's backing pages; passed to
 * hmm_bo_alloc_pages().
 */
enum hmm_bo_type {
	HMM_BO_PRIVATE,	/* pages allocated by the driver itself */
	HMM_BO_SHARE,	/* NOTE(review): semantics not visible here -- confirm */
	HMM_BO_USER,	/* pages pinned from a userspace pointer,
			 * presumably (see @userptr of hmm_bo_alloc_pages) */
	HMM_BO_LAST,	/* sentinel: number of types, not a real type */
};
83
/*
 * Pool/type a single backing page was obtained from -- presumably the
 * reserved_pool / dynamic_pool declared at the bottom of this header,
 * or the general page allocator; confirm against the implementation.
 */
enum hmm_page_type {
	HMM_PAGE_TYPE_RESERVED,
	HMM_PAGE_TYPE_DYNAMIC,
	HMM_PAGE_TYPE_GENERAL,
};
89
/*
 * Bits of hmm_buffer_object.status.  FREE/ALLOCED describe the ISP
 * virtual address-space allocation (HMM_BO_MASK selects that bit);
 * the others track page allocation, MMU binding and mapping state.
 */
#define HMM_BO_MASK		0x1
#define HMM_BO_FREE		0x0	/* address space not allocated */
#define HMM_BO_ALLOCED		0x1	/* address space allocated */
#define HMM_BO_PAGE_ALLOCED	0x2	/* backing pages allocated */
#define HMM_BO_BINDED		0x4	/* bound into the ISP MMU */
#define HMM_BO_MMAPED		0x8	/* mapped into userspace */
#define HMM_BO_VMAPED		0x10	/* kernel vmap established */
#define HMM_BO_VMAPED_CACHED	0x20	/* ...with cached attributes */
#define HMM_BO_ACTIVE		0x1000	/* NOTE(review): semantics not
					 * visible in this header -- confirm */
/* values of hmm_buffer_object.mem_type */
#define HMM_BO_MEM_TYPE_USER	0x1
#define HMM_BO_MEM_TYPE_PFN	0x2
101
/*
 * Per-device state of the HMM buffer-object allocator.  One instance
 * manages a contiguous range of ISP virtual address space and tracks
 * every buffer object carved out of it.
 */
struct hmm_bo_device {
	/* ISP MMU instance used to map buffer objects */
	struct isp_mmu mmu;

	/* managed ISP virtual address space: start address, size in
	 * pages, size in bytes -- presumably derived from the
	 * hmm_bo_device_init() arguments; TODO confirm */
	unsigned int start;
	unsigned int pgnr;
	unsigned int size;

	/* NOTE(review): list_lock presumably protects entire_bo_list --
	 * confirm against the .c implementation */
	spinlock_t list_lock;
	/* device state bits; see HMM_BO_DEVICE_INITED */
	int flag;

	/* every buffer object created on this device */
	struct list_head entire_bo_list;
	/* buffer objects with address space currently allocated */
	struct rb_root allocated_rbtree;
	/* free address-space ranges, presumably keyed for best-fit
	 * allocation -- confirm */
	struct rb_root free_rbtree;
	/* NOTE(review): presumably serializes both rbtrees -- confirm */
	struct mutex rbtree_mutex;
	/* slab cache for struct hmm_buffer_object allocations */
	struct kmem_cache *bo_cache;
};
123
/*
 * One backing page of a buffer object, tagged with the pool/type it
 * was obtained from.
 */
struct hmm_page_object {
	struct page *page;
	enum hmm_page_type type;
};
128
/*
 * A buffer object: a range of ISP virtual address space covering pgnr
 * pages, optionally backed by physical pages and bound into the ISP
 * MMU.
 */
struct hmm_buffer_object {
	struct hmm_bo_device *bdev;	/* owning device */
	struct list_head list;		/* link in bdev->entire_bo_list */
	struct kref kref;		/* refcount; see hmm_bo_ref/unref */

	/* flat array of the backing pages -- presumably mirrors
	 * page_obj[].page; confirm */
	struct page **pages;

	/* NOTE(review): presumably protects the mutable state below --
	 * confirm against the implementation */
	struct mutex mutex;
	enum hmm_bo_type type;		/* source of the backing pages */
	struct hmm_page_object *page_obj;	/* per-page descriptors */
	int from_highmem;		/* flag passed to hmm_bo_alloc_pages() */
	int mmap_count;			/* active userspace mappings */
	int status;			/* HMM_BO_* state bits */
	int mem_type;			/* HMM_BO_MEM_TYPE_USER / _PFN */
	void *vmap_addr;		/* kernel vmap of the pages, if any */

	struct rb_node node;		/* link in bdev's rbtrees */
	/* ISP virtual address range and its length in pages;
	 * end is presumably exclusive -- confirm */
	unsigned int start;
	unsigned int end;
	unsigned int pgnr;

	/*
	 * NOTE(review): prev/next look like neighbour links used when
	 * splitting/coalescing address ranges (they are not the
	 * entire_bo_list linkage) -- confirm against the implementation.
	 */
	struct hmm_buffer_object *prev;
	struct hmm_buffer_object *next;
};
161
/* Allocate a buffer object spanning @pgnr pages of ISP virtual
 * address space on @bdev; returns NULL on failure, presumably. */
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
		unsigned int pgnr);

/* Release @bo's resources -- presumably the kref release path paired
 * with hmm_bo_unref(); confirm against the implementation. */
void hmm_bo_release(struct hmm_buffer_object *bo);

/* Initialize @bdev to manage the ISP virtual range starting at
 * @vaddr_start with @size bytes, using @mmu_driver for mappings.
 * Returns 0 on success, presumably. */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		struct isp_mmu_client *mmu_driver,
		unsigned int vaddr_start, unsigned int size);

/* Tear down @bdev and free its bookkeeping resources. */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);

/* Nonzero once @bdev has been initialized (HMM_BO_DEVICE_INITED set
 * in bdev->flag, presumably). */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);

/* Take an additional reference on @bo. */
void hmm_bo_ref(struct hmm_buffer_object *bo);
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
/* Drop a reference on @bo; the object is destroyed when the count
 * reaches zero (see the embedded kref), presumably. */
void hmm_bo_unref(struct hmm_buffer_object *bo);

/* Nonzero if @bo currently has ISP address space allocated
 * (HMM_BO_ALLOCED set, presumably -- confirm). */
int hmm_bo_allocated(struct hmm_buffer_object *bo);
229
230
231
232
233
234
235
/* Allocate backing pages for @bo.  @type selects the page source
 * (@userptr is presumably consumed only for HMM_BO_USER); @from_highmem
 * and @cached tune allocation/mapping attributes -- exact semantics
 * live in the implementation.  Returns 0 on success, presumably. */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		enum hmm_bo_type type, int from_highmem,
		const void __user *userptr, bool cached);
/* Release @bo's backing pages. */
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
/* Nonzero if @bo has backing pages (HMM_BO_PAGE_ALLOCED, presumably). */
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);

/* Report @bo's page-object array and page count through the @page_obj
 * and @pgnr out-parameters. */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
		struct hmm_page_object **page_obj, int *pgnr);
247
248
249
250
/* Establish (bind) / tear down (unbind) @bo's mapping in the ISP MMU,
 * and query whether it is currently bound (HMM_BO_BINDED, presumably). */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);
254
255
256
257
258
/* Map @bo's pages into a contiguous kernel virtual range; @cached
 * selects the mapping's cacheability (see HMM_BO_VMAPED_CACHED).
 * Returns the kernel address, or NULL on failure, presumably. */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);

/* Flush CPU caches for @bo's vmapped range -- presumably required for
 * cached mappings before the ISP consumes the data; confirm. */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);

/* Undo hmm_bo_vmap(). */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);
271
272
273
274
275
276
277
278
279
/* Map @bo's backing pages into the userspace range described by @vma.
 * Returns 0 on success, presumably. */
int hmm_bo_mmap(struct vm_area_struct *vma,
		struct hmm_buffer_object *bo);

/* Shared page pools defined elsewhere in the hmm code; struct hmm_pool
 * is deliberately left incomplete in this header. */
extern struct hmm_pool dynamic_pool;
extern struct hmm_pool reserved_pool;
285
286
287
288
289
/* Find the buffer object whose ISP virtual range starts exactly at
 * @vaddr; presumably returns NULL when none matches -- confirm. */
struct hmm_buffer_object *hmm_bo_device_search_start(
		struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/* Find the buffer object whose ISP virtual range contains @vaddr
 * (not necessarily at its start), presumably. */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
		struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/* Find the buffer object whose kernel vmap address (vmap_addr) is
 * @vaddr, presumably -- confirm against the implementation. */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
		struct hmm_bo_device *bdev, const void *vaddr);
308
309#endif
310