// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx AI Engine device memory implementation
 *
 * Copyright (C) 2020 Xilinx, Inc.
 */

8#include <linux/dma-buf.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11#include <linux/uaccess.h>
12#include <uapi/linux/xlnx-ai-engine.h>
13
14#include "ai-engine-internal.h"
15
/*
 * aie_cal_reg_goffset() - calculate the global offset of a tile register
 * @adev: AI engine device, provides the column/row address shifts
 * @loc: tile location (column, row)
 * @regoff: register offset within the tile
 *
 * The per-tile base address is formed by shifting the column and row
 * indices by the device-specific shift amounts; the in-tile register
 * offset is then added on top.
 */
#define aie_cal_reg_goffset(adev, loc, regoff) ({ \
	struct aie_device *_adev = (adev); \
	struct aie_location *_loc = &(loc); \
	(_loc->col << _adev->col_shift) + \
	(_loc->row << _adev->row_shift) + (regoff); \
	})
22
/*
 * aie_cal_reg_pa() - calculate the physical address of a tile register
 * by adding the global register offset to the start of the device's
 * MMIO resource.
 */
#define aie_cal_reg_pa(adev, loc, regoff) ({ \
	struct aie_device *__adev = (adev); \
	__adev->res->start + aie_cal_reg_goffset(__adev, loc, regoff); \
	})
27
28static struct sg_table *
29aie_mem_map_dma_buf(struct dma_buf_attachment *attachment,
30 enum dma_data_direction direction)
31{
32
33
34
35
36
37 (void)attachment;
38 (void)direction;
39 dev_warn(attachment->dev,
40 "AI engine memory map dma buf is not implemented.\n");
41 return NULL;
42}
43
44static void aie_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
45 struct sg_table *table,
46 enum dma_data_direction direction)
47{
48
49
50
51
52
53 (void)attachment;
54 (void)table;
55 (void)direction;
56 dev_warn(attachment->dev,
57 "AI engine memory unmap dma buf is not implemented.\n");
58}
59
/**
 * aie_mem_mmap() - dma-buf mmap callback for AI engine tile memories
 * @dmabuf: DMA buffer whose priv is the struct aie_part_mem being mapped
 * @vma: user virtual memory area to populate
 *
 * The partition memory is physically scattered: each tile in the
 * partition range contributes one chunk of @mem->size bytes. This walks
 * the tiles column-major and remap_pfn_range()s each chunk (or the part
 * of it covered by the vma) until the vma is fully populated.
 *
 * Return: 0 on success, -EINVAL if the requested range exceeds the
 * partition memory size, or the error from remap_pfn_range().
 */
static int aie_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct aie_part_mem *pmem = dmabuf->priv;
	struct aie_mem *mem = &pmem->mem;
	struct aie_partition *apart = pmem->apart;
	struct aie_location loc;
	unsigned long addr = vma->vm_start;
	/* offset: mmap offset into the aggregate partition memory. */
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE, moffset = 0;
	unsigned long remainder = vma->vm_end - addr;
	size_t msize = mem->size;

	/* Reject requests running past the end of the partition memory. */
	if (remainder + offset > pmem->size)
		return -EINVAL;

	/* Device memory: map uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	for (loc.col = mem->range.start.col;
	     loc.col < mem->range.start.col + mem->range.size.col; loc.col++) {
		for (loc.row = mem->range.start.row;
		     loc.row < mem->range.start.row + mem->range.size.row;
		     loc.row++) {
			unsigned long toffset, len;
			phys_addr_t mempa;
			int ret;

			remainder = vma->vm_end - addr;
			if (!remainder)
				return 0;

			/*
			 * moffset is the offset of this tile's chunk within
			 * the aggregate memory; skip whole tiles that lie
			 * entirely before the requested mmap offset.
			 */
			if (moffset + msize < offset) {
				moffset += msize;
				continue;
			}
			/*
			 * Map the part of this tile's memory that the vma
			 * covers: start toffset bytes into the tile chunk,
			 * at most the rest of the chunk, clamped to what is
			 * still left of the vma.
			 */
			toffset = offset - moffset;
			len = msize - toffset;
			if (len > remainder)
				len = remainder;
			mempa = aie_cal_reg_pa(apart->adev, loc,
					       toffset + mem->offset);

			ret = remap_pfn_range(vma, addr, mempa >> PAGE_SHIFT,
					      len, vma->vm_page_prot);
			if (ret) {
				dev_err(&apart->dev,
					"failed to mmap (%u,%u)memory, remap failed, 0x%pa, 0x%lx.\n",
					loc.col, loc.row, &mempa, len);
				return ret;
			}
			addr += len;
			offset += len;
			moffset += msize;
		}
	}
	return 0;
}
120
121static void aie_mem_dmabuf_release(struct dma_buf *dmabuf)
122{
123 struct aie_part_mem *pmem = dmabuf->priv;
124
125 pmem->dbuf = NULL;
126}
127
/* dma-buf exporter operations for AI engine partition tile memories. */
static const struct dma_buf_ops aie_mem_dma_buf_ops = {
	.map_dma_buf = aie_mem_map_dma_buf,
	.unmap_dma_buf = aie_mem_unmap_dma_buf,
	.mmap = aie_mem_mmap,
	.release = aie_mem_dmabuf_release,
};
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/**
 * aie_mem_create_dmabuf() - create (or reuse) a dmabuf for a partition memory
 * @apart: AI engine partition
 * @pmem: partition memory to export; its dbuf field caches the dmabuf
 * @mem: output memory descriptor; filled from @pmem->mem with the new fd
 *
 * Exports @pmem as a dma-buf if it has not been exported yet, then
 * allocates a new file descriptor referring to it and returns the
 * descriptor to the caller via @mem->fd.
 *
 * Return: 0 on success, -EINVAL if the memory size is not page aligned,
 * or a negative errno from dma_buf_export()/dma_buf_fd().
 */
static int aie_mem_create_dmabuf(struct aie_partition *apart,
				 struct aie_part_mem *pmem,
				 struct aie_mem *mem)
{
	struct dma_buf *dmabuf;
	int ret;

	/* mmap maps whole pages; refuse sizes that cannot be page-mapped. */
	if (!PAGE_ALIGNED(pmem->mem.size)) {
		/*
		 * NOTE(review): %zx expects size_t — confirm the types of
		 * pmem->mem.offset/size match (uapi fields may be __u64).
		 */
		dev_warn(&apart->dev,
			 "no dmabuf for mem(0x%zx, 0x%zx), not aligned with page size.\n",
			 pmem->mem.offset, pmem->mem.size);
		return -EINVAL;
	}

	/* Reuse a previously exported dmabuf if one is still alive. */
	dmabuf = pmem->dbuf;
	if (!dmabuf) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &aie_mem_dma_buf_ops;
		exp_info.size = pmem->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = pmem;

		dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);

		pmem->dbuf = dmabuf;
	}

	/* The fd holds the dmabuf reference; release clears pmem->dbuf. */
	ret = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (ret < 0) {
		dev_err(&apart->dev,
			"dmabuf creation failed, failed to get fd.\n");
		return ret;
	}
	memcpy(mem, &pmem->mem, sizeof(*mem));
	mem->fd = ret;

	return 0;
}
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210int aie_mem_get_info(struct aie_partition *apart, unsigned long arg)
211{
212 struct aie_mem_args margs;
213 struct aie_mem *mems;
214 unsigned int num_mems, i;
215 int ret;
216
217 if (copy_from_user(&margs, (void __user *)arg, sizeof(margs)))
218 return -EFAULT;
219
220 num_mems = apart->adev->ops->get_mem_info(&apart->range, NULL);
221 if (num_mems <= 0)
222 return -EINVAL;
223
224 if (!margs.num_mems) {
225 struct aie_mem_args __user *umargs_ptr = (void __user *)arg;
226
227
228 if (copy_to_user((void __user *)&umargs_ptr->num_mems,
229 &num_mems, sizeof(num_mems)))
230 return -EFAULT;
231 return 0;
232 }
233
234 if (num_mems != margs.num_mems) {
235 dev_err(&apart->dev,
236 "failed to get mem info, invalid num of mems %d,%d.\n",
237 num_mems, margs.num_mems);
238 return -EINVAL;
239 }
240 if (!margs.mems) {
241 dev_err(&apart->dev,
242 "failed to get mem info, mems pointer is NULL.\n");
243 return -EINVAL;
244 }
245
246 mems = kcalloc(num_mems, sizeof(*mems), GFP_KERNEL);
247 if (!mems)
248 return -ENOMEM;
249
250
251
252
253
254 for (i = 0; i < num_mems; i++) {
255 ret = aie_mem_create_dmabuf(apart, &apart->pmems[i], &mems[i]);
256 if (ret)
257 break;
258 }
259 if (!ret) {
260 if (copy_to_user((void __user *)margs.mems, mems,
261 num_mems * sizeof(mems[0])))
262 ret = -EFAULT;
263 }
264
265 if (ret) {
266 for (i = 0; i < num_mems; i++) {
267 if (mems[i].fd)
268 put_unused_fd(mems[i].fd);
269 }
270 }
271
272 kfree(mems);
273 return ret;
274}
275
276
277
278
279
280
281
282
283
284bool aie_part_has_mem_mmapped(struct aie_partition *apart)
285{
286 unsigned int num_mems, i;
287
288 num_mems = apart->adev->ops->get_mem_info(&apart->range, NULL);
289 if (!num_mems)
290 return false;
291
292 for (i = 0; i < num_mems; i++) {
293 if (apart->pmems[i].dbuf)
294 return true;
295 }
296 return false;
297}
298