1
2
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/errno.h>
7#include <linux/interrupt.h>
8#include <linux/pagemap.h>
9#include <linux/dma-mapping.h>
10#include <linux/io.h>
11#include <linux/platform_device.h>
12#include <linux/uaccess.h>
13#include <linux/mm.h>
14#include <linux/of.h>
15#include <linux/slab.h>
16#include <soc/bcm2835/raspberrypi-firmware.h>
17
18#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
19
20#include "vchiq_arm.h"
21#include "vchiq_connected.h"
22#include "vchiq_pagelist.h"
23
24#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
25
26#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
27#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
28
29#define BELL0 0x00
30#define BELL2 0x08
31
/* Per-vchiq_state platform data; wraps the common ARM-side state. */
struct vchiq_2835_state {
	int inited;	/* non-zero once vchiq_arm_init_state() has succeeded */
	struct vchiq_arm_state arm_state;
};
36
/*
 * Bookkeeping for one bulk-transfer pagelist.  This structure is stored at
 * the tail of the single coherent buffer that also holds the pagelist
 * itself, the page-pointer array and the scatterlist (see create_pagelist()),
 * so freeing the buffer frees everything at once.
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;		/* descriptor shared with VideoCore */
	size_t pagelist_buffer_size;		/* size of the whole coherent buffer */
	dma_addr_t dma_addr;			/* bus address of that buffer */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE for writes, else DMA_FROM_DEVICE */
	unsigned int num_pages;
	unsigned int pages_need_release;	/* pages were pinned (user memory, not vmalloc) */
	struct page **pages;			/* array of num_pages page pointers */
	struct scatterlist *scatterlist;	/* one entry per page before mapping */
	unsigned int scatterlist_mapped;	/* dma_map_sg() has been done */
};
48
/* Doorbell register block mapped from the platform resource (BELL0/BELL2). */
static void __iomem *g_regs;

/*
 * Cache-line size as reported by firmware; the default of 32 is overridden
 * from drvdata during vchiq_platform_init().  Fragments (each two cache
 * lines) are used to repair the unaligned head/tail bytes of bulk reads.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;		/* start of the fragment pool */
static char *g_free_fragments;		/* head of the free-fragment list */
static struct semaphore g_free_fragments_sema;	/* counts free fragments */
static struct device *g_dev;		/* device used for DMA mappings */

/* Protects g_free_fragments list manipulation. */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual);
78
79int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
80{
81 struct device *dev = &pdev->dev;
82 struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
83 struct rpi_firmware *fw = drvdata->fw;
84 struct vchiq_slot_zero *vchiq_slot_zero;
85 void *slot_mem;
86 dma_addr_t slot_phys;
87 u32 channelbase;
88 int slot_mem_size, frag_mem_size;
89 int err, irq, i;
90
91
92
93
94
95 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
96
97 if (err < 0)
98 return err;
99
100 g_cache_line_size = drvdata->cache_line_size;
101 g_fragments_size = 2 * g_cache_line_size;
102
103
104 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
105 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
106
107 slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
108 &slot_phys, GFP_KERNEL);
109 if (!slot_mem) {
110 dev_err(dev, "could not allocate DMA memory\n");
111 return -ENOMEM;
112 }
113
114 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
115
116 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
117 if (!vchiq_slot_zero)
118 return -EINVAL;
119
120 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
121 (int)slot_phys + slot_mem_size;
122 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
123 MAX_FRAGMENTS;
124
125 g_fragments_base = (char *)slot_mem + slot_mem_size;
126
127 g_free_fragments = g_fragments_base;
128 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
129 *(char **)&g_fragments_base[i*g_fragments_size] =
130 &g_fragments_base[(i + 1)*g_fragments_size];
131 }
132 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
133 sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
134
135 if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
136 return -EINVAL;
137
138 g_regs = devm_platform_ioremap_resource(pdev, 0);
139 if (IS_ERR(g_regs))
140 return PTR_ERR(g_regs);
141
142 irq = platform_get_irq(pdev, 0);
143 if (irq <= 0)
144 return irq;
145
146 err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
147 "VCHIQ doorbell", state);
148 if (err) {
149 dev_err(dev, "failed to register irq=%d\n", irq);
150 return err;
151 }
152
153
154 channelbase = slot_phys;
155 err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
156 &channelbase, sizeof(channelbase));
157 if (err || channelbase) {
158 dev_err(dev, "failed to set channelbase\n");
159 return err ? : -ENXIO;
160 }
161
162 g_dev = dev;
163 vchiq_log_info(vchiq_arm_log_level,
164 "vchiq_init - done (slots %pK, phys %pad)",
165 vchiq_slot_zero, &slot_phys);
166
167 vchiq_call_connected_callbacks();
168
169 return 0;
170}
171
172enum vchiq_status
173vchiq_platform_init_state(struct vchiq_state *state)
174{
175 enum vchiq_status status = VCHIQ_SUCCESS;
176 struct vchiq_2835_state *platform_state;
177
178 state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
179 if (!state->platform_state)
180 return VCHIQ_ERROR;
181
182 platform_state = (struct vchiq_2835_state *)state->platform_state;
183
184 platform_state->inited = 1;
185 status = vchiq_arm_init_state(state, &platform_state->arm_state);
186
187 if (status != VCHIQ_SUCCESS)
188 platform_state->inited = 0;
189
190 return status;
191}
192
193struct vchiq_arm_state*
194vchiq_platform_get_arm_state(struct vchiq_state *state)
195{
196 struct vchiq_2835_state *platform_state;
197
198 platform_state = (struct vchiq_2835_state *)state->platform_state;
199
200 WARN_ON_ONCE(!platform_state->inited);
201
202 return &platform_state->arm_state;
203}
204
/*
 * Signal a remote event and, if the peer has armed it, ring the doorbell
 * so VideoCore takes an interrupt.  The barrier ordering here is load-
 * bearing; do not reorder these statements.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure all prior writes to shared slot memory are visible before
	 * the event is flagged as fired.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);	/* data barrier before touching the doorbell register */

	/* Only ring the doorbell if the peer is waiting on this event. */
	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger VC interrupt */
}
217
218enum vchiq_status
219vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
220 int dir)
221{
222 struct vchiq_pagelist_info *pagelistinfo;
223
224 pagelistinfo = create_pagelist((char __user *)offset, size,
225 (dir == VCHIQ_BULK_RECEIVE)
226 ? PAGELIST_READ
227 : PAGELIST_WRITE);
228
229 if (!pagelistinfo)
230 return VCHIQ_ERROR;
231
232 bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
233
234
235
236
237
238 bulk->remote_data = pagelistinfo;
239
240 return VCHIQ_SUCCESS;
241}
242
243void
244vchiq_complete_bulk(struct vchiq_bulk *bulk)
245{
246 if (bulk && bulk->remote_data && bulk->actual)
247 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
248 bulk->actual);
249}
250
251int vchiq_dump_platform_state(void *dump_context)
252{
253 char buf[80];
254 int len;
255
256 len = snprintf(buf, sizeof(buf),
257 " Platform: 2835 (VC master)");
258 return vchiq_dump(dump_context, buf, len + 1);
259}
260
261
262
263
264
265static irqreturn_t
266vchiq_doorbell_irq(int irq, void *dev_id)
267{
268 struct vchiq_state *state = dev_id;
269 irqreturn_t ret = IRQ_NONE;
270 unsigned int status;
271
272
273 status = readl(g_regs + BELL0);
274
275 if (status & 0x4) {
276 remote_event_pollall(state);
277 ret = IRQ_HANDLED;
278 }
279
280 return ret;
281}
282
283static void
284cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
285{
286 if (pagelistinfo->scatterlist_mapped) {
287 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
288 pagelistinfo->num_pages, pagelistinfo->dma_dir);
289 }
290
291 if (pagelistinfo->pages_need_release)
292 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
293
294 dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
295 pagelistinfo->pagelist, pagelistinfo->dma_addr);
296}
297
298
299
300
301
302
303
304
305
/*
 * Build a pagelist describing a user (or vmalloc) buffer so that VideoCore
 * can DMA to/from it.  A single coherent allocation holds, in order: the
 * pagelist (with its addrs[] array), the page-pointer array, the
 * scatterlist and the vchiq_pagelist_info bookkeeping struct.
 *
 * Returns the bookkeeping struct, or NULL on failure.  All partial state is
 * undone via cleanup_pagelistinfo() before returning NULL.
 */
static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the buffer within its first page. */
	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject counts whose combined bookkeeping size would overflow. */
	if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/*
	 * Allocate enough storage to hold the page pointers and the page
	 * list.
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the auxiliary arrays out of the tail of the allocation. */
	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelist info structure. */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr((void __force *)buf)) {
		/* vmalloc memory: translate each page without pinning. */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((void __force *)(buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}

	} else {
		/* User memory: pin the pages for the duration of the DMA. */
		actual_pages = pin_user_pages_fast(
				  (unsigned long)buf & PAGE_MASK,
				  num_pages,
				  type == PAGELIST_READ,
				  pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed. */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled.
	 */
	sg_init_table(scatterlist, num_pages);

	/* Now set the pages for each scatterlist entry. */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance. */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/*
		 * Note: addrs[] entries encode address + (page count - 1).
		 * Blocks after the first are expected to be page-aligned
		 * and a multiple of the page size.
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures. */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		/* May block until a fragment is free; interruptible. */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		/* Pop a fragment off the free list under the mutex. */
		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type. */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
492
/*
 * Tear down a pagelist after a bulk transfer of 'actual' bytes (negative on
 * error).  For reads that used a fragment, copy the fragment's head/tail
 * bytes back into the user pages before releasing everything.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments). */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded into the type field. */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the unaligned head bytes into the first page. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the unaligned tail bytes into the last page. */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Return the fragment to the free list and signal waiters. */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
562