1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/completion.h>
25#include <linux/vmalloc.h>
26#include <linux/raspberrypi/vchiq.h>
27#include <media/videobuf2-vmalloc.h>
28
29#include "mmal-common.h"
30#include "mmal-vchiq.h"
31#include "mmal-msg.h"
32
33
34
35
36
37#define VCHIQ_MMAL_MAX_COMPONENTS 64
38
39
40
41
42
43#define SYNC_MSG_TIMEOUT 3
44
45
46
#ifdef DEBUG
/* Human-readable names for MMAL message types, indexed by the numeric
 * message type value (used by DBG_DUMP_MSG below; DEBUG builds only).
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
78
/* Human-readable names for enum mmal_msg_port_action_type, indexed by
 * action value (used by the pr_debug calls in port_action_port/handle).
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
88
/* Dump an MMAL message for debugging. With FULL_MSG_DUMP the header and
 * payload are hex-dumped as well; otherwise only the type/length line is
 * printed. Compiles away entirely in non-DEBUG builds.
 */
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1);\
	} while (0)
#else
/* Wrapped in do { } while (0) (the original bare { } block expands to a
 * compound statement and breaks when used as the body of an unbraced
 * if/else - the trailing semicolon terminates the "if").
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
115
116struct vchiq_mmal_instance;
117
118
/* Per-message context used to match VCHIQ replies and bulk transfers
 * back to the request that caused them.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHIQ reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for the buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred bulk-receive submission */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port the buffer belongs to */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* presentation and decode timestamps */
			s64 pts;
			s64 dts;
			/* context status */
			int status;
		} bulk;		/* bulk data transfer */

		struct {
			/* vchiq header to release once the reply is consumed */
			struct vchiq_header *msg_handle;
			/* pointer to the received reply message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completed when the reply arrives */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
164
/* State for one open MMAL-over-VCHIQ service connection. */
struct vchiq_mmal_instance {
	unsigned int service_handle;

	/* ensure serialised access to the vchiq service */
	struct mutex vchiq_mutex;

	/* scratch buffer for bulk transfers (vmalloc'd page) */
	void *bulk_scratch;

	/* maps integer handles to in-flight mmal_msg_context */
	struct idr context_map;
	/* protects accesses to context_map */
	struct mutex context_map_lock;

	/* component table; slots handed out by component-create calls */
	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations in sequence */
	struct workqueue_struct *bulk_wq;
};
183
184static struct mmal_msg_context *
185get_msg_context(struct vchiq_mmal_instance *instance)
186{
187 struct mmal_msg_context *msg_context;
188 int handle;
189
190
191 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
192
193 if (!msg_context)
194 return ERR_PTR(-ENOMEM);
195
196
197
198
199
200 mutex_lock(&instance->context_map_lock);
201 handle = idr_alloc(&instance->context_map, msg_context,
202 0, 0, GFP_KERNEL);
203 mutex_unlock(&instance->context_map_lock);
204
205 if (handle < 0) {
206 kfree(msg_context);
207 return ERR_PTR(handle);
208 }
209
210 msg_context->instance = instance;
211 msg_context->handle = handle;
212
213 return msg_context;
214}
215
216static struct mmal_msg_context *
217lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
218{
219 return idr_find(&instance->context_map, handle);
220}
221
222static void
223release_msg_context(struct mmal_msg_context *msg_context)
224{
225 struct vchiq_mmal_instance *instance = msg_context->instance;
226
227 mutex_lock(&instance->context_map_lock);
228 idr_remove(&instance->context_map, msg_context->handle);
229 mutex_unlock(&instance->context_map_lock);
230 kfree(msg_context);
231}
232
233
/* Deals with receipt of an event-to-host message.
 * No event handling is implemented; the event is only logged.
 */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
244
245
246
247
248
249
250static void buffer_work_cb(struct work_struct *work)
251{
252 struct mmal_msg_context *msg_context =
253 container_of(work, struct mmal_msg_context, u.bulk.work);
254 struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
255
256 if (!buffer) {
257 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
258 __func__, msg_context);
259 return;
260 }
261
262 buffer->length = msg_context->u.bulk.buffer_used;
263 buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
264 buffer->dts = msg_context->u.bulk.dts;
265 buffer->pts = msg_context->u.bulk.pts;
266
267 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
268
269 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
270 msg_context->u.bulk.port,
271 msg_context->u.bulk.status,
272 msg_context->u.bulk.buffer);
273}
274
275
276
277
278
279
280
281
282static void buffer_to_host_work_cb(struct work_struct *work)
283{
284 struct mmal_msg_context *msg_context =
285 container_of(work, struct mmal_msg_context,
286 u.bulk.buffer_to_host_work);
287 struct vchiq_mmal_instance *instance = msg_context->instance;
288 unsigned long len = msg_context->u.bulk.buffer_used;
289 int ret;
290
291 if (!len)
292
293 len = 8;
294
295 vchiq_use_service(instance->service_handle);
296 ret = vchiq_bulk_receive(instance->service_handle,
297 msg_context->u.bulk.buffer->buffer,
298
299
300
301 (len + 3) & ~3,
302 msg_context,
303 VCHIQ_BULK_MODE_CALLBACK);
304
305 vchiq_release_service(instance->service_handle);
306
307 if (ret != 0)
308 pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
309 __func__, msg_context, ret);
310}
311
312
313static int bulk_receive(struct vchiq_mmal_instance *instance,
314 struct mmal_msg *msg,
315 struct mmal_msg_context *msg_context)
316{
317 unsigned long rd_len;
318
319 rd_len = msg->u.buffer_from_host.buffer_header.length;
320
321 if (!msg_context->u.bulk.buffer) {
322 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
323
324
325
326
327
328
329
330
331
332
333
334 return -EINVAL;
335 }
336
337
338 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
339 rd_len = msg_context->u.bulk.buffer->buffer_size;
340 pr_warn("short read as not enough receive buffer space\n");
341
342
343
344 }
345
346
347 msg_context->u.bulk.buffer_used = rd_len;
348 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
349 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
350
351 queue_work(msg_context->instance->bulk_wq,
352 &msg_context->u.bulk.buffer_to_host_work);
353
354 return 0;
355}
356
357
358static int inline_receive(struct vchiq_mmal_instance *instance,
359 struct mmal_msg *msg,
360 struct mmal_msg_context *msg_context)
361{
362 memcpy(msg_context->u.bulk.buffer->buffer,
363 msg->u.buffer_from_host.short_data,
364 msg->u.buffer_from_host.payload_in_message);
365
366 msg_context->u.bulk.buffer_used =
367 msg->u.buffer_from_host.payload_in_message;
368
369 return 0;
370}
371
372
/* Queue an empty host buffer with the VPU so it can be filled and
 * returned via a BUFFER_TO_HOST message.
 *
 * Returns 0 on success, negative errno if the port is disabled, the
 * buffer has no message context, or the vchiq queueing fails.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structures ready to schedule the callbacks */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer-from-host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug easier */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back by the VPU untouched */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header: empty buffer, no payload yet */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;
	m.u.buffer_from_host.buffer_header.offset = 0;
	m.u.buffer_from_host.buffer_header.flags = 0;
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear the buffer-type-specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload carried inline in this message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	if (ret)
		/* message never reached the VPU; undo the accounting */
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->service_handle);

	return ret;
}
451
452
/* Deals with receipt of a buffer-to-host message: looks up the message
 * context from the driver-private client_context handle, then either
 * queues a bulk receive for the payload, copies a short inline payload,
 * or records the error, before scheduling the buffer completion work.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
		msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS: still run a (dummy) bulk receive to keep
			 * transfer ordering with the VPU
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* bulk submitted; completion
					 * triggers the callback
					 */
		} else {
			/* do callback with empty buffer - not EOS */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in the message; queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* bulk submitted; completion triggers
				 * the callback
				 */

		/* failed to submit the transfer; report via callback */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* short data payload carried within the message itself */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* hand the (possibly failed) buffer back off-thread */
	schedule_work(&msg_context->u.bulk.work);
}
530
531static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
532 struct mmal_msg_context *msg_context)
533{
534 msg_context->u.bulk.status = 0;
535
536
537 schedule_work(&msg_context->u.bulk.work);
538}
539
540static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
541 struct mmal_msg_context *msg_context)
542{
543 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
544
545 msg_context->u.bulk.status = -EINTR;
546
547 schedule_work(&msg_context->u.bulk.work);
548}
549
550
/* Callback from VCHIQ for all service traffic: incoming messages plus
 * bulk-receive completion/abort notifications. Runs in VCHIQ's
 * delivery context, so heavy work is pushed to workqueues.
 */
static enum vchiq_status service_callback(enum vchiq_reason reason,
					  struct vchiq_header *header,
					  unsigned int handle, void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return VCHIQ_SUCCESS;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer/event messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* no action required; just consume the message */
			vchiq_release_message(handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(handle, header);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(handle, header);
			break;

		default:
			/* everything else is a synchronous reply matched
			 * via the context handle in the header
			 */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(handle, header);
				break;
			}

			/* fill in context values for the waiter */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* NOTE: the vchiq_header is deliberately NOT
			 * released here; ownership passes to the waiter
			 * in send_synchronous_mmal_msg(), whose caller
			 * releases it after reading the reply.
			 *
			 * NOTE(review): if the waiter has already timed
			 * out and released the context, this handle may
			 * be stale - confirm the timeout path cannot
			 * race with a late reply.
			 */

			/* wake the waiter */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* nothing to do when the service closes under us */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return VCHIQ_SUCCESS;
}
648
/* Send an MMAL message and block (up to SYNC_MSG_TIMEOUT seconds) for
 * its reply.
 *
 * On success *msg_out and *msg_handle refer to the reply; the caller
 * must release *msg_handle with vchiq_release_message() when done with
 * the reply contents.
 *
 * Returns 0 on success, -EINVAL for an oversized payload, -ETIME on
 * timeout, or the vchiq queueing error.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause the message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	/* stamp the header so service_callback can match the reply */
	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      SYNC_MSG_TIMEOUT * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* NOTE(review): the context is freed here while the VPU
		 * may still reply later; service_callback would then see
		 * a stale handle - confirm this race is acceptable.
		 */
		release_msg_context(msg_context);
		return ret;
	}

	/* hand the reply (and ownership of its vchiq header) to caller */
	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
711
/* Dump the cached state of a port via pr_debug for diagnostics. */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	/* video ports additionally carry geometry and rate information */
	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d aspect %d/%d\n",
			 port->es.video.frame_rate.num,
			 port->es.video.frame_rate.den,
			 port->es.video.par.num, port->es.video.par.den);
	}
}
752
/* Flatten a host-side vchiq_mmal_port into the wire-format mmal_port
 * embedded in outgoing messages.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* port identity */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	/* buffer requirements */
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* current buffer settings */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	/* the kernel-side port pointer travels as opaque userdata and is
	 * returned untouched by the VPU
	 */
	p->userdata = (u32)(unsigned long)port;
}
771
772static int port_info_set(struct vchiq_mmal_instance *instance,
773 struct vchiq_mmal_port *port)
774{
775 int ret;
776 struct mmal_msg m;
777 struct mmal_msg *rmsg;
778 struct vchiq_header *rmsg_handle;
779
780 pr_debug("setting port info port %p\n", port);
781 if (!port)
782 return -1;
783 dump_port_info(port);
784
785 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
786
787 m.u.port_info_set.component_handle = port->component->handle;
788 m.u.port_info_set.port_type = port->type;
789 m.u.port_info_set.port_index = port->index;
790
791 port_to_mmal_msg(port, &m.u.port_info_set.port);
792
793
794 m.u.port_info_set.format.type = port->format.type;
795 m.u.port_info_set.format.encoding = port->format.encoding;
796 m.u.port_info_set.format.encoding_variant =
797 port->format.encoding_variant;
798 m.u.port_info_set.format.bitrate = port->format.bitrate;
799 m.u.port_info_set.format.flags = port->format.flags;
800
801 memcpy(&m.u.port_info_set.es, &port->es,
802 sizeof(union mmal_es_specific_format));
803
804 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
805 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
806 port->format.extradata_size);
807
808 ret = send_synchronous_mmal_msg(instance, &m,
809 sizeof(m.u.port_info_set),
810 &rmsg, &rmsg_handle);
811 if (ret)
812 return ret;
813
814 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
815
816 ret = -EINVAL;
817 goto release_msg;
818 }
819
820
821 ret = -rmsg->u.port_info_get_reply.status;
822
823 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
824 port->component->handle, port->handle);
825
826release_msg:
827 vchiq_release_message(instance->service_handle, rmsg_handle);
828
829 return ret;
830}
831
832
/* Use a PORT_INFO_GET message to retrieve port information from the
 * VPU and cache it into the host-side port structure.
 *
 * Returns 0 on success, negative errno / negated MMAL status otherwise.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* build the request from the cached identity */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status (0 == MMAL_MSG_STATUS_SUCCESS) */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = 0;
	else
		port->enabled = 1;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached because port info set does not
	 * pass a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended_buffer.size is never filled in from
	 * buffer_size_recommended here - confirm whether callers derive
	 * it themselves or this is an omission.
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->service_handle, rmsg_handle);

	return ret;
}
926
927
928static int create_component(struct vchiq_mmal_instance *instance,
929 struct vchiq_mmal_component *component,
930 const char *name)
931{
932 int ret;
933 struct mmal_msg m;
934 struct mmal_msg *rmsg;
935 struct vchiq_header *rmsg_handle;
936
937
938 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
939 m.u.component_create.client_component = component->client_component;
940 strncpy(m.u.component_create.name, name,
941 sizeof(m.u.component_create.name));
942
943 ret = send_synchronous_mmal_msg(instance, &m,
944 sizeof(m.u.component_create),
945 &rmsg, &rmsg_handle);
946 if (ret)
947 return ret;
948
949 if (rmsg->h.type != m.h.type) {
950
951 ret = -EINVAL;
952 goto release_msg;
953 }
954
955 ret = -rmsg->u.component_create_reply.status;
956 if (ret != MMAL_MSG_STATUS_SUCCESS)
957 goto release_msg;
958
959
960 component->handle = rmsg->u.component_create_reply.component_handle;
961 component->inputs = rmsg->u.component_create_reply.input_num;
962 component->outputs = rmsg->u.component_create_reply.output_num;
963 component->clocks = rmsg->u.component_create_reply.clock_num;
964
965 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
966 component->handle,
967 component->inputs, component->outputs, component->clocks);
968
969release_msg:
970 vchiq_release_message(instance->service_handle, rmsg_handle);
971
972 return ret;
973}
974
975
976static int destroy_component(struct vchiq_mmal_instance *instance,
977 struct vchiq_mmal_component *component)
978{
979 int ret;
980 struct mmal_msg m;
981 struct mmal_msg *rmsg;
982 struct vchiq_header *rmsg_handle;
983
984 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
985 m.u.component_destroy.component_handle = component->handle;
986
987 ret = send_synchronous_mmal_msg(instance, &m,
988 sizeof(m.u.component_destroy),
989 &rmsg, &rmsg_handle);
990 if (ret)
991 return ret;
992
993 if (rmsg->h.type != m.h.type) {
994
995 ret = -EINVAL;
996 goto release_msg;
997 }
998
999 ret = -rmsg->u.component_destroy_reply.status;
1000
1001release_msg:
1002
1003 vchiq_release_message(instance->service_handle, rmsg_handle);
1004
1005 return ret;
1006}
1007
1008
1009static int enable_component(struct vchiq_mmal_instance *instance,
1010 struct vchiq_mmal_component *component)
1011{
1012 int ret;
1013 struct mmal_msg m;
1014 struct mmal_msg *rmsg;
1015 struct vchiq_header *rmsg_handle;
1016
1017 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1018 m.u.component_enable.component_handle = component->handle;
1019
1020 ret = send_synchronous_mmal_msg(instance, &m,
1021 sizeof(m.u.component_enable),
1022 &rmsg, &rmsg_handle);
1023 if (ret)
1024 return ret;
1025
1026 if (rmsg->h.type != m.h.type) {
1027
1028 ret = -EINVAL;
1029 goto release_msg;
1030 }
1031
1032 ret = -rmsg->u.component_enable_reply.status;
1033
1034release_msg:
1035 vchiq_release_message(instance->service_handle, rmsg_handle);
1036
1037 return ret;
1038}
1039
1040
1041static int disable_component(struct vchiq_mmal_instance *instance,
1042 struct vchiq_mmal_component *component)
1043{
1044 int ret;
1045 struct mmal_msg m;
1046 struct mmal_msg *rmsg;
1047 struct vchiq_header *rmsg_handle;
1048
1049 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1050 m.u.component_disable.component_handle = component->handle;
1051
1052 ret = send_synchronous_mmal_msg(instance, &m,
1053 sizeof(m.u.component_disable),
1054 &rmsg, &rmsg_handle);
1055 if (ret)
1056 return ret;
1057
1058 if (rmsg->h.type != m.h.type) {
1059
1060 ret = -EINVAL;
1061 goto release_msg;
1062 }
1063
1064 ret = -rmsg->u.component_disable_reply.status;
1065
1066release_msg:
1067
1068 vchiq_release_message(instance->service_handle, rmsg_handle);
1069
1070 return ret;
1071}
1072
1073
1074static int get_version(struct vchiq_mmal_instance *instance,
1075 u32 *major_out, u32 *minor_out)
1076{
1077 int ret;
1078 struct mmal_msg m;
1079 struct mmal_msg *rmsg;
1080 struct vchiq_header *rmsg_handle;
1081
1082 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1083
1084 ret = send_synchronous_mmal_msg(instance, &m,
1085 sizeof(m.u.version),
1086 &rmsg, &rmsg_handle);
1087 if (ret)
1088 return ret;
1089
1090 if (rmsg->h.type != m.h.type) {
1091
1092 ret = -EINVAL;
1093 goto release_msg;
1094 }
1095
1096 *major_out = rmsg->u.version.major;
1097 *minor_out = rmsg->u.version.minor;
1098
1099release_msg:
1100 vchiq_release_message(instance->service_handle, rmsg_handle);
1101
1102 return ret;
1103}
1104
1105
1106static int port_action_port(struct vchiq_mmal_instance *instance,
1107 struct vchiq_mmal_port *port,
1108 enum mmal_msg_port_action_type action_type)
1109{
1110 int ret;
1111 struct mmal_msg m;
1112 struct mmal_msg *rmsg;
1113 struct vchiq_header *rmsg_handle;
1114
1115 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1116 m.u.port_action_port.component_handle = port->component->handle;
1117 m.u.port_action_port.port_handle = port->handle;
1118 m.u.port_action_port.action = action_type;
1119
1120 port_to_mmal_msg(port, &m.u.port_action_port.port);
1121
1122 ret = send_synchronous_mmal_msg(instance, &m,
1123 sizeof(m.u.port_action_port),
1124 &rmsg, &rmsg_handle);
1125 if (ret)
1126 return ret;
1127
1128 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1129
1130 ret = -EINVAL;
1131 goto release_msg;
1132 }
1133
1134 ret = -rmsg->u.port_action_reply.status;
1135
1136 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1137 __func__,
1138 ret, port->component->handle, port->handle,
1139 port_action_type_names[action_type], action_type);
1140
1141release_msg:
1142 vchiq_release_message(instance->service_handle, rmsg_handle);
1143
1144 return ret;
1145}
1146
1147
1148static int port_action_handle(struct vchiq_mmal_instance *instance,
1149 struct vchiq_mmal_port *port,
1150 enum mmal_msg_port_action_type action_type,
1151 u32 connect_component_handle,
1152 u32 connect_port_handle)
1153{
1154 int ret;
1155 struct mmal_msg m;
1156 struct mmal_msg *rmsg;
1157 struct vchiq_header *rmsg_handle;
1158
1159 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1160
1161 m.u.port_action_handle.component_handle = port->component->handle;
1162 m.u.port_action_handle.port_handle = port->handle;
1163 m.u.port_action_handle.action = action_type;
1164
1165 m.u.port_action_handle.connect_component_handle =
1166 connect_component_handle;
1167 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1168
1169 ret = send_synchronous_mmal_msg(instance, &m,
1170 sizeof(m.u.port_action_handle),
1171 &rmsg, &rmsg_handle);
1172 if (ret)
1173 return ret;
1174
1175 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1176
1177 ret = -EINVAL;
1178 goto release_msg;
1179 }
1180
1181 ret = -rmsg->u.port_action_reply.status;
1182
1183 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1184 __func__,
1185 ret, port->component->handle, port->handle,
1186 port_action_type_names[action_type],
1187 action_type, connect_component_handle, connect_port_handle);
1188
1189release_msg:
1190 vchiq_release_message(instance->service_handle, rmsg_handle);
1191
1192 return ret;
1193}
1194
1195static int port_parameter_set(struct vchiq_mmal_instance *instance,
1196 struct vchiq_mmal_port *port,
1197 u32 parameter_id, void *value, u32 value_size)
1198{
1199 int ret;
1200 struct mmal_msg m;
1201 struct mmal_msg *rmsg;
1202 struct vchiq_header *rmsg_handle;
1203
1204 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1205
1206 m.u.port_parameter_set.component_handle = port->component->handle;
1207 m.u.port_parameter_set.port_handle = port->handle;
1208 m.u.port_parameter_set.id = parameter_id;
1209 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1210 memcpy(&m.u.port_parameter_set.value, value, value_size);
1211
1212 ret = send_synchronous_mmal_msg(instance, &m,
1213 (4 * sizeof(u32)) + value_size,
1214 &rmsg, &rmsg_handle);
1215 if (ret)
1216 return ret;
1217
1218 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1219
1220 ret = -EINVAL;
1221 goto release_msg;
1222 }
1223
1224 ret = -rmsg->u.port_parameter_set_reply.status;
1225
1226 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1227 __func__,
1228 ret, port->component->handle, port->handle, parameter_id);
1229
1230release_msg:
1231 vchiq_release_message(instance->service_handle, rmsg_handle);
1232
1233 return ret;
1234}
1235
/* Read a parameter from a port.  Caller must hold instance->vchiq_mutex.
 * On return *value_size is updated to the parameter's true size, which
 * may exceed the space that was provided.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* size field on the wire includes the 2-word (id + size) header */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	/* NOTE(review): unlike port_parameter_set(), the firmware status
	 * is returned un-negated here - confirm this is intended.
	 */
	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the 2-word header,
	 * whilst *value_size doesn't, so strip it before comparing.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as the caller has space for, but
		 * still report the true parameter size below.
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}

	/* report the size the firmware says the parameter really is */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->service_handle, rmsg_handle);

	return ret;
}
1294
1295
/* Disable a port and return any queued buffers to the client through the
 * port's buffer callback.  Caller must hold instance->vchiq_mutex.
 */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	/* clear the flag before the firmware round-trip so nothing new is
	 * submitted while the port is being torn down
	 */
	port->enabled = 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain the queued buffer list: hand every buffer back to
		 * the client with zero length, no flags and unknown
		 * timestamps so it can be released or recycled.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				/* NOTE(review): the callback runs with
				 * port->slock held and interrupts off, so
				 * it must not sleep - confirm all callers'
				 * callbacks honour this.
				 */
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* read the port back to refresh our cached state */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1344
1345
/* Enable a port and submit any buffers that were queued while it was
 * disabled.  Caller must hold instance->vchiq_mutex.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = 1;

	if (port->buffer_cb) {
		/* send pending buffer headers to the firmware, capped at
		 * the port's current buffer count
		 */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			/* only dequeue once the submission succeeded */
			list_del(buf_head);
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* refresh the cached port info now that the port is live */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1387
1388
1389
1390
1391
1392
1393int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1394 struct vchiq_mmal_port *port)
1395{
1396 int ret;
1397
1398 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1399 return -EINTR;
1400
1401 ret = port_info_set(instance, port);
1402 if (ret)
1403 goto release_unlock;
1404
1405
1406 ret = port_info_get(instance, port);
1407
1408release_unlock:
1409 mutex_unlock(&instance->vchiq_mutex);
1410
1411 return ret;
1412}
1413EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1414
1415int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1416 struct vchiq_mmal_port *port,
1417 u32 parameter, void *value, u32 value_size)
1418{
1419 int ret;
1420
1421 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1422 return -EINTR;
1423
1424 ret = port_parameter_set(instance, port, parameter, value, value_size);
1425
1426 mutex_unlock(&instance->vchiq_mutex);
1427
1428 return ret;
1429}
1430EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1431
1432int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1433 struct vchiq_mmal_port *port,
1434 u32 parameter, void *value, u32 *value_size)
1435{
1436 int ret;
1437
1438 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1439 return -EINTR;
1440
1441 ret = port_parameter_get(instance, port, parameter, value, value_size);
1442
1443 mutex_unlock(&instance->vchiq_mutex);
1444
1445 return ret;
1446}
1447EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1448
1449
1450
1451
1452
1453
1454int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1455 struct vchiq_mmal_port *port,
1456 vchiq_mmal_buffer_cb buffer_cb)
1457{
1458 int ret;
1459
1460 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1461 return -EINTR;
1462
1463
1464 if (port->enabled) {
1465 ret = 0;
1466 goto unlock;
1467 }
1468
1469 port->buffer_cb = buffer_cb;
1470
1471 ret = port_enable(instance, port);
1472
1473unlock:
1474 mutex_unlock(&instance->vchiq_mutex);
1475
1476 return ret;
1477}
1478EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1479
1480int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1481 struct vchiq_mmal_port *port)
1482{
1483 int ret;
1484
1485 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1486 return -EINTR;
1487
1488 if (!port->enabled) {
1489 mutex_unlock(&instance->vchiq_mutex);
1490 return 0;
1491 }
1492
1493 ret = port_disable(instance, port);
1494
1495 mutex_unlock(&instance->vchiq_mutex);
1496
1497 return ret;
1498}
1499EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1500
1501
1502
1503
/* Connect src to dst as a tunnel, tearing down any existing connection
 * on src first.  Passing dst == NULL only performs the disconnect.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* tell the firmware to break the tunnel, then mark the
		 * old peer disabled and forget it
		 */
		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		src->connected->enabled = 0;
		src->connected = NULL;
	}

	if (!dst) {
		/* disconnect only - no new connection to make */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst so both ends of the tunnel agree */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;

	/* set new format on destination port */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what the firmware actually accepted */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1587EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1588
1589int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1590 struct vchiq_mmal_port *port,
1591 struct mmal_buffer *buffer)
1592{
1593 unsigned long flags = 0;
1594 int ret;
1595
1596 ret = buffer_from_host(instance, port, buffer);
1597 if (ret == -EINVAL) {
1598
1599 spin_lock_irqsave(&port->slock, flags);
1600 list_add_tail(&buffer->list, &port->buffers);
1601 spin_unlock_irqrestore(&port->slock, flags);
1602 }
1603
1604 return 0;
1605}
1606EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1607
1608int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1609 struct mmal_buffer *buf)
1610{
1611 struct mmal_msg_context *msg_context = get_msg_context(instance);
1612
1613 if (IS_ERR(msg_context))
1614 return (PTR_ERR(msg_context));
1615
1616 buf->msg_context = msg_context;
1617 return 0;
1618}
1619EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1620
1621int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1622{
1623 struct mmal_msg_context *msg_context = buf->msg_context;
1624
1625 if (msg_context)
1626 release_msg_context(msg_context);
1627 buf->msg_context = NULL;
1628
1629 return 0;
1630}
1631EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1632
1633
1634
1635
/* Initialise a MMAL component: claim a free slot in the instance, ask
 * the firmware to instantiate the named component, then read back the
 * info for the control, input, output and clock ports.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;		/* slot index, then reused as port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* claim the first unused component slot */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = 1;
			break;
		}
	}

	if (!component) {
		/* all component slots are in use */
		ret = -EINVAL;
		goto unlock;
	}

	/* Record the slot index as the client-side identifier for this
	 * component.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	/* firmware component exists but port discovery failed */
	destroy_component(instance, component);
unlock:
	if (component)
		component->in_use = 0;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1731EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1732
1733
1734
1735
/* Destroy a previously-initialised component and release its slot. */
int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_component *component)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	if (component->enabled)
		ret = disable_component(instance, component);

	/* NOTE(review): disable_component()'s status above is discarded;
	 * only destroy_component()'s status is returned - confirm this
	 * best-effort teardown is intended.
	 */
	ret = destroy_component(instance, component);

	component->in_use = 0;

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1755EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1756
1757
1758
1759
1760int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1761 struct vchiq_mmal_component *component)
1762{
1763 int ret;
1764
1765 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1766 return -EINTR;
1767
1768 if (component->enabled) {
1769 mutex_unlock(&instance->vchiq_mutex);
1770 return 0;
1771 }
1772
1773 ret = enable_component(instance, component);
1774 if (ret == 0)
1775 component->enabled = true;
1776
1777 mutex_unlock(&instance->vchiq_mutex);
1778
1779 return ret;
1780}
1781EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1782
1783
1784
1785
1786int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1787 struct vchiq_mmal_component *component)
1788{
1789 int ret;
1790
1791 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1792 return -EINTR;
1793
1794 if (!component->enabled) {
1795 mutex_unlock(&instance->vchiq_mutex);
1796 return 0;
1797 }
1798
1799 ret = disable_component(instance, component);
1800 if (ret == 0)
1801 component->enabled = 0;
1802
1803 mutex_unlock(&instance->vchiq_mutex);
1804
1805 return ret;
1806}
1807EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1808
1809int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1810 u32 *major_out, u32 *minor_out)
1811{
1812 int ret;
1813
1814 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1815 return -EINTR;
1816
1817 ret = get_version(instance, major_out, minor_out);
1818
1819 mutex_unlock(&instance->vchiq_mutex);
1820
1821 return ret;
1822}
1823EXPORT_SYMBOL_GPL(vchiq_mmal_version);
1824
/* Tear down a MMAL instance: close the VCHIQ service, flush outstanding
 * bulk work, then free all instance resources.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* take a use count so the service can be closed */
	vchiq_use_service(instance->service_handle);

	status = vchiq_close_service(instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* drain bulk-transfer work before the instance memory goes away */
	flush_workqueue(instance->bulk_wq);
	destroy_workqueue(instance->bulk_wq);

	vfree(instance->bulk_scratch);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
1854EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1855
1856int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1857{
1858 int status;
1859 struct vchiq_mmal_instance *instance;
1860 static struct vchiq_instance *vchiq_instance;
1861 struct vchiq_service_params params = {
1862 .version = VC_MMAL_VER,
1863 .version_min = VC_MMAL_MIN_VER,
1864 .fourcc = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1865 .callback = service_callback,
1866 .userdata = NULL,
1867 };
1868
1869
1870
1871
1872
1873
1874 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1875
1876
1877 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1878
1879
1880 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1881
1882
1883 status = vchiq_initialise(&vchiq_instance);
1884 if (status) {
1885 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1886 status);
1887 return -EIO;
1888 }
1889
1890 status = vchiq_connect(vchiq_instance);
1891 if (status) {
1892 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1893 return -EIO;
1894 }
1895
1896 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1897
1898 if (!instance)
1899 return -ENOMEM;
1900
1901 mutex_init(&instance->vchiq_mutex);
1902
1903 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1904
1905 mutex_init(&instance->context_map_lock);
1906 idr_init_base(&instance->context_map, 1);
1907
1908 params.userdata = instance;
1909
1910 instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1911 WQ_MEM_RECLAIM);
1912 if (!instance->bulk_wq)
1913 goto err_free;
1914
1915 status = vchiq_open_service(vchiq_instance, ¶ms,
1916 &instance->service_handle);
1917 if (status) {
1918 pr_err("Failed to open VCHI service connection (status=%d)\n",
1919 status);
1920 goto err_close_services;
1921 }
1922
1923 vchiq_release_service(instance->service_handle);
1924
1925 *out_instance = instance;
1926
1927 return 0;
1928
1929err_close_services:
1930 vchiq_close_service(instance->service_handle);
1931 destroy_workqueue(instance->bulk_wq);
1932err_free:
1933 vfree(instance->bulk_scratch);
1934 kfree(instance);
1935 return -ENODEV;
1936}
1937EXPORT_SYMBOL_GPL(vchiq_mmal_init);
1938
1939MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1940MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1941MODULE_LICENSE("GPL");
1942