// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not, the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR, so warn and record
		 * an implementation error if it shows up at any
		 * other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation; later attempts
	 * to set a result are ignored and signalled to the caller by
	 * the false return value.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set it here are an implementation
	 * error, and -EILSEQ is recorded instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
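
/*
 * Illustrative sketch (not part of this file): two racing completion
 * paths, e.g. a timeout cancellation against an arriving response, may
 * both call gb_operation_result_set().  Only the first caller sees a
 * true return, and only that caller may queue the completion work:
 *
 *	if (gb_operation_result_set(operation, errno))
 *		queue_work(gb_operation_completion_wq, &operation->work);
 */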

/*
 * Looks up an outgoing operation on a connection and returns a
 * refcounted pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have been set
 * before queueing this.  The operation callback function allows the
 * original requester to know the request has completed and its result is
 * available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation))
		gb_operation_request_handle(operation);
	else
		operation->callback(operation);

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}
287
288static void gb_operation_message_init(struct gb_host_device *hd,
289 struct gb_message *message, u16 operation_id,
290 size_t payload_size, u8 type)
291{
292 struct gb_operation_msg_hdr *header;
293
294 header = message->buffer;
295
296 message->header = header;
297 message->payload = payload_size ? header + 1 : NULL;
298 message->payload_size = payload_size;
299
300
301
302
303
304
305 if (type != GB_REQUEST_TYPE_INVALID) {
306 u16 message_size = (u16)(sizeof(*header) + payload_size);
307
308
309
310
311
312
313
314
315
316
317 header->size = cpu_to_le16(message_size);
318 header->operation_id = 0;
319 header->type = type;
320 header->result = 0;
321 }
322}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}
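
/*
 * Illustrative sketch (not part of this file): for the errno values
 * listed above, the two maps are mutual inverses, so a status byte
 * received on the wire round-trips cleanly:
 *
 *	int errno = gb_operation_status_map(GB_OP_RETRY);  // -EAGAIN
 *
 *	WARN_ON(gb_operation_errno_map(errno) != GB_OP_RETRY);
 */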

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before the message is
	 * sent.  All that's left is the operation id, which we copy
	 * from the request message.
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

/*
 * Create a Greybus operation to be performed as part of a Greybus connection.
 * The request and response messages for the operation are outgoing and
 * incoming respectively, and the caller supplies the sizes of their payloads
 * (which are 0 if no payload is needed).
 *
 * For outgoing requests, the request message's header will be initialized
 * with the type of the request and the message size.  Outgoing operations
 * must also specify the response buffer size, which must be sufficient to
 * hold all expected response data.  The response message header will
 * eventually be overwritten, so there's no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt context,
 * so they must be allocated with GFP_ATOMIC.  In this case the request
 * buffer will be immediately overwritten, so there is no need to initialize
 * the message header.  Responsibility for allocating a response buffer lies
 * with the incoming request handler for a protocol, so we don't allocate
 * one here.
 *
 * Returns a pointer to the new operation or a null pointer if an error
 * occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;	/* Initial value */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes required to
 * hold the request/response payload only.  Both of these are allowed to be
 * 0.  Note that 0x00 is reserved as an invalid operation type for all
 * protocols, and this is enforced here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
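
/*
 * Illustrative sketch (hypothetical operation type, not part of this
 * file): a protocol driver sizing a transfer against the host device's
 * buffer limit before creating an operation.
 *
 *	size_t max_payload = gb_operation_get_payload_size_max(connection);
 *	size_t chunk = min_t(size_t, len, max_payload);
 *
 *	operation = gb_operation_create(connection, EXAMPLE_TYPE_WRITE,
 *					chunk, 0, GFP_KERNEL);
 */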

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	/*
	 * No response buffer is allocated for incoming operations (the
	 * request handler allocates one later), so no meaningful
	 * response size is passed here.
	 */
	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/*
 * Send an operation request message.  The caller has filled in any payload
 * so the request message is ready to go.  The callback function supplied
 * will be called when the response message has arrived, or the operation
 * is cancelled, indicating that the operation is complete.  The callback
 * function can fetch the result of the operation using
 * gb_operation_result() if desired.
 *
 * Returns 0 if the request was successfully queued in the host-device's
 * queue, or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id and store it in the request
	 * header.  Zero is a reserved operation id, used for
	 * unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation and record it as
	 * active.  Both are dropped by the work item that completes
	 * the operation, or on the error paths below.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
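
/*
 * Illustrative sketch (hypothetical names and structs, not part of this
 * file): an asynchronous request.  The callback runs from the
 * completion workqueue and drops the reference taken at creation time.
 *
 *	static void example_complete(struct gb_operation *operation)
 *	{
 *		int ret = gb_operation_result(operation);
 *
 *		// ... handle the response payload on success ...
 *
 *		gb_operation_put(operation);
 *	}
 *
 *	operation = gb_operation_create(connection, EXAMPLE_TYPE_ASYNC,
 *					sizeof(req), sizeof(resp),
 *					GFP_KERNEL);
 *	memcpy(operation->request->payload, &req, sizeof(req));
 *	ret = gb_operation_request_send(operation, example_complete,
 *					GFP_KERNEL);
 */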

/*
 * Send a synchronous operation.  This function is expected to block,
 * returning only when the response has arrived or an error is detected.
 * The return value is the result of the operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;
	unsigned long timeout_jiffies;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (timeout)
		timeout_jiffies = msecs_to_jiffies(timeout);
	else
		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	ret = wait_for_completion_interruptible_timeout(&operation->completion,
							timeout_jiffies);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if it timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it can
 * simply supply the result errno; this function will allocate the
 * response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it. */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation
	 * and schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       const struct gb_operation_msg_hdr *header,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response message.
 * Look up the operation, and record that we've received its response.
 *
 * This is called in interrupt context, so just copy the incoming data
 * into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
					const struct gb_operation_msg_hdr *header,
					void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload on a failed response. */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context. */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something with
 * it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}
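
/*
 * Illustrative sketch (not part of this file): host-device drivers do
 * not call gb_connection_recv() directly; they hand raw cport data to
 * the core, which resolves the connection and then dispatches here.
 *
 *	greybus_data_rcvd(hd, cport_id, buffer, nbytes);
 */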

/*
 * Cancel an outgoing operation synchronously, and record the given error
 * to indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously.  Called during connection
 * tear down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and
 * response data respectively, and if they are not NULL, their size must be
 * specified in @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * The return value is the result of the operation.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
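
/*
 * Illustrative sketch (hypothetical request/response structs and
 * operation type, not part of this file): a protocol driver performing
 * a simple synchronous exchange with a one-second timeout.
 *
 *	struct example_read_req req = { .addr = cpu_to_le16(addr) };
 *	struct example_read_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_READ,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp), 1000);
 *	if (ret)
 *		return ret;
 */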

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply
 * that the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request,
					int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation), 0,
					       0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}