1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * Prefix every dev_*() and pr_*() message emitted by this file; must be
 * defined before the printk/device headers included below.
 */
#define dev_fmt(fmt) "SCMI Notifications - " fmt
#define pr_fmt(fmt) "SCMI Notifications - " fmt
75
76#include <linux/bitfield.h>
77#include <linux/bug.h>
78#include <linux/compiler.h>
79#include <linux/device.h>
80#include <linux/err.h>
81#include <linux/hashtable.h>
82#include <linux/kernel.h>
83#include <linux/ktime.h>
84#include <linux/kfifo.h>
85#include <linux/list.h>
86#include <linux/mutex.h>
87#include <linux/notifier.h>
88#include <linux/refcount.h>
89#include <linux/scmi_protocol.h>
90#include <linux/slab.h>
91#include <linux/types.h>
92#include <linux/workqueue.h>
93
94#include "common.h"
95#include "notify.h"
96
/* Maximum number of SCMI protocols that can register events (8-bit proto id) */
#define SCMI_MAX_PROTO		256

/*
 * An event key is a u32 packing protocol-id / event-id / source-id:
 *
 *	| 31 ... 24 | 23 ... 16 | 15 ... 0 |
 *	| proto_id  |  evt_id   |  src_id  |
 */
#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)

/* Build a full event key from its protocol/event/source components */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EVT_ID_MASK, (e)) |		\
	 FIELD_PREP(SRC_ID_MASK, (s)))

/*
 * Wildcard key matching an event on ALL of its possible sources: uses the
 * all-ones src_id, which can never clash with a real source index.
 */
#define MAKE_ALL_SRCS_KEY(p, e)	MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
113
114
115
116
117
118
119
120
121
122
123
124
/*
 * KEY_FIND() - Lookup a handler by key in hashtable @__ht, storing the
 * result (or NULL) into @__obj.
 *
 * Note that hash_for_each_possible() leaves the cursor NULL once the
 * bucket is exhausted, so @__obj ends up NULL when no entry with a
 * matching ->key exists. Callers must hold the mutex protecting @__ht.
 */
#define KEY_FIND(__ht, __obj, __k)				\
({								\
	typeof(__k) k_ = __k;					\
	typeof(__obj) obj_;					\
								\
	hash_for_each_possible((__ht), obj_, hash, k_)		\
		if (obj_->key == k_)				\
			break;					\
	__obj = obj_;						\
})

/* Unpack the individual fields of a packed event key */
#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))
139
140
141
142
143
144
145
/*
 * SCMI_GET_PROTO() - Lockless lookup of the registered-events descriptor
 * for protocol @__pid, or NULL if the protocol has not registered yet.
 *
 * READ_ONCE() pairs with the smp_wmb() issued by the registration path
 * after the descriptor has been fully initialized and published.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

/*
 * SCMI_GET_REVT_FROM_PD() - Lockless lookup of registered event @__eid
 * within descriptor @__pd; NULL if out of range or not yet published.
 */
#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

/*
 * SCMI_GET_REVT() - Combined protocol + event lookup: resolve event
 * @__eid of protocol @__pid, or NULL at any missing step.
 */
#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})
176
177
/*
 * Ask the owning protocol to enable/disable platform-side notifications
 * for event @eid on source @sid, via its registered set_notify_enabled op.
 */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)		\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->set_notify_enabled(r->proto->ph,		\
					(eid), (sid), (state));	\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

/*
 * Have the owning protocol translate a raw event payload into its
 * protocol-specific report structure (returns the report or NULL).
 */
#define REVT_FILL_REPORT(revt, ...)				\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->fill_custom_report(r->proto->ph,		\
					  __VA_ARGS__);		\
})

/* Hashtable sizes expressed in bits: 2^4 and 2^6 buckets respectively */
#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6
200
struct scmi_registered_events_desc;

/**
 * struct scmi_notify_instance - Represents an instance of the notification core
 * @gid: devres group id owning all devres resources of this instance
 * @handle: the SCMI handle this instance is bound to
 * @init_work: deferred worker that binds handlers registered before their
 *	       protocol's events became available (see scmi_protocols_late_init)
 * @notify_wq: dedicated workqueue used by all per-protocol event queues
 * @pending_mtx: protects @pending_events_handlers
 * @registered_protocols: SCMI_MAX_PROTO-sized array of per-protocol event
 *			  descriptors; entries are published/consumed with
 *			  explicit smp_wmb()/smp_rmb() + READ_ONCE()
 * @pending_events_handlers: hashtable of handlers whose events are not yet
 *			     registered by any protocol
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;

	struct mutex		pending_mtx;
	struct scmi_registered_events_desc **registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};
230
231
232
233
234
235
236
237
238
239
/**
 * struct events_queue - Queue of events awaiting dispatch for one protocol
 * @sz: effective size of the underlying kfifo (as reported by kfifo_size())
 * @kfifo: circular buffer holding serialized header+payload event records
 * @notify_work: work item running scmi_events_dispatcher() for this queue
 * @wq: workqueue @notify_work is queued on (the instance-wide @notify_wq)
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};
246
247
248
249
250
251
252
253
254
255
256
257
/**
 * struct scmi_event_header - Fixed-size header preceding each queued payload
 * @timestamp: time of reception, stamped by the caller of scmi_notify()
 * @payld_sz: number of payload bytes that follow this header in the kfifo
 * @evt_id: event id, unique within the owning protocol
 * @payld: flexible array used as scratch space when the record is pulled
 *	   back out of the kfifo (see scmi_registered_events_desc.eh)
 */
struct scmi_event_header {
	ktime_t timestamp;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];
};
264
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc - Events registered by one protocol
 * @id: protocol id
 * @ops: protocol-provided event operations (enable, report filling, ...)
 * @equeue: this protocol's queue of in-flight events
 * @ni: backlink to the owning notification instance
 * @eh: pre-allocated scratch buffer sized to hold the header plus the
 *	largest payload of any event of this protocol
 * @eh_sz: size of the @eh scratch buffer
 * @in_flight: when non-NULL, the registered event whose header has been
 *	       consumed from the kfifo but whose payload has not arrived yet;
 *	       lets the dispatcher resume mid-record on a later run
 * @num_events: number of entries in @registered_events
 * @registered_events: array of per-event descriptors, published with an
 *		       smp_wmb() and read with READ_ONCE()
 * @registered_mtx: protects @registered_events_handlers
 * @ph: protocol handle passed back into @ops callbacks
 * @registered_events_handlers: hashtable of active handlers for this protocol
 */
struct scmi_registered_events_desc {
	u8				id;
	const struct scmi_event_ops	*ops;
	struct events_queue		equeue;
	struct scmi_notify_instance	*ni;
	struct scmi_event_header	*eh;
	size_t				eh_sz;
	void				*in_flight;
	int				num_events;
	struct scmi_registered_event	**registered_events;

	struct mutex			registered_mtx;
	const struct scmi_protocol_handle	*ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
/**
 * struct scmi_registered_event - A single event registered by a protocol
 * @proto: backlink to the owning protocol's events descriptor
 * @evt: the protocol-provided static event description
 * @report: pre-allocated buffer (evt->max_report_sz bytes) reused for each
 *	    custom report built by REVT_FILL_REPORT()
 * @num_sources: number of possible sources for this event
 * @sources: per-source refcount of users currently requesting notifications;
 *	     platform-side enables/disables fire on 0->1 / 1->0 transitions
 * @sources_mtx: serializes updates of @sources
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event	*evt;
	void		*report;
	u32		num_sources;
	refcount_t	*sources;

	struct mutex	sources_mtx;
};
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
/**
 * struct scmi_event_handler - A notification chain for one event key
 * @key: packed proto/evt/src key identifying this handler
 * @users: reference count; the handler is freed on the last put
 * @r_evt: the registered event this handler is bound to, or NULL while the
 *	   handler is still pending (protocol not yet registered)
 * @chain: blocking notifier chain the user callbacks hang off
 * @hash: node linking into either the pending or the registered hashtable
 * @enabled: true once platform notifications were enabled for this handler
 */
struct scmi_event_handler {
	u32				key;
	refcount_t			users;
	struct scmi_registered_event	*r_evt;
	struct blocking_notifier_head	chain;
	struct hlist_node		hash;
	bool				enabled;
};

/* A handler with no bound registered event is still pending */
#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)
369
/* Forward declarations for the handler get/put helpers defined below */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl);
376
377
378
379
380
381
382
383
/**
 * scmi_lookup_and_call_event_chain() - Deliver a report to the handler
 * registered for @evt_key, if any.
 * @ni: notification instance
 * @evt_key: packed proto/evt/src key to look up
 * @report: protocol-specific report passed to the notifier callbacks
 *
 * Takes a reference on the active handler for the duration of the chain
 * call, so the handler cannot be freed while callbacks run. Silently does
 * nothing when no handler is registered for @evt_key.
 */
static inline void
scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
				 u32 evt_key, void *report)
{
	int ret;
	struct scmi_event_handler *hndl;

	/* Grabs a reference; released by scmi_put_active_handler() below */
	hndl = scmi_get_active_handler(ni, evt_key);
	if (!hndl)
		return;

	ret = blocking_notifier_call_chain(&hndl->chain,
					   KEY_XTRACT_EVT_ID(evt_key),
					   report);
	/* Callbacks are not expected to halt the chain */
	WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);

	scmi_put_active_handler(ni, hndl);
}
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
/**
 * scmi_process_event_header() - Pull the next event header off the queue
 * @eq: the events queue to consume from
 * @pd: owning protocol descriptor (header is read into pd->eh)
 *
 * Return: NULL when the queue is empty or corrupted (the kfifo is flushed
 * in the corruption case); ERR_PTR(-EINVAL) when the header names an event
 * id this protocol never registered (payload must still be drained); a
 * valid registered event otherwise.
 */
static inline struct scmi_registered_event *
scmi_process_event_header(struct events_queue *eq,
			  struct scmi_registered_events_desc *pd)
{
	unsigned int outs;
	struct scmi_registered_event *r_evt;

	outs = kfifo_out(&eq->kfifo, pd->eh,
			 sizeof(struct scmi_event_header));
	if (!outs)
		return NULL;
	/* A partial header means the producer/consumer framing broke */
	if (outs != sizeof(struct scmi_event_header)) {
		dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return NULL;
	}

	r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
	if (!r_evt)
		r_evt = ERR_PTR(-EINVAL);

	return r_evt;
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460
/**
 * scmi_process_event_payload() - Consume one payload and notify handlers
 * @eq: the events queue to consume from
 * @pd: owning protocol descriptor (payload is read into pd->eh->payld)
 * @r_evt: event resolved by scmi_process_event_header(), possibly an
 *	   ERR_PTR for an unknown event whose payload must be skipped
 *
 * On success the report is delivered twice: once on the wildcard
 * (ALL_SRC_IDS) key and once on the specific source key extracted from
 * the payload.
 *
 * Return: true when this record was fully consumed (even if skipped or
 * reportless) and the dispatch loop may continue; false when the payload
 * is not yet available (in_flight stays set) or the queue was corrupted.
 */
static inline bool
scmi_process_event_payload(struct events_queue *eq,
			   struct scmi_registered_events_desc *pd,
			   struct scmi_registered_event *r_evt)
{
	u32 src_id, key;
	unsigned int outs;
	void *report = NULL;

	/* Payload not yet in the fifo: keep in_flight and retry later */
	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
	if (!outs)
		return false;

	/* Record fully extracted: no longer mid-record */
	pd->in_flight = NULL;

	if (outs != pd->eh->payld_sz) {
		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return false;
	}

	/* Unknown event id: payload was drained above, just skip it */
	if (IS_ERR(r_evt)) {
		dev_warn(pd->ni->handle->dev,
			 "SKIP UNKNOWN EVT - proto:%X  evt:%d\n",
			 pd->id, pd->eh->evt_id);
		return true;
	}

	/* Let the protocol build its report and extract the source id */
	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
				  pd->eh->payld, pd->eh->payld_sz,
				  r_evt->report, &src_id);
	if (!report) {
		dev_err(pd->ni->handle->dev,
			"report not available - proto:%X  evt:%d\n",
			pd->id, pd->eh->evt_id);
		return true;
	}

	/* Handlers registered on ANY source of this event */
	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	/* Handlers registered on this specific source */
	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	return true;
}
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
/**
 * scmi_events_dispatcher() - Worker draining one protocol's event queue
 * @work: the equeue.notify_work item embedded in the protocol descriptor
 *
 * Each record in the kfifo is a header followed by its payload, possibly
 * enqueued at different times; pd->in_flight tracks a record whose header
 * has been consumed but whose payload has not shown up yet, so a later
 * invocation resumes exactly where this one stopped. This worker is the
 * only consumer of the kfifo and of in_flight (single work item per
 * queue), so no locking is needed on that side.
 */
static void scmi_events_dispatcher(struct work_struct *work)
{
	struct events_queue *eq;
	struct scmi_registered_events_desc *pd;
	struct scmi_registered_event *r_evt;

	eq = container_of(work, struct events_queue, notify_work);
	pd = container_of(eq, struct scmi_registered_events_desc, equeue);

	do {
		if (!pd->in_flight) {
			/* Start a fresh record: pull its header first */
			r_evt = scmi_process_event_header(eq, pd);
			if (!r_evt)
				break;
			pd->in_flight = r_evt;
		} else {
			/* Resume a record left half-consumed last time */
			r_evt = pd->in_flight;
		}
	} while (scmi_process_event_payload(eq, pd, r_evt));
}
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
/**
 * scmi_notify() - Queue a platform notification for deferred dispatch
 * @handle: the SCMI handle the notification arrived on
 * @proto_id: protocol id of the event
 * @evt_id: event id within the protocol
 * @buf: raw event payload as received from the platform
 * @len: payload length in bytes
 * @ts: reception timestamp recorded by the transport
 *
 * Copies a header and the payload into the owning protocol's kfifo and
 * kicks the dispatch worker. Meant to be fast: called from the receive
 * path, it does no allocation and no blocking.
 *
 * Return: 0 on success (including the benign "core not initialized yet"
 * case), -EINVAL for unknown/oversized events, -ENOMEM on a full queue.
 */
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
		const void *buf, size_t len, ktime_t ts)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_header eh;
	struct scmi_notify_instance *ni;

	/* Core may be disabled or not yet up: drop silently */
	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return 0;

	r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
	if (!r_evt)
		return -EINVAL;

	if (len > r_evt->evt->max_payld_sz) {
		dev_err(handle->dev, "discard badly sized message\n");
		return -EINVAL;
	}
	/* Reserve room for header + payload as one logical record */
	if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
		dev_warn(handle->dev,
			 "queue full, dropping proto_id:%d  evt_id:%d  ts:%lld\n",
			 proto_id, evt_id, ktime_to_ns(ts));
		return -ENOMEM;
	}

	eh.timestamp = ts;
	eh.evt_id = evt_id;
	eh.payld_sz = len;

	/*
	 * Header and payload are inserted with two distinct kfifo_in()
	 * calls; this is safe only as long as each protocol queue has a
	 * single producer context — NOTE(review): assumed from the rx
	 * callback model, confirm against the transport layer.
	 */
	kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
	kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);

	/* A no-op if the dispatcher work is already pending */
	queue_work(r_evt->proto->equeue.wq,
		   &r_evt->proto->equeue.notify_work);

	return 0;
}
631
632
633
634
635
/* devres action callback: release a kfifo allocated with kfifo_alloc() */
static void scmi_kfifo_free(void *kfifo)
{
	struct kfifo *fifo = kfifo;

	kfifo_free(fifo);
}
640
641
642
643
644
645
646
647
648
649
650
/**
 * scmi_initialize_events_queue() - Set up one protocol's events queue
 * @ni: owning notification instance (supplies dev for devres and the wq)
 * @equeue: the queue to initialize
 * @sz: requested kfifo size in bytes (kfifo may round it up)
 *
 * The kfifo's lifetime is tied to the SCMI device via a devres action.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
					struct events_queue *equeue, size_t sz)
{
	int ret;

	if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
		return -ENOMEM;
	/* Record the effective size, which kfifo may have rounded up */
	equeue->sz = kfifo_size(&equeue->kfifo);

	ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
				       &equeue->kfifo);
	if (ret)
		return ret;

	INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
	equeue->wq = ni->notify_wq;

	return ret;
}
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
/**
 * scmi_allocate_registered_events_desc() - Allocate a protocol's descriptor
 * @ni: owning notification instance
 * @proto_id: protocol id (must not be registered already)
 * @queue_sz: requested kfifo size for this protocol's event queue
 * @eh_sz: size of the scratch header buffer (header + largest payload)
 * @num_events: number of events this protocol will register
 * @ops: protocol-provided event operations
 *
 * All memory is devres-managed against the SCMI device, so error paths
 * need no explicit cleanup.
 *
 * Return: the new descriptor, or an ERR_PTR on failure.
 */
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
				     u8 proto_id, size_t queue_sz, size_t eh_sz,
				     int num_events,
				     const struct scmi_event_ops *ops)
{
	int ret;
	struct scmi_registered_events_desc *pd;

	/* Paired with the smp_wmb() of the publishing side */
	smp_rmb();
	if (WARN_ON(ni->registered_protocols[proto_id]))
		return ERR_PTR(-EINVAL);

	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->id = proto_id;
	pd->ops = ops;
	pd->ni = ni;

	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
	if (ret)
		return ERR_PTR(ret);

	/* Scratch buffer reused for every record pulled off the kfifo */
	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
	if (!pd->eh)
		return ERR_PTR(-ENOMEM);
	pd->eh_sz = eh_sz;

	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
					     sizeof(char *), GFP_KERNEL);
	if (!pd->registered_events)
		return ERR_PTR(-ENOMEM);
	pd->num_events = num_events;

	mutex_init(&pd->registered_mtx);
	hash_init(pd->registered_events_handlers);

	return pd;
}
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
/**
 * scmi_register_protocol_events() - Register a protocol's events with the core
 * @handle: the SCMI handle
 * @proto_id: protocol id
 * @ph: protocol handle passed back into the event ops
 * @ee: protocol events description (ops, events array, queue sizing)
 *
 * Builds the per-protocol descriptor and one registered event per entry
 * in @ee->evts, then publishes the descriptor and schedules the late-init
 * worker so that any handlers parked as pending can now be bound.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
				  const struct scmi_protocol_handle *ph,
				  const struct scmi_protocol_events *ee)
{
	int i;
	unsigned int num_sources;
	size_t payld_sz = 0;
	struct scmi_registered_events_desc *pd;
	struct scmi_notify_instance *ni;
	const struct scmi_event *evt;

	/* Either a static source count or a way to query it is required */
	if (!ee || !ee->ops || !ee->evts || !ph ||
	    (!ee->num_sources && !ee->ops->get_num_sources))
		return -EINVAL;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENOMEM;

	if (ee->num_sources) {
		num_sources = ee->num_sources;
	} else {
		/* Source count is dynamic: ask the protocol */
		int nsrc = ee->ops->get_num_sources(ph);

		if (nsrc <= 0)
			return -EINVAL;
		num_sources = nsrc;
	}

	/* Size the shared scratch buffer for the largest possible record */
	evt = ee->evts;
	for (i = 0; i < ee->num_events; i++)
		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
	payld_sz += sizeof(struct scmi_event_header);

	pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
						  payld_sz, ee->num_events,
						  ee->ops);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	pd->ph = ph;
	for (i = 0; i < ee->num_events; i++, evt++) {
		struct scmi_registered_event *r_evt;

		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
				     GFP_KERNEL);
		if (!r_evt)
			return -ENOMEM;
		r_evt->proto = pd;
		r_evt->evt = evt;

		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
					      sizeof(refcount_t), GFP_KERNEL);
		if (!r_evt->sources)
			return -ENOMEM;
		r_evt->num_sources = num_sources;
		mutex_init(&r_evt->sources_mtx);

		r_evt->report = devm_kzalloc(ni->handle->dev,
					     evt->max_report_sz, GFP_KERNEL);
		if (!r_evt->report)
			return -ENOMEM;

		pd->registered_events[i] = r_evt;
		/* Barrier before lockless readers can see this entry */
		smp_wmb();
		dev_dbg(handle->dev, "registered event - %lX\n",
			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
	}

	/* Publish the fully-built descriptor for lockless lookup */
	ni->registered_protocols[proto_id] = pd;
	/* Ensure the store above is visible before late-init runs */
	smp_wmb();

	/* Bind any handlers that were registered before this protocol */
	schedule_work(&ni->init_work);

	return 0;
}
832
833
834
835
836
837
838
/**
 * scmi_deregister_protocol_events() - Unpublish a protocol's events
 * @handle: the SCMI handle
 * @proto_id: protocol id to deregister
 *
 * Clears the published descriptor slot (so lockless lookups start failing)
 * and then waits for any in-flight dispatch work to finish. Memory is left
 * to devres.
 */
void scmi_deregister_protocol_events(const struct scmi_handle *handle,
				     u8 proto_id)
{
	struct scmi_notify_instance *ni;
	struct scmi_registered_events_desc *pd;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;

	pd = ni->registered_protocols[proto_id];
	if (!pd)
		return;

	ni->registered_protocols[proto_id] = NULL;
	/* Make the NULL visible before tearing down the worker */
	smp_wmb();

	cancel_work_sync(&pd->equeue.notify_work);
}
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
/**
 * scmi_allocate_event_handler() - Allocate a handler and park it as pending
 * @ni: notification instance
 * @evt_key: packed key this handler will serve
 *
 * The new handler starts with one user reference and is inserted into the
 * pending hashtable; binding to a registered event happens separately.
 * Context: must be called holding @ni->pending_mtx.
 *
 * Return: the new handler, or NULL on allocation failure.
 */
static struct scmi_event_handler *
scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_event_handler *hndl;

	hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
	if (!hndl)
		return NULL;
	hndl->key = evt_key;
	BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
	refcount_set(&hndl->users, 1);
	/* Parked as pending until its protocol registers the event */
	hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);

	return hndl;
}
891
892
893
894
895
896
897
898
/**
 * scmi_free_event_handler() - Unhash and free an event handler
 * @hndl: the handler to destroy
 *
 * Context: caller must hold the mutex protecting whichever hashtable
 * (pending or registered) @hndl currently lives in.
 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	hash_del(&hndl->hash);
	kfree(hndl);
}
904
905
906
907
908
909
910
911
912
913
914
915
916
/**
 * scmi_bind_event_handler() - Move a pending handler onto its registered event
 * @ni: notification instance
 * @hndl: the pending handler to bind
 *
 * Looks up the registered event matching @hndl->key and, if found, migrates
 * the handler from the pending hashtable to the protocol's registered one,
 * taking a reference on the protocol so it stays alive while used.
 * Context: must be called holding @ni->pending_mtx.
 *
 * Return: 0 on success, -EINVAL when the event is not registered (yet).
 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/* Remove from pending first: pending_mtx is held by the caller */
	hash_del(&hndl->hash);

	/* Keep the protocol alive for as long as this handler references it */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	hndl->r_evt = r_evt;

	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
965 struct scmi_event_handler *hndl)
966{
967 struct scmi_registered_events_desc *pd;
968
969 if (!IS_HNDL_PENDING(hndl))
970 return -EINVAL;
971
972 pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
973 if (pd)
974 return -EINVAL;
975
976 return 0;
977}
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996static int scmi_register_event_handler(struct scmi_notify_instance *ni,
997 struct scmi_event_handler *hndl)
998{
999 int ret;
1000
1001 ret = scmi_bind_event_handler(ni, hndl);
1002 if (!ret) {
1003 dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
1004 hndl->key);
1005 } else {
1006 ret = scmi_valid_pending_handler(ni, hndl);
1007 if (!ret)
1008 dev_dbg(ni->handle->dev,
1009 "registered PENDING handler - key:%X\n",
1010 hndl->key);
1011 }
1012
1013 return ret;
1014}
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 * __scmi_event_handler_get_ops() - Get (and maybe create) a handler by key
 * @ni: notification instance
 * @evt_key: packed key to look up
 * @create: when true, allocate and register a new handler on lookup miss
 *
 * Search order under @ni->pending_mtx: the owning protocol's registered
 * hashtable first (under its registered_mtx), then the pending hashtable,
 * then optional creation. A found handler has its user count bumped; a
 * newly created one that fails registration is purged immediately.
 *
 * Return: the referenced handler, or NULL.
 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	mutex_lock(&ni->pending_mtx);
	/* If the event is registered, look among the active handlers */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* Otherwise it may still be parked as pending */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/* Last resort: create a brand new handler if asked to */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* pending_mtx already held: use the unlocked put */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = NULL;
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}
1090
/* Lookup-only variant: never allocates a new handler */
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}

/* Lookup-or-create variant: allocates and registers on a miss */
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
/**
 * scmi_get_active_handler() - Get a reference on an already-bound handler
 * @ni: notification instance
 * @evt_key: packed key to look up
 *
 * Unlike scmi_get_handler(), only the registered (active) hashtable of the
 * owning protocol is searched — pending handlers are ignored. Used on the
 * dispatch path, where only active handlers can receive reports.
 *
 * Return: the referenced handler, or NULL.
 */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	return hndl;
}
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
/**
 * __scmi_enable_evt() - Enable/disable platform notifications per source
 * @r_evt: the registered event to act on
 * @src_id: a specific source index, or SRC_ID_MASK meaning ALL sources
 * @enable: true to enable, false to disable
 *
 * Each source carries a refcount of interested users: the platform is
 * asked to enable only on the 0->1 transition and to disable only on the
 * 1->0 transition, so overlapping requests from multiple handlers stack
 * correctly.
 *
 * Return: 0 when, for enable, at least one source was successfully
 * (or already) enabled; 0 always for disable; -EINVAL on a bad @src_id
 * or when every enable attempt failed.
 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	if (src_id == SRC_ID_MASK) {
		/* Wildcard: walk every source starting from 0 */
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == 0) {
				/* First user: ask the platform to enable */
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				refcount_inc(sid);
			}
			/* Count sources that ended up enabled */
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			/* Last user gone: ask the platform to disable */
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}
1192
1193static int scmi_enable_events(struct scmi_event_handler *hndl)
1194{
1195 int ret = 0;
1196
1197 if (!hndl->enabled) {
1198 ret = __scmi_enable_evt(hndl->r_evt,
1199 KEY_XTRACT_SRC_ID(hndl->key), true);
1200 if (!ret)
1201 hndl->enabled = true;
1202 }
1203
1204 return ret;
1205}
1206
1207static int scmi_disable_events(struct scmi_event_handler *hndl)
1208{
1209 int ret = 0;
1210
1211 if (hndl->enabled) {
1212 ret = __scmi_enable_evt(hndl->r_evt,
1213 KEY_XTRACT_SRC_ID(hndl->key), false);
1214 if (!ret)
1215 hndl->enabled = false;
1216 }
1217
1218 return ret;
1219}
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
1236 struct scmi_event_handler *hndl)
1237{
1238 bool freed = false;
1239
1240 if (refcount_dec_and_test(&hndl->users)) {
1241 if (!IS_HNDL_PENDING(hndl))
1242 scmi_disable_events(hndl);
1243 scmi_free_event_handler(hndl);
1244 freed = true;
1245 }
1246
1247 return freed;
1248}
1249
/**
 * scmi_put_handler() - Drop a handler reference, taking the needed locks
 * @ni: notification instance
 * @hndl: the handler to release
 *
 * Takes pending_mtx and, when the handler is bound, the owning protocol's
 * registered_mtx (in that order, matching all other users). The protocol
 * id is captured before the put because @hndl — and through it r_evt —
 * may be freed by scmi_put_handler_unlocked().
 */
static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * A freed bound handler no longer pins its protocol:
		 * release the reference taken at bind time.
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}
1278
/**
 * scmi_put_active_handler() - Drop a reference on a known-active handler
 * @ni: notification instance
 * @hndl: the handler to release; must be bound (r_evt != NULL)
 *
 * Lighter variant of scmi_put_handler() for handlers that are guaranteed
 * active: only the protocol's registered_mtx is needed, not pending_mtx.
 */
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl)
{
	bool freed;
	struct scmi_registered_event *r_evt = hndl->r_evt;
	/* Capture before the put: hndl/r_evt may be freed below */
	u8 protocol_id = r_evt->proto->id;

	mutex_lock(&r_evt->proto->registered_mtx);
	freed = scmi_put_handler_unlocked(ni, hndl);
	mutex_unlock(&r_evt->proto->registered_mtx);
	if (freed)
		scmi_protocol_release(ni->handle, protocol_id);
}
1292
1293
1294
1295
1296
1297
1298
1299static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
1300{
1301 if (scmi_enable_events(hndl)) {
1302 pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
1303 return -EINVAL;
1304 }
1305
1306 return 0;
1307}
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
/**
 * scmi_notifier_register() - Register a notifier_block for an event
 * @handle: the SCMI handle
 * @proto_id: protocol id of the event of interest
 * @evt_id: event id of interest
 * @src_id: pointer to a specific source id, or NULL for ANY source
 * @nb: the caller's notifier block
 *
 * The callback is attached to the handler's chain; events are enabled on
 * the platform immediately when the handler is already bound, otherwise
 * enabling is deferred until the protocol registers (late init).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int scmi_notifier_register(const struct scmi_handle *handle,
				  u8 proto_id, u8 evt_id, const u32 *src_id,
				  struct notifier_block *nb)
{
	int ret = 0;
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	/* NULL src_id means "all sources": use the wildcard key */
	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_or_create_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	blocking_notifier_chain_register(&hndl->chain, nb);

	/* Pending handlers get enabled later by the late-init worker */
	if (!IS_HNDL_PENDING(hndl)) {
		ret = scmi_event_handler_enable_events(hndl);
		if (ret)
			scmi_put_handler(ni, hndl);
	}

	return ret;
}
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
/**
 * scmi_notifier_unregister() - Unregister a notifier_block for an event
 * @handle: the SCMI handle
 * @proto_id: protocol id used at registration
 * @evt_id: event id used at registration
 * @src_id: same source-id pointer semantics as at registration
 * @nb: the notifier block to remove
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int scmi_notifier_unregister(const struct scmi_handle *handle,
				    u8 proto_id, u8 evt_id, const u32 *src_id,
				    struct notifier_block *nb)
{
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	blocking_notifier_chain_unregister(&hndl->chain, nb);
	/* Balance the reference taken by scmi_get_handler() just above */
	scmi_put_handler(ni, hndl);

	/*
	 * Second put is intentional: it drops the reference the user
	 * acquired at registration time, freeing the handler (and
	 * disabling platform notifications) once no users remain.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}
1429
/**
 * struct scmi_notifier_devres - Devres payload for a managed notifier
 * @handle: the SCMI handle the notifier was registered against
 * @proto_id: protocol id of the event
 * @evt_id: event id
 * @__src_id: storage for the source id when one was supplied
 * @src_id: points to @__src_id, or NULL when registered for ANY source
 * @nb: the user's notifier block
 */
struct scmi_notifier_devres {
	const struct scmi_handle *handle;
	u8 proto_id;
	u8 evt_id;
	u32 __src_id;
	u32 *src_id;
	struct notifier_block *nb;
};
1438
/* Devres release: undo the registration captured in the devres payload */
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
	struct scmi_notifier_devres *dres = res;

	scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
				 dres->src_id, dres->nb);
}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
/**
 * scmi_devm_notifier_register() - Managed registration of a notifier_block
 * @sdev: the SCMI device whose lifetime drives automatic unregistration
 * @proto_id: protocol id of the event of interest
 * @evt_id: event id of interest
 * @src_id: pointer to a specific source id, or NULL for ANY source
 * @nb: the caller's notifier block
 *
 * The devres record copies *src_id into its own storage so the caller's
 * pointer need not outlive this call.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int scmi_devm_notifier_register(struct scmi_device *sdev,
				       u8 proto_id, u8 evt_id,
				       const u32 *src_id,
				       struct notifier_block *nb)
{
	int ret;
	struct scmi_notifier_devres *dres;

	dres = devres_alloc(scmi_devm_release_notifier,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return -ENOMEM;

	ret = scmi_notifier_register(sdev->handle, proto_id,
				     evt_id, src_id, nb);
	if (ret) {
		devres_free(dres);
		return ret;
	}

	/* Fill the record only after registration succeeded */
	dres->handle = sdev->handle;
	dres->proto_id = proto_id;
	dres->evt_id = evt_id;
	dres->nb = nb;
	if (src_id) {
		/* Own a private copy: the caller's pointer may go away */
		dres->__src_id = *src_id;
		dres->src_id = &dres->__src_id;
	} else {
		dres->src_id = NULL;
	}
	devres_add(&sdev->dev, dres);

	return ret;
}
1497
1498static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
1499{
1500 struct scmi_notifier_devres *dres = res;
1501 struct scmi_notifier_devres *xres = data;
1502
1503 if (WARN_ON(!dres || !xres))
1504 return 0;
1505
1506 return dres->proto_id == xres->proto_id &&
1507 dres->evt_id == xres->evt_id &&
1508 dres->nb == xres->nb &&
1509 ((!dres->src_id && !xres->src_id) ||
1510 (dres->src_id && xres->src_id &&
1511 dres->__src_id == xres->__src_id));
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
/**
 * scmi_devm_notifier_unregister() - Managed unregistration of a notifier
 * @sdev: the SCMI device used at registration
 * @proto_id: protocol id used at registration
 * @evt_id: event id used at registration
 * @src_id: same source-id pointer semantics as at registration
 * @nb: the notifier block to remove
 *
 * Builds a stack-local reference record and asks devres to find, release
 * and free the matching managed registration.
 *
 * Return: 0 on success, negative errno when no matching record exists.
 */
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
					 u8 proto_id, u8 evt_id,
					 const u32 *src_id,
					 struct notifier_block *nb)
{
	int ret;
	struct scmi_notifier_devres dres;

	dres.handle = sdev->handle;
	dres.proto_id = proto_id;
	dres.evt_id = evt_id;
	if (src_id) {
		dres.__src_id = *src_id;
		dres.src_id = &dres.__src_id;
	} else {
		dres.src_id = NULL;
	}

	ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
			     scmi_devm_notifier_match, &dres);
	/* Releasing something that was never registered is a caller bug */
	WARN_ON(ret);

	return ret;
}
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
/**
 * scmi_protocols_late_init() - Finalize handlers parked as pending
 * @work: the instance's init_work, scheduled after a protocol registers
 *
 * Walks the pending hashtable and, for each handler: binds and enables it
 * when its event is now registered; keeps it pending when its protocol is
 * still absent; purges it when the protocol exists but never registered
 * that event.
 */
static void scmi_protocols_late_init(struct work_struct *work)
{
	int bkt;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;
	struct hlist_node *tmp;

	ni = container_of(work, struct scmi_notify_instance, init_work);

	/* Paired with the smp_wmb() publishing the protocol descriptor */
	smp_rmb();

	mutex_lock(&ni->pending_mtx);
	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
		int ret;

		ret = scmi_bind_event_handler(ni, hndl);
		if (!ret) {
			dev_dbg(ni->handle->dev,
				"finalized PENDING handler - key:%X\n",
				hndl->key);
			ret = scmi_event_handler_enable_events(hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging INVALID handler - key:%X\n",
					hndl->key);
				/* Now bound: use the active-handler put */
				scmi_put_active_handler(ni, hndl);
			}
		} else {
			ret = scmi_valid_pending_handler(ni, hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging PENDING handler - key:%X\n",
					hndl->key);
				/* pending_mtx already held here */
				scmi_put_handler_unlocked(ni, hndl);
			}
		}
	}
	mutex_unlock(&ni->pending_mtx);
}
1608
1609
1610
1611
1612
/* Notification operations exposed to SCMI drivers via handle->notify_ops */
static const struct scmi_notify_ops notify_ops = {
	.devm_event_notifier_register = scmi_devm_notifier_register,
	.devm_event_notifier_unregister = scmi_devm_notifier_unregister,
	.event_notifier_register = scmi_notifier_register,
	.event_notifier_unregister = scmi_notifier_unregister,
};
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
/**
 * scmi_notification_init() - Initialize the notification core
 * @handle: the SCMI handle to attach the core to
 *
 * All allocations are collected in a dedicated devres group so a partial
 * failure (or a later scmi_notification_exit()) tears everything down in
 * one devres_release_group() call.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */
int scmi_notification_init(struct scmi_handle *handle)
{
	void *gid;
	struct scmi_notify_instance *ni;

	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
	if (!ni)
		goto err;

	ni->gid = gid;
	ni->handle = handle;

	ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
						sizeof(char *), GFP_KERNEL);
	if (!ni->registered_protocols)
		goto err;

	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
	if (!ni->notify_wq)
		goto err;

	mutex_init(&ni->pending_mtx);
	hash_init(ni->pending_events_handlers);

	INIT_WORK(&ni->init_work, scmi_protocols_late_init);

	scmi_notification_instance_data_set(handle, ni);
	handle->notify_ops = &notify_ops;
	/* Publish the fully-initialized instance to lockless readers */
	smp_wmb();

	dev_info(handle->dev, "Core Enabled.\n");

	devres_close_group(handle->dev, ni->gid);

	return 0;

err:
	dev_warn(handle->dev, "Initialization Failed.\n");
	devres_release_group(handle->dev, gid);
	return -ENOMEM;
}
1694
1695
1696
1697
1698
/**
 * scmi_notification_exit() - Shut down the notification core
 * @handle: the SCMI handle the core was attached to
 *
 * Clears the instance pointer first so scmi_notify() starts rejecting new
 * events, then drains and destroys the workqueue before releasing every
 * devres resource of the core's group.
 */
void scmi_notification_exit(struct scmi_handle *handle)
{
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;
	scmi_notification_instance_data_set(handle, NULL);

	/* destroy_workqueue() flushes pending dispatch work first */
	destroy_workqueue(ni->notify_wq);

	devres_release_group(ni->handle->dev, ni->gid);
}
1713