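// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
 *	Nishanth Menon
 */
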
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);
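
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message.
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */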
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};
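
/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */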
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation against concurrent use */
	spinlock_t xfer_lock;
};
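
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled
 */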
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
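
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @nb:		Reboot Notifier block
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region:	Memory region where the debug messages are available
 * @debug_buffer:	Buffer allocated to copy debug messages
 * @debug_region_size:	Debug region size
 * @handle:	Instance of TI SCI handle to send to clients
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */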
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS
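
/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */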
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave NULL terminated last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
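
/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */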
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}
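
/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */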
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}
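
/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the
 * system for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: allocated ti_sci_xfer on success, else corresponding ERR_PTR.
 */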
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that.
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies to
	 * predictable O(1) access, BUT, it opens us to risk if the remote
	 * misbehaves with corrupted message sequence responses. If that
	 * happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}
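
/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */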
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}
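
/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */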
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
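
/*
 * Typical command flow built on the helpers above (an illustrative sketch,
 * not a function in this file; "xyz" stands for any message type):
 *
 *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_XYZ,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
 *	(fill request fields)
 *	ret = ti_sci_do_xfer(info, xfer);
 *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *
 * Each ti_sci_cmd_*() that follows is an instance of this pattern.
 */

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */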
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}
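
/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */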
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}
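
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */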
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
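
/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to programmed state
 * @c_state:	pointer to current state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */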
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Request (non-exclusive) ownership of a device and power it on */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/* Request exclusive ownership of a device and power it on */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/* Request a device to move to retention (idle) state */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/* Request exclusive ownership of a device in retention (idle) state */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/* Release a device: TISCI may power it off once all hosts release it */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/* Check if a device ID is valid */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/* Retrieve the context loss counter of a device */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/* Check if the device is requested to be idle (retention) */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/* Check if the device is requested to stop and/or is currently off */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/* Check if the device is requested to be on and/or is currently on */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/* Check if the device is currently transitioning between states */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/* Configure the reset lines of a device */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Read back the current reset line state of a device */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}
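
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */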
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		/* Clock IDs >= 255 are passed via the extended 32-bit field */
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Read back a clock's programmed (requested) and current state */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Request a clock to be enabled, with the given usage constraints */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/* Request a clock to be idled */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/* Release a clock back to automatic (TISCI-managed) state */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/* Check if the clock is under automatic (TISCI-managed) control */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/* Check if the clock is requested to run and/or is currently running */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/* Check if the clock is requested to stop and/or is currently stopped */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/* Select the parent (input) of a clock */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Query the current parent (input) of a clock */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Query the number of selectable parents of a clock */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Query the best matching frequency in [min_freq, max_freq] for a clock */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Program a clock frequency within the given range */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Read back the current frequency of a clock */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Request a system reset (core reboot) from the TISCI entity */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
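
/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. A resource is uniquely identified
 *			       by type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */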
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_start && !resp->range_num) {
		/* A zero-sized range means nothing is assigned to this host */
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Get a range of resources assigned to the default (own) host */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 range_start, range_num);
}

/* Get a range of resources assigned to a specified secondary host */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
					 range_start, range_num);
}
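
/**
 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 * @type:		Request type irq set or release.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */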
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
			     u32 valid_params, u16 src_id, u16 src_index,
			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
			     u16 global_event, u8 vint_status_bit, u8 s_host,
			     u16 type)
{
	struct ti_sci_msg_req_manage_irq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	req->ia_id = ia_id;
	req->vint = vint;
	req->global_event = global_event;
	req->vint_status_bit = vint_status_bit;
	req->secondary_host = s_host;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Set up an IRQ route between the requested source and destination */
static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
			  u16 src_id, u16 src_index, u16 dst_id,
			  u16 dst_host_irq, u16 ia_id, u16 vint,
			  u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_SET_IRQ);
}

/* Tear down an IRQ route between the requested source and destination */
static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
			   u16 src_id, u16 src_index, u16 dst_id,
			   u16 dst_host_irq, u16 ia_id, u16 vint,
			   u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_FREE_IRQ);
}

/* Configure a direct (non-event) host interrupt route */
static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
			      u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
			      dst_host_irq, 0, 0, 0, 0, 0);
}

/* Configure an event based interrupt route through an Interrupt Aggregator */
static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
				    u16 src_id, u16 src_index, u16 ia_id,
				    u16 vint, u16 global_event,
				    u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
			   MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
			      ia_id, vint, global_event, vint_status_bit, 0);
}

/* Release a direct (non-event) host interrupt route */
static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
			       u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
			       dst_host_irq, 0, 0, 0, 0, 0);
}

/* Release an event based interrupt route through an Interrupt Aggregator */
static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
				     u16 src_id, u16 src_index, u16 ia_id,
				     u16 vint, u16 global_event,
				     u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID |
			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
			       ia_id, vint, global_event, vint_status_bit, 0);
}
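
/**
 * ti_sci_cmd_ring_config() - configure RA ring
 * @handle:		Pointer to TI SCI handle.
 * @valid_params:	Bitfield defining validity of ring configuration
 *			parameters
 * @nav_id:		Device ID of Navigator Subsystem from which the ring
 *			is allocated
 * @index:		Ring index
 * @addr_lo:		The ring base address lo 32 bits
 * @addr_hi:		The ring base address hi 32 bits
 * @count:		Number of ring elements
 * @mode:		The mode of the ring
 * @size:		The ring element size
 * @order_id:		Specifies the ring's bus order ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */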
static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
				  u32 valid_params, u16 nav_id, u16 index,
				  u32 addr_lo, u32 addr_hi, u32 count,
				  u8 mode, u8 size, u8 order_id)
{
	struct ti_sci_msg_rm_ring_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->nav_id = nav_id;
	req->index = index;
	req->addr_lo = addr_lo;
	req->addr_hi = addr_hi;
	req->count = count;
	req->mode = mode;
	req->size = size;
	req->order_id = order_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
	return ret;
}

/* Read back the configuration of a Navigator Subsystem ring */
static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
				      u32 nav_id, u32 index, u8 *mode,
				      u32 *addr_lo, u32 *addr_hi,
				      u32 *count, u8 *size, u8 *order_id)
{
	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
	struct ti_sci_msg_rm_ring_get_cfg_req *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev,
			"RM_RA:Message get config failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->index = index;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (mode)
			*mode = resp->mode;
		if (addr_lo)
			*addr_lo = resp->addr_lo;
		if (addr_hi)
			*addr_hi = resp->addr_hi;
		if (count)
			*count = resp->count;
		if (size)
			*size = resp->size;
		if (order_id)
			*order_id = resp->order_id;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
	return ret;
}

/* Pair a PSI-L source thread to a destination thread */
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_pair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Unpair a PSI-L source thread from a destination thread */
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_unpair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/* Configure a UDMAP transmit channel */
static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->tx_pause_on_err = params->tx_pause_on_err;
	req->tx_filt_einfo = params->tx_filt_einfo;
	req->tx_filt_pswords = params->tx_filt_pswords;
	req->tx_atype = params->tx_atype;
	req->tx_chan_type = params->tx_chan_type;
	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
	req->tx_fetch_size = params->tx_fetch_size;
	req->tx_credit_count = params->tx_credit_count;
	req->txcq_qnum = params->txcq_qnum;
	req->tx_priority = params->tx_priority;
	req->tx_qos = params->tx_qos;
	req->tx_orderid = params->tx_orderid;
	req->fdepth = params->fdepth;
	req->tx_sched_priority = params->tx_sched_priority;
	req->tx_burst_size = params->tx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}

/* Configure a UDMAP receive channel */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->rx_fetch_size = params->rx_fetch_size;
	req->rxcq_qnum = params->rxcq_qnum;
	req->rx_priority = params->rx_priority;
	req->rx_qos = params->rx_qos;
	req->rx_orderid = params->rx_orderid;
	req->rx_sched_priority = params->rx_sched_priority;
	req->flowid_start = params->flowid_start;
	req->flowid_cnt = params->flowid_cnt;
	req->rx_pause_on_err = params->rx_pause_on_err;
	req->rx_atype = params->rx_atype;
	req->rx_chan_type = params->rx_chan_type;
	req->rx_ignore_short = params->rx_ignore_short;
	req->rx_ignore_long = params->rx_ignore_long;
	req->rx_burst_size = params->rx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}

/* Configure a UDMAP receive flow */
static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
{
	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->flow_index = params->flow_index;
	req->rx_einfo_present = params->rx_einfo_present;
	req->rx_psinfo_present = params->rx_psinfo_present;
	req->rx_error_handling = params->rx_error_handling;
	req->rx_desc_type = params->rx_desc_type;
	req->rx_sop_offset = params->rx_sop_offset;
	req->rx_dest_qnum = params->rx_dest_qnum;
	req->rx_src_tag_hi = params->rx_src_tag_hi;
	req->rx_src_tag_lo = params->rx_src_tag_lo;
	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
	req->rx_ps_location = params->rx_ps_location;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
	return ret;
}
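
/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */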
2531static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2532 u8 proc_id)
2533{
2534 struct ti_sci_msg_req_proc_request *req;
2535 struct ti_sci_msg_hdr *resp;
2536 struct ti_sci_info *info;
2537 struct ti_sci_xfer *xfer;
2538 struct device *dev;
2539 int ret = 0;
2540
2541 if (!handle)
2542 return -EINVAL;
2543 if (IS_ERR(handle))
2544 return PTR_ERR(handle);
2545
2546 info = handle_to_ti_sci_info(handle);
2547 dev = info->dev;
2548
2549 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2550 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2551 sizeof(*req), sizeof(*resp));
2552 if (IS_ERR(xfer)) {
2553 ret = PTR_ERR(xfer);
2554 dev_err(dev, "Message alloc failed(%d)\n", ret);
2555 return ret;
2556 }
2557 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2558 req->processor_id = proc_id;
2559
2560 ret = ti_sci_do_xfer(info, xfer);
2561 if (ret) {
2562 dev_err(dev, "Mbox send fail %d\n", ret);
2563 goto fail;
2564 }
2565
2566 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2567
2568 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2569
2570fail:
2571 ti_sci_put_one_xfer(&info->minfo, xfer);
2572
2573 return ret;
2574}
2575
2576
2577
2578
2579
2580
2581
2582
2583static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2584 u8 proc_id)
2585{
2586 struct ti_sci_msg_req_proc_release *req;
2587 struct ti_sci_msg_hdr *resp;
2588 struct ti_sci_info *info;
2589 struct ti_sci_xfer *xfer;
2590 struct device *dev;
2591 int ret = 0;
2592
2593 if (!handle)
2594 return -EINVAL;
2595 if (IS_ERR(handle))
2596 return PTR_ERR(handle);
2597
2598 info = handle_to_ti_sci_info(handle);
2599 dev = info->dev;
2600
2601 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2602 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2603 sizeof(*req), sizeof(*resp));
2604 if (IS_ERR(xfer)) {
2605 ret = PTR_ERR(xfer);
2606 dev_err(dev, "Message alloc failed(%d)\n", ret);
2607 return ret;
2608 }
2609 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2610 req->processor_id = proc_id;
2611
2612 ret = ti_sci_do_xfer(info, xfer);
2613 if (ret) {
2614 dev_err(dev, "Mbox send fail %d\n", ret);
2615 goto fail;
2616 }
2617
2618 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2619
2620 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2621
2622fail:
2623 ti_sci_put_one_xfer(&info->minfo, xfer);
2624
2625 return ret;
2626}
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id)
{
	struct ti_sci_msg_req_proc_handover *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->host_id = host_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
 *				  configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared
 *
 * Return: 0 if all went well, else an appropriate error value.
 */
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 bootvector,
				      u32 config_flags_set,
				      u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_config *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
	req->processor_id = proc_id;
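	/*
	 * The 64-bit boot vector is split across two 32-bit message fields:
	 * bits [31:0] go into bootvector_low and bits [63:32] into
	 * bootvector_high, e.g. 0x0000000880000000 yields low = 0x80000000,
	 * high = 0x8.
	 */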
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
				TI_SCI_ADDR_HIGH_SHIFT;
	req->config_flags_set = config_flags_set;
	req->config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
 *				   control flags
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
 * @control_flags_set:		Control flags to be set
 * @control_flags_clear:	Control flags to be cleared
 *
 * Return: 0 if all went well, else an appropriate error value.
 */
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
				       u8 proc_id, u32 control_flags_set,
				       u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_ctrl *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->control_flags_set = control_flags_set;
	req->control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
 *
 * Return: 0 if all went well, else an appropriate error value.
 */
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 *bv, u32 *cfg_flags,
				      u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_status *resp;
	struct ti_sci_msg_req_get_status *req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
		       TI_SCI_ADDR_HIGH_MASK);
		*cfg_flags = resp->config_flags;
		*ctrl_flags = resp->control_flags;
		*sts_flags = resp->status_flags;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI instance
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;

	dops->get_device = ti_sci_cmd_get_device;
	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
	dops->put_device = ti_sci_cmd_put_device;

	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	iops->set_irq = ti_sci_cmd_set_irq;
	iops->set_event_map = ti_sci_cmd_set_event_map;
	iops->free_irq = ti_sci_cmd_free_irq;
	iops->free_event_map = ti_sci_cmd_free_event_map;

	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;

	pops->request = ti_sci_cmd_proc_request;
	pops->release = ti_sci_cmd_proc_release;
	pops->handover = ti_sci_cmd_proc_handover;
	pops->set_config = ti_sci_cmd_proc_set_config;
	pops->set_control = ti_sci_cmd_proc_set_control;
	pops->get_status = ti_sci_cmd_proc_get_status;
}

/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev:	Pointer to the device for which we want the SCI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the TI SCI protocol
 * library. ti_sci_put_handle must be balanced with a successful
 * ti_sci_get_handle.
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready,
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
	struct device_node *ti_sci_np;
	struct list_head *p;
	struct ti_sci_handle *handle = NULL;
	struct ti_sci_info *info;

	if (!dev) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}
	ti_sci_np = of_get_parent(dev->of_node);
	if (!ti_sci_np) {
		dev_err(dev, "No OF information\n");
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);
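
/*
 * Example (an illustrative sketch; "pdev" stands in for the client's
 * platform device): a caller that cannot use the managed variant must
 * balance the calls manually:
 *
 *	sci = ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 *	...
 *	ti_sci_put_handle(sci);
 */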

/**
 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
 * @handle:	Handle acquired by ti_sci_get_handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the TI SCI protocol
 * library. ti_sci_put_handle must be balanced with a successful
 * ti_sci_get_handle.
 *
 * Return: 0 if the handle was successfully released; if an error pointer
 * was passed, the error value is returned; if NULL was passed, -EINVAL
 * is returned.
 */
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	mutex_lock(&ti_sci_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);

static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}

/**
 * devm_ti_sci_get_handle() - Managed version of ti_sci_get_handle()
 * @dev:	Device for which we want the SCI handle
 *
 * NOTE: This releases the handle once the device resources are no longer
 * needed. MUST NOT be called if the handle was acquired with
 * ti_sci_get_handle().
 *
 * Return: pointer to handle if successful, else a corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
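
/*
 * Example (an illustrative sketch; foo_probe() is a hypothetical client):
 * the managed variant ties the handle's lifetime to the client device, so
 * no explicit put is needed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *sci;
 *
 *		sci = devm_ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(sci))
 *			return PTR_ERR(sci);
 *		return 0;
 *	}
 *
 * The handle is then released automatically when the client's device
 * resources are torn down.
 */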

/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using a DT phandle
 * @np:		Device node with a property pointing at the TISCI node
 * @property:	Name of the property containing the phandle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the TI SCI protocol
 * library. ti_sci_put_handle must be balanced with a successful
 * ti_sci_get_by_phandle.
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready,
 * -ENODEV if the phandle is missing,
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
						  const char *property)
{
	struct ti_sci_handle *handle = NULL;
	struct device_node *ti_sci_np;
	struct ti_sci_info *info;
	struct list_head *p;

	if (!np) {
		pr_err("I need a device node pointer\n");
		return ERR_PTR(-EINVAL);
	}

	ti_sci_np = of_parse_phandle(np, property, 0);
	if (!ti_sci_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
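
/*
 * Example (an illustrative sketch; the node and phandle names are
 * hypothetical): given a DT fragment such as
 *
 *	foo@2b000000 {
 *		...
 *		ti,sci = <&dmsc>;
 *	};
 *
 * a client can resolve the handle with:
 *
 *	sci = ti_sci_get_by_phandle(np, "ti,sci");
 */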

/**
 * devm_ti_sci_get_by_phandle() - Managed version of ti_sci_get_by_phandle()
 * @dev:	Device requesting the TISCI handle
 * @property:	Name of the property containing the phandle
 *
 * NOTE: This releases the handle once the device resources are no longer
 * needed. MUST NOT be called if the handle was acquired with
 * ti_sci_get_by_phandle().
 *
 * Return: pointer to handle if successful, else a corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
						       const char *property)
{
	const struct ti_sci_handle *handle;
	const struct ti_sci_handle **ptr;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);

/**
 * ti_sci_get_free_resource() - Get a free resource from a TISCI resource range
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource number if one is available, else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	unsigned long flags;
	u16 set, free_bit;

	raw_spin_lock_irqsave(&res->lock, flags);
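	/* First-fit: scan each range set and claim the first free bit */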
	for (set = 0; set < res->sets; set++) {
		free_bit = find_first_zero_bit(res->desc[set].res_map,
					       res->desc[set].num);
		if (free_bit != res->desc[set].num) {
			set_bit(free_bit, res->desc[set].res_map);
			raw_spin_unlock_irqrestore(&res->lock, flags);
			return res->desc[set].start + free_bit;
		}
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);

	return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);

/**
 * ti_sci_release_resource() - Release a resource to a TISCI resource range
 * @res:	Pointer to the TISCI resource
 * @id:		Resource id to be released
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	unsigned long flags;
	u16 set;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		if (res->desc[set].start <= id &&
		    (res->desc[set].num + res->desc[set].start) > id)
			clear_bit(id - res->desc[set].start,
				  res->desc[set].res_map);
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);
}
EXPORT_SYMBOL_GPL(ti_sci_release_resource);
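
/*
 * Example (an illustrative sketch; "res" is assumed to come from
 * devm_ti_sci_get_of_resource() below): allocation and release pair up as:
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOENT;
 *	...
 *	ti_sci_release_resource(res, id);
 */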

/**
 * ti_sci_get_num_resources() - Get the total number of resources in a range
 * @res:	Pointer to the TISCI resource
 *
 * Return: Total number of available resources across all sets.
 */
u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
{
	u32 set, count = 0;

	for (set = 0; set < res->sets; set++)
		count += res->desc[set].num;

	return count;
}
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);

/**
 * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resources are assigned
 * @dev_id:	TISCI device id to which the resources are assigned
 * @sub_types:	Array of sub_types assigned corresponding to device
 * @sets:	Number of sub_types
 *
 * Return: Pointer to ti_sci_resource if all went well, else an appropriate
 *	   error pointer.
 */
static struct ti_sci_resource *
devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
			      struct device *dev, u32 dev_id, u32 *sub_types,
			      u32 sets)
{
	struct ti_sci_resource *res;
	bool valid_set = false;
	int i, ret;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->sets = sets;
	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < res->sets; i++) {
		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
							sub_types[i],
							&res->desc[i].start,
							&res->desc[i].num);
		if (ret) {
			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
				dev_id, sub_types[i]);
			res->desc[i].start = 0;
			res->desc[i].num = 0;
			continue;
		}

		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
			dev_id, sub_types[i], res->desc[i].start,
			res->desc[i].num);

		valid_set = true;
		res->desc[i].res_map =
			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map)
			return ERR_PTR(-ENOMEM);
	}
	raw_spin_lock_init(&res->lock);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);
}

/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @of_prop:	Property name by which the resource subtypes are listed in DT
 *
 * Return: Pointer to ti_sci_resource if all went well, else an appropriate
 *	   error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct device *dev, u32 dev_id, char *of_prop)
{
	struct ti_sci_resource *res;
	u32 *sub_types;
	int sets;

	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
					       sizeof(u32));
	if (sets < 0) {
		dev_err(dev, "%s resource type ids not available\n", of_prop);
		return ERR_PTR(sets);
	}

	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
	if (!sub_types)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
					    sets);

	kfree(sub_types);
	return res;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
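
/*
 * Example (an illustrative sketch; the property name and subtype value are
 * hypothetical for this host): given a DT property listing resource
 * subtypes, e.g.
 *
 *	ti,sci-rm-range-vint = <0x0a>;
 *
 * a client would call:
 *
 *	res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 */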

/**
 * devm_ti_sci_get_resource() - Get a resource range assigned to the device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @sub_type:	TISCI resource subtype representing the resource
 *
 * Return: Pointer to ti_sci_resource if all went well, else an appropriate
 *	   error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
			 u32 dev_id, u32 sub_type)
{
	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);

static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/* On success the firmware reboots us; reaching here means it failed */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 1000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msgs = 20,
	.max_msg_size = 60,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);

static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;
	u32 h_id;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
	/* If the property is not present in DT, use the default from desc */
	if (ret < 0) {
		info->host_id = info->desc->default_host_id;
	} else {
		if (!h_id) {
			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
			info->host_id = info->desc->default_host_id;
		} else {
			info->host_id = h_id;
		}
	}

	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages: never allocate more messages than can be
	 * identified by the sequence number in hdr.seq. If the descriptor
	 * data is wrong, force a fix here.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointers to the pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

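		/* TX and RX share xfer_buf: the response overwrites the request */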
		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
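	/* Each semaphore count represents one free transfer slot */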
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}
	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			return ret;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);
	return ret;
}

static int ti_sci_remove(struct platform_device *pdev)
{
	struct ti_sci_info *info;
	struct device *dev = &pdev->dev;
	int ret = 0;

	of_platform_depopulate(dev);

	info = platform_get_drvdata(pdev);

	if (info->nb.notifier_call)
		unregister_restart_handler(&info->nb);

	mutex_lock(&ti_sci_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&ti_sci_list_mutex);

	if (!ret) {
		ti_sci_debugfs_destroy(pdev, info);

		/* Safe to free the channels since there are no more users */
		mbox_free_channel(info->chan_tx);
		mbox_free_channel(info->chan_rx);
	}

	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");