// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 * Based on drivers/firmware/ti_sci.c from Linux.
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */
#include <common.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <mailbox.h>
#include <malloc.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-sec-proxy.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};

/**
 * struct ti_sci_rm_type_map - Mapping of a TISCI device ID to the resource
 *			       type used by the TISCI resource management APIs
 * @dev_id:	TISCI device ID
 * @type:	Corresponding resource type as identified by TISCI RM
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 * @xfer:	xfer info
 * @list:	list field of the TI SCI instance
 * @dev_list:	Device list of exclusively acquired devices
 * @is_secure:	Determines if the communication is through secure threads
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};

/* One entry per device acquired with the EXCLUSIVE flag, with a use count */
struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};

#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)

/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @buf:	Buffer to be sent to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size. May be set to zero for send-only
 *		     transactions.
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 *	   else appropriate error pointer.
 */
static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
						 u16 msg_type, u32 msg_flags,
						 u32 *buf,
						 size_t tx_message_size,
						 size_t rx_message_size)
{
	struct ti_sci_xfer *xfer = &info->xfer;
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
	    tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	info->seq = ~info->seq;
	xfer->tx_message.buf = buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	hdr = (struct ti_sci_msg_hdr *)buf;
	hdr->seq = info->seq;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static inline int ti_sci_get_response(struct ti_sci_info *info,
				      struct ti_sci_xfer *xfer,
				      struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	u8 secure_buf[info->desc->max_msg_size];
	struct ti_sci_secure_msg_hdr secure_hdr;
	int ret;

	if (info->is_secure) {
		/* ToDo: get checksum of the entire message */
		secure_hdr.checksum = 0;
		secure_hdr.reserved = 0;
		memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
		       xfer->tx_message.len);

		xfer->tx_message.buf = (u32 *)secure_buf;
		xfer->tx_message.len += sizeof(secure_hdr);

		if (xfer->rx_len)
			xfer->rx_len += sizeof(secure_hdr);
	}

	/* Send the message */
	ret = mbox_send(&info->chan_tx, msg);
	if (ret) {
		dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Get the response if one was requested */
	if (xfer->rx_len)
		ret = ti_sci_get_response(info, xfer, &info->chan_rx);

	return ret;
}
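
/*
 * Illustrative sketch (editorial, not part of the driver): every TISCI
 * command below follows the same pattern around ti_sci_setup_one_xfer() and
 * ti_sci_do_xfer(). The request/response structs come from ti_sci.h (the
 * "xyz" type here is hypothetical), and the reply is read back from the
 * same buffer that the mailbox driver updated:
 *
 *	struct ti_sci_msg_req_xyz req;		// hypothetical request type
 *	struct ti_sci_msg_hdr *resp;
 *	struct ti_sci_xfer *xfer;
 *
 *	xfer = ti_sci_setup_one_xfer(info, msg_type,
 *				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				     (u32 *)&req, sizeof(req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	// ... fill req fields here ...
 *	ret = ti_sci_do_xfer(info, xfer);	// send and wait for reply
 *	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 *	if (!ti_sci_is_response_ack(resp))
 *		return -ENODEV;
 */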

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @handle:	pointer to TI SCI handle
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
{
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_version_info *ver;
	struct ti_sci_msg_hdr hdr;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
				     sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox communication fail %d\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;

	ver = &handle->version;
	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

	return 0;
}
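
/*
 * Example (sketch, lives outside this file): a client would typically obtain
 * a handle and read the version populated by ti_sci_cmd_get_revision().
 * ti_sci_get_handle() is assumed here as the accessor declared in
 * ti_sci_protocol.h:
 *
 *	const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
 *
 *	if (!IS_ERR(handle))
 *		printf("ABI %d.%d, firmware rev 0x%04x '%s'\n",
 *		       handle->version.abi_major, handle->version.abi_minor,
 *		       handle->version.firmware_revision,
 *		       handle->version.firmware_description);
 */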

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message check
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * cmd_set_board_config_using_msg() - Common command to send board
 *				      configuration message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
					  u16 msg_type, u64 addr, u32 size)
{
	struct ti_sci_msg_board_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, msg_type,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
	req.boardcfgp_low = addr & 0xffffffff;
	req.boardcfg_size = size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_set_board_config() - Command to send board configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
				       u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
 *				      management configuration
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board RM config structure is located
 * @size:	Size of the RM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
				   u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_RM,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_security() - Command to send board security
 *					    configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board security config structure is located
 * @size:	Size of the security config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
					 u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_SECURITY,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
 *				      configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board PM config structure is located
 * @size:	Size of the PM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
					  u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_PM,
					      addr, size);
}

static struct ti_sci_exclusive_dev
*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	list_for_each_entry(dev, dev_list, list)
		if (dev->id == id)
			return dev;

	return NULL;
}

static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (dev) {
		dev->count++;
		return;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)	/* allocation failure: nothing to track */
		return;
	dev->id = id;
	dev->count = 1;
	INIT_LIST_HEAD(&dev->list);
	list_add_tail(&dev->list, &info->dev_list);
}

static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (!dev)
		return;

	if (dev->count > 0)
		dev->count--;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
		ti_sci_delete_exclusive_dev(info, id);
	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
		ti_sci_add_exclusive_dev(info, id);

	return ret;
}
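
/*
 * Note (editorial): the get/idle/put wrappers further below all funnel into
 * ti_sci_set_device_state() with one of three software states:
 *
 *	MSG_DEVICE_SW_STATE_ON		- get_device(): device powered up
 *	MSG_DEVICE_SW_STATE_RETENTION	- idle_device(): context retained
 *	MSG_DEVICE_SW_STATE_AUTO_OFF	- put_device(): firmware may power off
 *
 * Passing MSG_FLAG_DEVICE_EXCLUSIVE in @flags additionally records the
 * device in the per-instance exclusive-device list managed above.
 */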

/**
 * ti_sci_set_device_state_no_wait() - Set device state helper without
 *				       requesting or waiting for a response.
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
					   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
				     (u32 *)&req, sizeof(req), 0);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		dev_err(info->dev, "Mbox send fail %d\n", ret);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_AUTO_OFF);
}

static
int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
{
	struct ti_sci_exclusive_dev *dev, *tmp;
	struct ti_sci_info *info;
	int i, cnt;

	info = handle_to_ti_sci_info(handle);

	list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
		cnt = dev->count;
		debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
		for (i = 0; i < cnt; i++)
			ti_sci_cmd_put_device(handle, dev->id);
	}

	return 0;
}
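
/*
 * Usage sketch (assumption: clients reach these through the ops installed in
 * struct ti_sci_handle, e.g. handle->ops.dev_ops): get/put must be balanced
 * by the caller, since no refcounting is done in this driver.
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *
 *	ret = dops->get_device(handle, dev_id);		// power up
 *	if (!ret) {
 *		// ... use the device ...
 *		dops->put_device(handle, dev_id);	// release again
 *	}
 */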

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the device
	 * is valid
	 */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}
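
/*
 * Example (sketch): distinguishing the "requested" software state from the
 * "current" hardware state, as returned via the programmed/current fields of
 * the device-state response:
 *
 *	bool requested_on, actually_on;
 *
 *	ret = ti_sci_cmd_dev_is_on(handle, dev_id, &requested_on, &actually_on);
 *	if (!ret && requested_on && !actually_on)
 *		; // still powering up - see is_transitioning just below
 */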

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_msg_req_get_clock_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
 * @can_change_freq: 'true' iff frequency change is desired
 * @enable_input_term: 'true' iff input termination is desired
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool needs_ssc, bool can_change_freq,
				bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}
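
/*
 * Clock lifecycle sketch (assumption: called via handle->ops.clk_ops as
 * installed at probe time): request a clock, run with it, then hand it back
 * to automatic management.
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *
 *	ret = cops->get_clock(handle, dev_id, clk_id,
 *			      false,	// needs_ssc
 *			      true,	// can_change_freq
 *			      false);	// enable_input_term
 *	// ... clock is now requested (MSG_CLOCK_SW_STATE_REQ) ...
 *	cops->put_clock(handle, dev_id, clk_id); // back to SW_STATE_AUTO
 */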

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device
 *				 clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 parent_id)
{
	struct ti_sci_msg_req_set_clock_parent req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.parent_id = parent_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 *parent_id)
{
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_msg_req_get_clock_parent req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*parent_id = resp->parent_id;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u8 clk_id,
					  u8 *num_parents)
{
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_msg_req_get_clock_num_parents req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
							xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*num_parents = resp->num_parents;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_msg_req_query_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_msg_req_set_clock_freq req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 * @freq:	Currently frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 *freq)
{
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_msg_req_get_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

	return ret;
}
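
/*
 * Frequency programming sketch: set_freq() takes a min/target/max window in
 * Hz (the window lets firmware pick an achievable divider setting), and
 * get_freq() reads back what was actually programmed. A minimal round-trip:
 *
 *	u64 actual_hz;
 *
 *	ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id,
 *				      96000000, 100000000, 104000000);
 *	if (!ret)
 *		ret = ti_sci_cmd_clk_get_freq(handle, dev_id, clk_id,
 *					      &actual_hz);
 */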

/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_msg_req_reboot req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.secondary_host = s_host;
	req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_start && !resp->range_num) {
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	return ret;
}

/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
 *				   that is same as ti sci interface host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 range_start, range_num);
}
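
/*
 * Example (sketch): querying the range of resources of one subtype assigned
 * to this host. The @dev_id/@subtype values are SoC-specific and
 * hypothetical here:
 *
 *	u16 start, num;
 *
 *	ret = ti_sci_cmd_get_resource_range(handle, dev_id, subtype,
 *					    &start, &num);
 *	if (!ret)
 *		; // resources [start, start + num) belong to this host
 */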

/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
					 range_start, range_num);
}

/**
 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
 * @handle:	pointer to TI SCI handle
 * @msmc_start:	MSMC start as returned by tisci
 * @msmc_end:	MSMC end as returned by tisci
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
				 u64 *msmc_start, u64 *msmc_end)
{
	struct ti_sci_msg_resp_query_msmc *resp;
	struct ti_sci_msg_hdr req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	*msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
			resp->msmc_start_low;
	*msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
			resp->msmc_end_low;

	return ret;
}
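
/*
 * Note (editorial): TISCI splits 64-bit addresses into 32-bit halves; the
 * shift/or above reassembles them. Assuming TISCI_ADDR_HIGH_SHIFT is 32 (as
 * defined in ti_sci.h), msmc_start_high = 0x1 and msmc_start_low =
 * 0x80000000 combine to the 64-bit address 0x1_8000_0000.
 */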

/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_request req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_proc_release() - Command to release a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_release req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
 *				control to a host in the processor's access
 *				control list.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id)
{
	struct ti_sci_msg_req_proc_handover req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;
	req.host_id = host_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
 *				    configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
					u8 proc_id, u64 bootvector,
					u32 config_flags_set,
					u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_proc_boot_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;
	req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
	req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
				TISCI_ADDR_HIGH_SHIFT;
	req.config_flags_set = config_flags_set;
	req.config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
 *				     control flags
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
 * @control_flags_set:		Control flags to be set
 * @control_flags_clear:	Control flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
					 u8 proc_id, u32 control_flags_set,
					 u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_proc_boot_ctrl req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;
	req.control_flags_set = control_flags_set;
	req.control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

	return ret;
}

/**
 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
 *			image and then set the processor configuration flags.
 * @handle:	Pointer to TI SCI handle
 * @image_addr:	Memory address at which payload image and certificate is
 *		located in memory, this is updated if the image data is
 *		moved during authentication.
 * @image_size: This is updated to the size of the image after authentication.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
					   u64 *image_addr, u32 *image_size)
{
	struct ti_sci_msg_req_proc_auth_boot_image req;
	struct ti_sci_msg_resp_proc_auth_boot_image *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
	req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
				TISCI_ADDR_HIGH_SHIFT;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	*image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
			(((u64)resp->image_addr_high <<
			  TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
	*image_size = resp->image_size;

	return ret;
}

/**
 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
					   u8 proc_id, u64 *bv, u32 *cfg_flags,
					   u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_proc_boot_status *resp;
	struct ti_sci_msg_req_get_proc_boot_status req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
							xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;
	*bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
		(((u64)resp->bootvector_high <<
		  TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
	*cfg_flags = resp->config_flags;
	*ctrl_flags = resp->control_flags;
	*sts_flags = resp->status_flags;

	return ret;
}
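
/*
 * Processor boot sketch (assumption: driven through handle->ops.proc_ops as
 * declared in ti_sci_protocol.h): a remote core is typically brought up by
 * requesting it, programming the boot vector, then handing it over.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *
 *	ret = pops->proc_request(handle, proc_id);
 *	if (!ret)
 *		ret = pops->set_proc_boot_cfg(handle, proc_id, bootvector,
 *					      cfg_set, cfg_clr);
 *	if (!ret)
 *		ret = pops->proc_handover(handle, proc_id, host_id);
 */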

/**
 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
 *				processor boot status without requesting or
 *				waiting for a response.
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
 * @num_wait_iterations:	Total number of iterations to check before
 *				timing out and giving up
 * @num_match_iterations:	Number of consecutive iterations the status
 *				must match, to account for status bits
 *				glitching
 * @delay_per_iteration_us:	How long to wait (in microseconds) between
 *				each status check
 * @delay_before_iterations_us:	How long to wait (in microseconds) before
 *				the very first status check
 * @status_flags_1_set_all_wait: If non-zero, all status bits matching this
 *				field MUST be 1
 * @status_flags_1_set_any_wait: If non-zero, at least one status bit matching
 *				this field MUST be 1
 * @status_flags_1_clr_all_wait: If non-zero, all status bits matching this
 *				field MUST be 0
 * @status_flags_1_clr_any_wait: If non-zero, at least one status bit matching
 *				this field MUST be 0
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int
ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
				     u8 proc_id,
				     u8 num_wait_iterations,
				     u8 num_match_iterations,
				     u8 delay_per_iteration_us,
				     u8 delay_before_iterations_us,
				     u32 status_flags_1_set_all_wait,
				     u32 status_flags_1_set_any_wait,
				     u32 status_flags_1_clr_all_wait,
				     u32 status_flags_1_clr_any_wait)
{
	struct ti_sci_msg_req_wait_proc_boot_status req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
				     TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
				     (u32 *)&req, sizeof(req), 0);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.processor_id = proc_id;
	req.num_wait_iterations = num_wait_iterations;
	req.num_match_iterations = num_match_iterations;
	req.delay_per_iteration_us = delay_per_iteration_us;
	req.delay_before_iterations_us = delay_before_iterations_us;
	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		dev_err(info->dev, "Mbox send fail %d\n", ret);

	return ret;
}

/**
 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
 *		requesting or waiting for a response. Note that this API call
 *		should be followed by placing the respective processor into
 *		either WFE or WFI mode.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
					    u8 proc_id)
{
	int ret;
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/*
	 * Send the core boot status wait message, waiting for either WFE or
	 * WFI, without requesting or waiting for a TISCI response. The
	 * maximum wait time gives the best chance for the core to reach the
	 * WFE/WFI instruction that is expected to follow this API call.
	 */
	ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
		U8_MAX, 100, U8_MAX, U8_MAX,
		0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
		0, 0);
	if (ret) {
		dev_err(info->dev, "Sending core %u wait message fail %d\n",
			proc_id, ret);
		return ret;
	}

	/*
	 * Release the processor managed by TISCI, again without requesting
	 * or waiting for a response.
	 */
	ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
					      MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (ret)
		dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
			proc_id, ret);

	return ret;
}

/**
 * ti_sci_cmd_ring_config() - configure RA ring
 * @handle:	pointer to TI SCI handle
 * @valid_params: Bitfield defining validity of ring configuration parameters.
 * @nav_id:	Device ID of Navigator Subsystem from which the ring is
 *		allocated
 * @index:	Ring index.
 * @addr_lo:	The ring base address lo 32 bits
 * @addr_hi:	The ring base address hi 32 bits
 * @count:	Number of ring elements.
 * @mode:	The mode of the ring
 * @size:	The ring element size.
 * @order_id:	Specifies the ring's bus order ID.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_cfg_req for more info.
 */
static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
				  u32 valid_params, u16 nav_id, u16 index,
				  u32 addr_lo, u32 addr_hi, u32 count,
				  u8 mode, u8 size, u8 order_id)
{
	struct ti_sci_msg_rm_ring_cfg_resp *resp;
	struct ti_sci_msg_rm_ring_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
		return ret;
	}
	req.valid_params = valid_params;
	req.nav_id = nav_id;
	req.index = index;
	req.addr_lo = addr_lo;
	req.addr_hi = addr_hi;
	req.count = count;
	req.mode = mode;
	req.size = size;
	req.order_id = order_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
	return ret;
}
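
/*
 * Example (sketch): a minimal ring configuration. @valid_params is a bitmask
 * telling firmware which of the following fields to apply; the
 * TI_SCI_MSG_VALUE_RM_RING_*_VALID flag names are assumed here from
 * ti_sci.h, and all other values are hypothetical:
 *
 *	ret = ti_sci_cmd_ring_config(handle,
 *				     TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID |
 *				     TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID |
 *				     TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID |
 *				     TI_SCI_MSG_VALUE_RM_RING_MODE_VALID |
 *				     TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID,
 *				     nav_id, ring_idx,
 *				     lower_32_bits(ring_dma),
 *				     upper_32_bits(ring_dma),
 *				     ring_elems, mode, elsize, 0);
 */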

static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_msg_psil_pair req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.nav_id = nav_id;
	req.src_thread = src_thread;
	req.dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
		nav_id, src_thread, dst_thread, ret);
	return ret;
}

static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_msg_psil_unpair req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.nav_id = nav_id;
	req.src_thread = src_thread;
	req.dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
		src_thread, dst_thread, ret);
	return ret;
}

static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
			const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req.valid_params = params->valid_params;
	req.nav_id = params->nav_id;
	req.index = params->index;
	req.tx_pause_on_err = params->tx_pause_on_err;
	req.tx_filt_einfo = params->tx_filt_einfo;
	req.tx_filt_pswords = params->tx_filt_pswords;
	req.tx_atype = params->tx_atype;
	req.tx_chan_type = params->tx_chan_type;
	req.tx_supr_tdpkt = params->tx_supr_tdpkt;
	req.tx_fetch_size = params->tx_fetch_size;
	req.tx_credit_count = params->tx_credit_count;
	req.txcq_qnum = params->txcq_qnum;
	req.tx_priority = params->tx_priority;
	req.tx_qos = params->tx_qos;
	req.tx_orderid = params->tx_orderid;
	req.fdepth = params->fdepth;
	req.tx_sched_priority = params->tx_sched_priority;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp =
	      (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
	return ret;
}

static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
			const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}

	req.valid_params = params->valid_params;
	req.nav_id = params->nav_id;
	req.index = params->index;
	req.rx_fetch_size = params->rx_fetch_size;
	req.rxcq_qnum = params->rxcq_qnum;
	req.rx_priority = params->rx_priority;
	req.rx_qos = params->rx_qos;
	req.rx_orderid = params->rx_orderid;
	req.rx_sched_priority = params->rx_sched_priority;
	req.flowid_start = params->flowid_start;
	req.flowid_cnt = params->flowid_cnt;
	req.rx_pause_on_err = params->rx_pause_on_err;
	req.rx_atype = params->rx_atype;
	req.rx_chan_type = params->rx_chan_type;
	req.rx_ignore_short = params->rx_ignore_short;
	req.rx_ignore_long = params->rx_ignore_long;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp =
	      (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}

static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
			const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
{
	struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
	struct ti_sci_msg_rm_udmap_flow_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RX_FL_CFG: Message alloc failed(%d)\n",
			ret);
		return ret;
	}

	req.valid_params = params->valid_params;
	req.nav_id = params->nav_id;
	req.flow_index = params->flow_index;
	req.rx_einfo_present = params->rx_einfo_present;
	req.rx_psinfo_present = params->rx_psinfo_present;
	req.rx_error_handling = params->rx_error_handling;
	req.rx_desc_type = params->rx_desc_type;
	req.rx_sop_offset = params->rx_sop_offset;
	req.rx_dest_qnum = params->rx_dest_qnum;
	req.rx_src_tag_hi = params->rx_src_tag_hi;
	req.rx_src_tag_lo = params->rx_src_tag_lo;
	req.rx_dest_tag_hi = params->rx_dest_tag_hi;
	req.rx_dest_tag_lo = params->rx_dest_tag_lo;
	req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
	req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
	req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
	req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
	req.rx_fdq1_qnum = params->rx_fdq1_qnum;
	req.rx_fdq2_qnum = params->rx_fdq2_qnum;
	req.rx_fdq3_qnum = params->rx_fdq3_qnum;
	req.rx_ps_location = params->rx_ps_location;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
		goto fail;
	}

	resp =
	       (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
	return ret;
}
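
/*
 * Editor's note: an illustrative sketch (not part of the driver) of how a
 * DMA client reaches the UDMAP calls above through the ops table. The
 * handle lookup, the property name and all field values are assumptions
 * made for the example:
 *
 *	const struct ti_sci_handle *h = ti_sci_get_by_phandle(dev, "ti,sci");
 *	struct ti_sci_msg_rm_udmap_flow_cfg cfg = {
 *		.valid_params = valid,	// bitmask of *_VALID flags (assumed)
 *		.nav_id = nav_id,	// NAVSS device id (assumed)
 *		.flow_index = flow,	// flow to configure (assumed)
 *		.rx_dest_qnum = rxq,	// destination ring (assumed)
 *	};
 *
 *	if (!IS_ERR(h))
 *		ret = h->ops.rm_udmap_ops.rx_flow_cfg(h, &cfg);
 */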

/**
 * ti_sci_cmd_set_fwl_region() - Request to configure a firewall region
 * @handle:	pointer to TI SCI handle
 * @region:	region configuration parameters
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
				     const struct ti_sci_msg_fwl_region *region)
{
	struct ti_sci_msg_fwl_set_firewall_region_req req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = region->fwl_id;
	req.region = region->region;
	req.n_permission_regs = region->n_permission_regs;
	req.control = region->control;
	req.permissions[0] = region->permissions[0];
	req.permissions[1] = region->permissions[1];
	req.permissions[2] = region->permissions[2];
	req.start_address = region->start_address;
	req.end_address = region->end_address;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return 0;
}

/**
 * ti_sci_cmd_get_fwl_region() - Request to read back a firewall region's
 *				 configuration
 * @handle:	pointer to TI SCI handle
 * @region:	region parameters in, full configuration out
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
				     struct ti_sci_msg_fwl_region *region)
{
	struct ti_sci_msg_fwl_get_firewall_region_req req;
	struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = region->fwl_id;
	req.region = region->region;
	req.n_permission_regs = region->n_permission_regs;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	region->fwl_id = resp->fwl_id;
	region->region = resp->region;
	region->n_permission_regs = resp->n_permission_regs;
	region->control = resp->control;
	region->permissions[0] = resp->permissions[0];
	region->permissions[1] = resp->permissions[1];
	region->permissions[2] = resp->permissions[2];
	region->start_address = resp->start_address;
	region->end_address = resp->end_address;

	return 0;
}

/**
 * ti_sci_cmd_change_fwl_owner() - Request to change a firewall region's owner
 * @handle:	pointer to TI SCI handle
 * @owner:	firewall owner parameters; updated from the response
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
				       struct ti_sci_msg_fwl_owner *owner)
{
	struct ti_sci_msg_fwl_change_owner_info_req req;
	struct ti_sci_msg_fwl_change_owner_info_resp *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req.fwl_id = owner->fwl_id;
	req.region = owner->region;
	req.owner_index = owner->owner_index;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	owner->fwl_id = resp->fwl_id;
	owner->region = resp->region;
	owner->owner_index = resp->owner_index;
	owner->owner_privid = resp->owner_privid;
	owner->owner_permission_bits = resp->owner_permission_bits;

	return ret;
}
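
/*
 * Editor's note: an illustrative sketch (not part of the driver) of driving
 * the three firewall calls above through the ops table; all field values
 * are assumptions made for the example:
 *
 *	struct ti_sci_msg_fwl_region region = {
 *		.fwl_id = fwl_id,	// firewall to program (assumed)
 *		.region = 0,
 *		.n_permission_regs = 3,
 *		.control = control,	// enable/lock bits (assumed)
 *		.permissions = { perm, perm, perm },
 *		.start_address = start,
 *		.end_address = end,
 *	};
 *
 *	ret = handle->ops.fwl_ops.set_fwl_region(handle, &region);
 *	if (!ret)
 *		ret = handle->ops.fwl_ops.get_fwl_region(handle, &region);
 */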

/*
 * ti_sci_setup_ops() - Set up the operation structures in the handle
 * @info:	pointer to TISCI instance data
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_board_ops *bops = &ops->board_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
	struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;

	bops->board_config = ti_sci_cmd_set_board_config;
	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
	bops->board_config_security = ti_sci_cmd_set_board_config_security;
	bops->board_config_pm = ti_sci_cmd_set_board_config_pm;

	dops->get_device = ti_sci_cmd_get_device;
	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;
	dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;
	core_ops->query_msmc = ti_sci_cmd_query_msmc;

	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
		ti_sci_cmd_get_resource_range_from_shost;

	pops->proc_request = ti_sci_cmd_proc_request;
	pops->proc_release = ti_sci_cmd_proc_release;
	pops->proc_handover = ti_sci_cmd_proc_handover;
	pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
	pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;

	rops->config = ti_sci_cmd_ring_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;

	fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
	fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
	fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
}
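
/*
 * Editor's note: ti_sci_setup_ops() is the only place the static
 * ti_sci_cmd_*() implementations above are published; clients are expected
 * to go through the ops table of a handle rather than call them directly.
 * A minimal sketch (the device id constant is an assumption):
 *
 *	const struct ti_sci_handle *h = ti_sci_get_handle(dev);
 *
 *	if (!IS_ERR(h))
 *		ret = h->ops.dev_ops.get_device(h, EXAMPLE_DEV_ID);
 */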

/**
 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW device
 * @sci_dev:	Pointer to the system controller (SYSFW) device
 *
 * Return: pointer to the handle if successful, else an error pointer when
 *	   invalid conditions are encountered.
 */
const
struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
{
	struct ti_sci_info *info;

	if (!sci_dev)
		return ERR_PTR(-EINVAL);

	info = dev_get_priv(sci_dev);
	if (!info)
		return ERR_PTR(-EINVAL);

	/* The handle is embedded in info, so it cannot be NULL here */
	return &info->handle;
}

/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev:	Pointer to the client device; its parent must be the TISCI
 *		controller
 *
 * Return: pointer to the handle if successful, else an error pointer.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
{
	struct udevice *sci_dev;

	if (!dev)
		return ERR_PTR(-EINVAL);

	sci_dev = dev_get_parent(dev);

	return ti_sci_get_handle_from_sysfw(sci_dev);
}
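
/*
 * Editor's note: illustrative use of ti_sci_get_handle() from a client
 * driver's probe; it assumes the client sits directly under the TISCI node
 * in the device tree, so that dev_get_parent() resolves to the system
 * controller:
 *
 *	static int client_probe(struct udevice *dev)	// hypothetical
 *	{
 *		const struct ti_sci_handle *h = ti_sci_get_handle(dev);
 *
 *		if (IS_ERR(h))
 *			return PTR_ERR(h);
 *		...
 *	}
 */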

/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using a DT phandle
 * @dev:	device whose node carries the phandle property
 * @property:	name of the property holding the phandle to the TISCI node
 *
 * Return: pointer to the handle if successful, else an error pointer.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
						  const char *property)
{
	struct ti_sci_info *entry, *info = NULL;
	u32 phandle;
	ofnode node;
	int err;

	err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
	if (err)
		return ERR_PTR(err);

	node = ofnode_get_by_phandle(phandle);
	if (!ofnode_valid(node))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(entry, &ti_sci_list, list)
		if (ofnode_equal(dev_ofnode(entry->dev), node)) {
			info = entry;
			break;
		}

	if (!info)
		return ERR_PTR(-ENODEV);

	return &info->handle;
}
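
/*
 * Editor's note: illustrative pairing of a device-tree property with the
 * lookup above; the node layout and the property name "ti,sci" are
 * assumptions made for the example:
 *
 *	client {
 *		ti,sci = <&dmsc>;
 *	};
 *
 *	const struct ti_sci_handle *h = ti_sci_get_by_phandle(dev, "ti,sci");
 */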

/**
 * ti_sci_of_to_info() - generate private data from device tree
 * @dev:	corresponding system controller interface device
 * @info:	pointer to driver specific private data
 *
 * Return: 0 if all went well, else appropriate error value.
 */
static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
{
	int ret;

	ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Notify channel is optional: only log its absence at debug level */
	ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
	if (ret) {
		dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
			__func__, ret);
	}

	info->host_id = dev_read_u32_default(dev, "ti,host-id",
					     info->desc->default_host_id);

	info->is_secure = dev_read_bool(dev, "ti,secure-host");

	return 0;
}
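
/*
 * Editor's note: an illustrative device-tree fragment matching the parsing
 * above; node names, mailbox phandles and the host id are assumptions.
 * "tx" and "rx" are mandatory, "notify" is optional:
 *
 *	dmsc: dmsc {
 *		compatible = "ti,am654-sci";
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy 11>, <&secure_proxy 13>;
 *		ti,host-id = <12>;
 *	};
 */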

/**
 * ti_sci_probe() - Basic probe
 * @dev:	corresponding system controller interface device
 *
 * Return: 0 if all went well, else appropriate error value.
 */
static int ti_sci_probe(struct udevice *dev)
{
	struct ti_sci_info *info;
	int ret;

	debug("%s(dev=%p)\n", __func__, dev);

	info = dev_get_priv(dev);
	info->desc = (void *)dev_get_driver_data(dev);

	ret = ti_sci_of_to_info(dev, info);
	if (ret) {
		dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
		return ret;
	}

	info->dev = dev;
	info->seq = 0xA;

	list_add_tail(&info->list, &ti_sci_list);
	ti_sci_setup_ops(info);

	ret = ti_sci_cmd_get_revision(&info->handle);

	INIT_LIST_HEAD(&info->dev_list);

	return ret;
}

/**
 * ti_sci_get_free_resource() - Get a free resource from a TISCI resource pool
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource number if one is free, else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	u16 set, free_bit;

	for (set = 0; set < res->sets; set++) {
		free_bit = find_first_zero_bit(res->desc[set].res_map,
					       res->desc[set].num);
		if (free_bit != res->desc[set].num) {
			set_bit(free_bit, res->desc[set].res_map);
			return res->desc[set].start + free_bit;
		}
	}

	return TI_SCI_RESOURCE_NULL;
}

/**
 * ti_sci_release_resource() - Release a resource back to a TISCI resource pool
 * @res:	Pointer to the TISCI resource
 * @id:		Resource id to be released
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	u16 set;

	for (set = 0; set < res->sets; set++) {
		if (res->desc[set].start <= id &&
		    (res->desc[set].num + res->desc[set].start) > id)
			clear_bit(id - res->desc[set].start,
				  res->desc[set].res_map);
	}
}
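
/*
 * Editor's note: the two helpers above form an allocate/release pair over
 * the per-set bitmaps. Illustrative use, assuming res was obtained via
 * devm_ti_sci_get_of_resource() below:
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENODEV;
 *	...
 *	ti_sci_release_resource(res, id);
 */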

/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @of_prop:	name of the property listing the resource subtypes
 *
 * Note: @of_prop is expected to be a list of u32 resource subtypes, one per
 * set. A ti_sci_resource structure is allocated and initialized covering all
 * sets; client drivers can then call ti_sci_get_free_resource() and
 * ti_sci_release_resource() directly to manage individual resources.
 *
 * Return: Pointer to a ti_sci_resource if all went well, else an appropriate
 *	   error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct udevice *dev, u32 dev_id, char *of_prop)
{
	u32 resource_subtype;
	struct ti_sci_resource *res;
	bool valid_set = false;
	int sets, i, ret;
	u32 *temp;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	sets = dev_read_size(dev, of_prop);
	if (sets < 0) {
		dev_err(dev, "%s resource type ids not available\n", of_prop);
		return ERR_PTR(sets);
	}

	/* dev_read_size() returned the property size in bytes */
	temp = malloc(sets);
	if (!temp)
		return ERR_PTR(-ENOMEM);
	sets /= sizeof(u32);
	res->sets = sets;

	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc) {
		ret = -ENOMEM;
		goto free_temp;
	}

	ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
	if (ret) {
		ret = -EINVAL;
		goto free_temp;
	}

	for (i = 0; i < res->sets; i++) {
		resource_subtype = temp[i];
		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
							resource_subtype,
							&res->desc[i].start,
							&res->desc[i].num);
		if (ret) {
			dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
				dev_id, resource_subtype,
				handle_to_ti_sci_info(handle)->host_id);
			res->desc[i].start = 0;
			res->desc[i].num = 0;
			continue;
		}

		valid_set = true;
		dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
			dev_id, resource_subtype, res->desc[i].start,
			res->desc[i].num);

		res->desc[i].res_map =
			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map) {
			ret = -ENOMEM;
			goto free_temp;
		}
	}

	free(temp);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);

free_temp:
	free(temp);
	return ERR_PTR(ret);
}
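
/*
 * Editor's note: illustrative shape of the property consumed above, a flat
 * list of u32 resource subtypes, one per set; the property name and the
 * subtype values are assumptions made for the example:
 *
 *	ti,sci-rm-range-gp-rings = <0x1>, <0x2>;
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-gp-rings");
 */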

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msgs = 20,
	.max_msg_size = 64,
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msgs = 20,
	.max_msg_size = 60,
};

static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ },
};

U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	.priv_auto = sizeof(struct ti_sci_info),
};