1
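/*
 * Texas Instruments System Control Interface (TI SCI) protocol driver
 *
 * Talks to the system controller firmware over secure-proxy mailbox
 * channels to manage devices, clocks, processors, resource ranges,
 * DMA/ring resources and firewall regions.
 */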
2
3
4
5
6
7
8
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <log.h>
14#include <mailbox.h>
15#include <malloc.h>
16#include <dm/device.h>
17#include <dm/device_compat.h>
18#include <dm/devres.h>
19#include <linux/bitops.h>
20#include <linux/compat.h>
21#include <linux/err.h>
22#include <linux/soc/ti/k3-sec-proxy.h>
23#include <linux/soc/ti/ti_sci_protocol.h>
24
25#include "ti_sci.h"
26#include "ti_sci_static_data.h"
27
28
29static LIST_HEAD(ti_sci_list);
30
31
32
33
34
35
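/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message wrapper passed to the secure proxy
 * @rx_len:	Expected length of the response, in bytes (0 if no response)
 */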
36struct ti_sci_xfer {
37 struct k3_sec_proxy_msg tx_message;
38 u8 rx_len;
39};
40
41
42
43
44
45
46
47
48
49
50
51
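/**
 * struct ti_sci_rm_type_map - Mapping of a device ID to a resource
 *			       management type
 * @dev_id:	TISCI device ID
 * @type:	Resource management type value for that device
 */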
52struct ti_sci_rm_type_map {
53 u32 dev_id;
54 u16 type;
55};
56
57
58
59
60
61
62
63
64
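/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier used if none is supplied
 * @max_rx_timeout_ms:	Timeout for a response, in milliseconds
 * @max_msgs:		Maximum number of messages that can be in flight
 * @max_msg_size:	Maximum size of a message, in bytes, including header
 */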
65struct ti_sci_desc {
66 u8 default_host_id;
67 int max_rx_timeout_ms;
68 int max_msgs;
69 int max_msg_size;
70};
71
72
73
74
75
76
77
78
79
80
81
82
83
84
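/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	TI SCI handle exposed to clients
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify:	Notification mailbox channel
 * @xfer:	Message transfer bookkeeping for the current request
 * @list:	Link in the global list of TI SCI instances
 * @dev_list:	List of devices requested exclusively by this host
 * @is_secure:	True if communication happens over the secure transport
 * @host_id:	Host identifier representing this software entity
 * @seq:	Sequence number used to match responses to requests
 */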
85struct ti_sci_info {
86 struct udevice *dev;
87 const struct ti_sci_desc *desc;
88 struct ti_sci_handle handle;
89 struct mbox_chan chan_tx;
90 struct mbox_chan chan_rx;
91 struct mbox_chan chan_notify;
92 struct ti_sci_xfer xfer;
93 struct list_head list;
94 struct list_head dev_list;
95 bool is_secure;
96 u8 host_id;
97 u8 seq;
98};
99
100struct ti_sci_exclusive_dev {
101 u32 id;
102 u32 count;
103 struct list_head list;
104};
105
106#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
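/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flags to set for this message
 * @buf:	Buffer to be sent to the mailbox channel
 * @tx_message_size:	transmit message size
 * @rx_message_size:	receive message size; pass 0 when no response is
 *			expected
 *
 * Validates the sizes, toggles the sequence number and fills the message
 * header in @buf.
 *
 * Return: Pointer to the prepared xfer on success, else an error pointer.
 */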
124static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
125 u16 msg_type, u32 msg_flags,
126 u32 *buf,
127 size_t tx_message_size,
128 size_t rx_message_size)
129{
130 struct ti_sci_xfer *xfer = &info->xfer;
131 struct ti_sci_msg_hdr *hdr;
132
133
134 if (rx_message_size > info->desc->max_msg_size ||
135 tx_message_size > info->desc->max_msg_size ||
136 (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
137 tx_message_size < sizeof(*hdr))
138 return ERR_PTR(-ERANGE);
139
140 info->seq = ~info->seq;
141 xfer->tx_message.buf = buf;
142 xfer->tx_message.len = tx_message_size;
143 xfer->rx_len = (u8)rx_message_size;
144
145 hdr = (struct ti_sci_msg_hdr *)buf;
146 hdr->seq = info->seq;
147 hdr->type = msg_type;
148 hdr->host = info->host_id;
149 hdr->flags = msg_flags;
150
151 return xfer;
152}
153
154
155
156
157
158
159
160
161
162
163
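/**
 * ti_sci_get_response() - Receive a response from the mailbox
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer the response belongs to
 * @chan:	Mailbox channel to receive the response on
 *
 * Waits for a message on @chan, skips the secure header when running on
 * the secure transport and sanity checks the sequence number and length.
 *
 * Return: 0 on success (a sequence number mismatch is only logged), else
 * a corresponding error value.
 */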
164static inline int ti_sci_get_response(struct ti_sci_info *info,
165 struct ti_sci_xfer *xfer,
166 struct mbox_chan *chan)
167{
168 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
169 struct ti_sci_secure_msg_hdr *secure_hdr;
170 struct ti_sci_msg_hdr *hdr;
171 int ret;
172
173
174 ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
175 if (ret) {
176 dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
177 __func__, ret);
178 return ret;
179 }
180
181
182 if (info->is_secure) {
183 secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
184 msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
185 }
186
187
188 hdr = (struct ti_sci_msg_hdr *)msg->buf;
189
190
191 if (hdr->seq != info->seq) {
192 dev_dbg(info->dev, "%s: Message for %d is not expected\n",
193 __func__, hdr->seq);
194 return ret;
195 }
196
197 if (msg->len > info->desc->max_msg_size) {
198 dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
199 __func__, msg->len, info->desc->max_msg_size);
200 return -EINVAL;
201 }
202
203 if (msg->len < xfer->rx_len) {
204 dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
205 __func__, msg->len, xfer->rx_len);
206 }
207
208 return ret;
209}
210
211
212
213
214
215
216
217
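/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Prepends space for the secure header when running on the secure
 * transport, sends the request on the TX channel and, if a response is
 * expected, waits for it on the RX channel.
 *
 * Return: 0 if the transfer completed, else a corresponding error value.
 */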
218static inline int ti_sci_do_xfer(struct ti_sci_info *info,
219 struct ti_sci_xfer *xfer)
220{
221 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
222 u8 secure_buf[info->desc->max_msg_size];
223 struct ti_sci_secure_msg_hdr secure_hdr;
224 int ret;
225
226 if (info->is_secure) {
227
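		/*
		 * No checksum is computed for the secure header; space for
		 * it is simply reserved in front of the payload.
		 */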
228 secure_hdr.checksum = 0;
229 secure_hdr.reserved = 0;
230 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
231 xfer->tx_message.len);
232
233 xfer->tx_message.buf = (u32 *)secure_buf;
234 xfer->tx_message.len += sizeof(secure_hdr);
235
236 if (xfer->rx_len)
237 xfer->rx_len += sizeof(secure_hdr);
238 }
239
240
241 ret = mbox_send(&info->chan_tx, msg);
242 if (ret) {
243 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
244 __func__, ret);
245 return ret;
246 }
247
248
249 if (xfer->rx_len)
250 ret = ti_sci_get_response(info, xfer, &info->chan_rx);
251
252 return ret;
253}
254
255
256
257
258
259
260
261
262
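/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @handle:	pointer to TI SCI handle
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */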
263static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
264{
265 struct ti_sci_msg_resp_version *rev_info;
266 struct ti_sci_version_info *ver;
267 struct ti_sci_msg_hdr hdr;
268 struct ti_sci_info *info;
269 struct ti_sci_xfer *xfer;
270 int ret;
271
272 if (IS_ERR(handle))
273 return PTR_ERR(handle);
274 if (!handle)
275 return -EINVAL;
276
277 info = handle_to_ti_sci_info(handle);
278
279 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
280 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
281 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
282 sizeof(*rev_info));
283 if (IS_ERR(xfer)) {
284 ret = PTR_ERR(xfer);
285 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
286 return ret;
287 }
288
289 ret = ti_sci_do_xfer(info, xfer);
290 if (ret) {
291 dev_err(info->dev, "Mbox communication fail %d\n", ret);
292 return ret;
293 }
294
295 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
296
297 ver = &handle->version;
298 ver->abi_major = rev_info->abi_major;
299 ver->abi_minor = rev_info->abi_minor;
300 ver->firmware_revision = rev_info->firmware_revision;
301 strncpy(ver->firmware_description, rev_info->firmware_description,
302 sizeof(ver->firmware_description));
303
304 return 0;
305}
306
307
308
309
310
311
312
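/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message check
 * @r:	pointer to the response buffer
 *
 * Return: true if the response was an ACK, else false.
 */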
313static inline bool ti_sci_is_response_ack(void *r)
314{
315 struct ti_sci_msg_hdr *hdr = r;
316
317 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
318}
319
320
321
322
323
324
325
326
327
328
329
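/**
 * cmd_set_board_config_using_msg() - Common command to send a board
 *				      configuration message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */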
330static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
331 u16 msg_type, u64 addr, u32 size)
332{
333 struct ti_sci_msg_board_config req;
334 struct ti_sci_msg_hdr *resp;
335 struct ti_sci_info *info;
336 struct ti_sci_xfer *xfer;
337 int ret = 0;
338
339 if (IS_ERR(handle))
340 return PTR_ERR(handle);
341 if (!handle)
342 return -EINVAL;
343
344 info = handle_to_ti_sci_info(handle);
345
346 xfer = ti_sci_setup_one_xfer(info, msg_type,
347 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
348 (u32 *)&req, sizeof(req), sizeof(*resp));
349 if (IS_ERR(xfer)) {
350 ret = PTR_ERR(xfer);
351 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
352 return ret;
353 }
354 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
355 req.boardcfgp_low = addr & 0xffffffff;
356 req.boardcfg_size = size;
357
358 ret = ti_sci_do_xfer(info, xfer);
359 if (ret) {
360 dev_err(info->dev, "Mbox send fail %d\n", ret);
361 return ret;
362 }
363
364 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
365
366 if (!ti_sci_is_response_ack(resp))
367 return -ENODEV;
368
369 return ret;
370}
371
372
373
374
375
376
377
378
379
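/**
 * ti_sci_cmd_set_board_config() - Command to send the board configuration
 *				   message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * The _rm, _security and _pm variants that follow send the resource
 * management, security and power management board configurations in the
 * same way.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */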
380static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
381 u64 addr, u32 size)
382{
383 return cmd_set_board_config_using_msg(handle,
384 TI_SCI_MSG_BOARD_CONFIG,
385 addr, size);
386}
387
388
389
390
391
392
393
394
395
396
397static
398int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
399 u64 addr, u32 size)
400{
401 return cmd_set_board_config_using_msg(handle,
402 TI_SCI_MSG_BOARD_CONFIG_RM,
403 addr, size);
404}
405
406
407
408
409
410
411
412
413
414
415static
416int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
417 u64 addr, u32 size)
418{
419 return cmd_set_board_config_using_msg(handle,
420 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
421 addr, size);
422}
423
424
425
426
427
428
429
430
431
432
433static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
434 u64 addr, u32 size)
435{
436 return cmd_set_board_config_using_msg(handle,
437 TI_SCI_MSG_BOARD_CONFIG_PM,
438 addr, size);
439}
440
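/* Helpers to track devices that have been requested exclusively */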
441static struct ti_sci_exclusive_dev
442*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
443{
444 struct ti_sci_exclusive_dev *dev;
445
446 list_for_each_entry(dev, dev_list, list)
447 if (dev->id == id)
448 return dev;
449
450 return NULL;
451}
452
453static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
454{
455 struct ti_sci_exclusive_dev *dev;
456
457 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
458 if (dev) {
459 dev->count++;
460 return;
461 }
462
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->id = id;
465 dev->count = 1;
466 INIT_LIST_HEAD(&dev->list);
467 list_add_tail(&dev->list, &info->dev_list);
468}
469
470static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
471{
472 struct ti_sci_exclusive_dev *dev;
473
474 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
475 if (!dev)
476 return;
477
478 if (dev->count > 0)
479 dev->count--;
480}
481
482
483
484
485
486
487
488
489
490
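/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Also maintains the bookkeeping of exclusively requested devices so they
 * can be released later via ti_sci_cmd_release_exclusive_devices().
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */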
491static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
492 u32 id, u32 flags, u8 state)
493{
494 struct ti_sci_msg_req_set_device_state req;
495 struct ti_sci_msg_hdr *resp;
496 struct ti_sci_info *info;
497 struct ti_sci_xfer *xfer;
498 int ret = 0;
499
500 if (IS_ERR(handle))
501 return PTR_ERR(handle);
502 if (!handle)
503 return -EINVAL;
504
505 info = handle_to_ti_sci_info(handle);
506
507 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
508 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
509 (u32 *)&req, sizeof(req), sizeof(*resp));
510 if (IS_ERR(xfer)) {
511 ret = PTR_ERR(xfer);
512 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
513 return ret;
514 }
515 req.id = id;
516 req.state = state;
517
518 ret = ti_sci_do_xfer(info, xfer);
519 if (ret) {
520 dev_err(info->dev, "Mbox send fail %d\n", ret);
521 return ret;
522 }
523
524 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
525
526 if (!ti_sci_is_response_ack(resp))
527 return -ENODEV;
528
529 if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
530 ti_sci_delete_exclusive_dev(info, id);
531 else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
532 ti_sci_add_exclusive_dev(info, id);
533
534 return ret;
535}
536
537
538
539
540
541
542
543
544
545
546
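/**
 * ti_sci_set_device_state_no_wait() - Set device state without requesting or
 *				       waiting for a response
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if the message was sent, else returns appropriate error value.
 */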
547static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
548 u32 id, u32 flags, u8 state)
549{
550 struct ti_sci_msg_req_set_device_state req;
551 struct ti_sci_info *info;
552 struct ti_sci_xfer *xfer;
553 int ret = 0;
554
555 if (IS_ERR(handle))
556 return PTR_ERR(handle);
557 if (!handle)
558 return -EINVAL;
559
560 info = handle_to_ti_sci_info(handle);
561
562 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
563 flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
564 (u32 *)&req, sizeof(req), 0);
565 if (IS_ERR(xfer)) {
566 ret = PTR_ERR(xfer);
567 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
568 return ret;
569 }
570 req.id = id;
571 req.state = state;
572
573 ret = ti_sci_do_xfer(info, xfer);
574 if (ret)
575 dev_err(info->dev, "Mbox send fail %d\n", ret);
576
577 return ret;
578}
579
580
581
582
583
584
585
586
587
588
589
590
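/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count (optional)
 * @resets:	pointer to resets (optional)
 * @p_state:	pointer to the programmed state (optional)
 * @c_state:	pointer to the current state (optional)
 *
 * At least one of the optional output pointers must be non-NULL.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */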
591static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
592 u32 id, u32 *clcnt, u32 *resets,
593 u8 *p_state, u8 *c_state)
594{
595 struct ti_sci_msg_resp_get_device_state *resp;
596 struct ti_sci_msg_req_get_device_state req;
597 struct ti_sci_info *info;
598 struct ti_sci_xfer *xfer;
599 int ret = 0;
600
601 if (IS_ERR(handle))
602 return PTR_ERR(handle);
603 if (!handle)
604 return -EINVAL;
605
606 if (!clcnt && !resets && !p_state && !c_state)
607 return -EINVAL;
608
609 info = handle_to_ti_sci_info(handle);
610
611 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
612 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
613 (u32 *)&req, sizeof(req), sizeof(*resp));
614 if (IS_ERR(xfer)) {
615 ret = PTR_ERR(xfer);
616 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
617 return ret;
618 }
619 req.id = id;
620
621 ret = ti_sci_do_xfer(info, xfer);
622 if (ret) {
623 dev_err(info->dev, "Mbox send fail %d\n", ret);
624 return ret;
625 }
626
627 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
628 if (!ti_sci_is_response_ack(resp))
629 return -ENODEV;
630
631 if (clcnt)
632 *clcnt = resp->context_loss_count;
633 if (resets)
634 *resets = resp->resets;
635 if (p_state)
636 *p_state = resp->programmed_state;
637 if (c_state)
638 *c_state = resp->current_state;
639
640 return ret;
641}
642
643
644
645
646
647
648
649
650
651
652
653
654
655
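/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 *
 * Moves the device to the ON state; the matching release is
 * ti_sci_cmd_put_device(). The _exclusive variant additionally requests
 * the device exclusively for this host.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */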
656static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
657{
658 return ti_sci_set_device_state(handle, id, 0,
659 MSG_DEVICE_SW_STATE_ON);
660}
661
662static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
663 u32 id)
664{
665 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
666 MSG_DEVICE_SW_STATE_ON);
667}
668
669
670
671
672
673
674
675
676
677
678
679
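/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 *
 * Moves the device to the RETENTION state; the _exclusive variant also
 * requests the device exclusively for this host.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */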
680static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
681{
682 return ti_sci_set_device_state(handle, id,
683 0,
684 MSG_DEVICE_SW_STATE_RETENTION);
685}
686
687static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
688 u32 id)
689{
690 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
691 MSG_DEVICE_SW_STATE_RETENTION);
692}
693
694
695
696
697
698
699
700
701
702
703
704
705static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
706{
707 return ti_sci_set_device_state(handle, id, 0,
708 MSG_DEVICE_SW_STATE_AUTO_OFF);
709}
710
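/**
 * ti_sci_cmd_release_exclusive_devices() - Release all devices that were
 *					    requested exclusively
 * @handle:	Pointer to TISCI handle
 *
 * Issues one put per outstanding exclusive reference recorded in the
 * internal device list.
 *
 * Return: 0
 */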
711static
712int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
713{
714 struct ti_sci_exclusive_dev *dev, *tmp;
715 struct ti_sci_info *info;
716 int i, cnt;
717
718 info = handle_to_ti_sci_info(handle);
719
720 list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
721 cnt = dev->count;
722 debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
723 for (i = 0; i < cnt; i++)
724 ti_sci_cmd_put_device(handle, dev->id);
725 }
726
727 return 0;
728}
729
730
731
732
733
734
735
736
737
738static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
739{
740 u8 unused;
741
742
743 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
744}
745
746
747
748
749
750
751
752
753
754static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
755 u32 *count)
756{
757 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
758}
759
760
761
762
763
764
765
766
767
768static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
769 bool *r_state)
770{
771 int ret;
772 u8 state;
773
774 if (!r_state)
775 return -EINVAL;
776
777 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
778 if (ret)
779 return ret;
780
781 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
782
783 return 0;
784}
785
786
787
788
789
790
791
792
793
794
795static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
796 bool *r_state, bool *curr_state)
797{
798 int ret;
799 u8 p_state, c_state;
800
801 if (!r_state && !curr_state)
802 return -EINVAL;
803
804 ret =
805 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
806 if (ret)
807 return ret;
808
809 if (r_state)
810 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
811 if (curr_state)
812 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
813
814 return 0;
815}
816
817
818
819
820
821
822
823
824
825
826static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
827 bool *r_state, bool *curr_state)
828{
829 int ret;
830 u8 p_state, c_state;
831
832 if (!r_state && !curr_state)
833 return -EINVAL;
834
835 ret =
836 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
837 if (ret)
838 return ret;
839
840 if (r_state)
841 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
842 if (curr_state)
843 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
844
845 return 0;
846}
847
848
849
850
851
852
853
854
855
856static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
857 bool *curr_state)
858{
859 int ret;
860 u8 state;
861
862 if (!curr_state)
863 return -EINVAL;
864
865 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
866 if (ret)
867 return ret;
868
869 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
870
871 return 0;
872}
873
874
875
876
877
878
879
880
881
882
883static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
884 u32 id, u32 reset_state)
885{
886 struct ti_sci_msg_req_set_device_resets req;
887 struct ti_sci_msg_hdr *resp;
888 struct ti_sci_info *info;
889 struct ti_sci_xfer *xfer;
890 int ret = 0;
891
892 if (IS_ERR(handle))
893 return PTR_ERR(handle);
894 if (!handle)
895 return -EINVAL;
896
897 info = handle_to_ti_sci_info(handle);
898
899 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
900 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
901 (u32 *)&req, sizeof(req), sizeof(*resp));
902 if (IS_ERR(xfer)) {
903 ret = PTR_ERR(xfer);
904 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
905 return ret;
906 }
907 req.id = id;
908 req.resets = reset_state;
909
910 ret = ti_sci_do_xfer(info, xfer);
911 if (ret) {
912 dev_err(info->dev, "Mbox send fail %d\n", ret);
913 return ret;
914 }
915
916 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
917
918 if (!ti_sci_is_response_ack(resp))
919 return -ENODEV;
920
921 return ret;
922}
923
924
925
926
927
928
929
930
931
932
933static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
934 u32 id, u32 *reset_state)
935{
936 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
937 NULL);
938}
939
940
941
942
943
944
945
946
947
948
949
950
951
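/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request
 * @flags:	Header flags as needed
 * @state:	State to request for the clock
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */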
952static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
953 u32 dev_id, u8 clk_id,
954 u32 flags, u8 state)
955{
956 struct ti_sci_msg_req_set_clock_state req;
957 struct ti_sci_msg_hdr *resp;
958 struct ti_sci_info *info;
959 struct ti_sci_xfer *xfer;
960 int ret = 0;
961
962 if (IS_ERR(handle))
963 return PTR_ERR(handle);
964 if (!handle)
965 return -EINVAL;
966
967 info = handle_to_ti_sci_info(handle);
968
969 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
970 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
971 (u32 *)&req, sizeof(req), sizeof(*resp));
972 if (IS_ERR(xfer)) {
973 ret = PTR_ERR(xfer);
974 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
975 return ret;
976 }
977 req.dev_id = dev_id;
978 req.clk_id = clk_id;
979 req.request_state = state;
980
981 ret = ti_sci_do_xfer(info, xfer);
982 if (ret) {
983 dev_err(info->dev, "Mbox send fail %d\n", ret);
984 return ret;
985 }
986
987 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
988
989 if (!ti_sci_is_response_ack(resp))
990 return -ENODEV;
991
992 return ret;
993}
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
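/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request
 * @programmed_state:	State requested for the clock to move to (optional)
 * @current_state:	State that the clock is currently in (optional)
 *
 * At least one of the two state pointers must be non-NULL.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */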
1007static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1008 u32 dev_id, u8 clk_id,
1009 u8 *programmed_state, u8 *current_state)
1010{
1011 struct ti_sci_msg_resp_get_clock_state *resp;
1012 struct ti_sci_msg_req_get_clock_state req;
1013 struct ti_sci_info *info;
1014 struct ti_sci_xfer *xfer;
1015 int ret = 0;
1016
1017 if (IS_ERR(handle))
1018 return PTR_ERR(handle);
1019 if (!handle)
1020 return -EINVAL;
1021
1022 if (!programmed_state && !current_state)
1023 return -EINVAL;
1024
1025 info = handle_to_ti_sci_info(handle);
1026
1027 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1028 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1029 (u32 *)&req, sizeof(req), sizeof(*resp));
1030 if (IS_ERR(xfer)) {
1031 ret = PTR_ERR(xfer);
1032 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1033 return ret;
1034 }
1035 req.dev_id = dev_id;
1036 req.clk_id = clk_id;
1037
1038 ret = ti_sci_do_xfer(info, xfer);
1039 if (ret) {
1040 dev_err(info->dev, "Mbox send fail %d\n", ret);
1041 return ret;
1042 }
1043
1044 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
1045
1046 if (!ti_sci_is_response_ack(resp))
1047 return -ENODEV;
1048
1049 if (programmed_state)
1050 *programmed_state = resp->programmed_state;
1051 if (current_state)
1052 *current_state = resp->current_state;
1053
1054 return ret;
1055}
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1071 u8 clk_id, bool needs_ssc, bool can_change_freq,
1072 bool enable_input_term)
1073{
1074 u32 flags = 0;
1075
1076 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1077 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1078 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1079
1080 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1081 MSG_CLOCK_SW_STATE_REQ);
1082}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1097 u32 dev_id, u8 clk_id)
1098{
1099 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1100 MSG_CLOCK_SW_STATE_UNREQ);
1101}
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1116 u32 dev_id, u8 clk_id)
1117{
1118 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1119 MSG_CLOCK_SW_STATE_AUTO);
1120}
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1134 u32 dev_id, u8 clk_id, bool *req_state)
1135{
1136 u8 state = 0;
1137 int ret;
1138
1139 if (!req_state)
1140 return -EINVAL;
1141
1142 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1143 if (ret)
1144 return ret;
1145
1146 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1147 return 0;
1148}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1163 u8 clk_id, bool *req_state, bool *curr_state)
1164{
1165 u8 c_state = 0, r_state = 0;
1166 int ret;
1167
1168 if (!req_state && !curr_state)
1169 return -EINVAL;
1170
1171 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1172 &r_state, &c_state);
1173 if (ret)
1174 return ret;
1175
1176 if (req_state)
1177 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1178 if (curr_state)
1179 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1180 return 0;
1181}
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1196 u8 clk_id, bool *req_state, bool *curr_state)
1197{
1198 u8 c_state = 0, r_state = 0;
1199 int ret;
1200
1201 if (!req_state && !curr_state)
1202 return -EINVAL;
1203
1204 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1205 &r_state, &c_state);
1206 if (ret)
1207 return ret;
1208
1209 if (req_state)
1210 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1211 if (curr_state)
1212 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1213 return 0;
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1228 u32 dev_id, u8 clk_id, u8 parent_id)
1229{
1230 struct ti_sci_msg_req_set_clock_parent req;
1231 struct ti_sci_msg_hdr *resp;
1232 struct ti_sci_info *info;
1233 struct ti_sci_xfer *xfer;
1234 int ret = 0;
1235
1236 if (IS_ERR(handle))
1237 return PTR_ERR(handle);
1238 if (!handle)
1239 return -EINVAL;
1240
1241 info = handle_to_ti_sci_info(handle);
1242
1243 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1244 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1245 (u32 *)&req, sizeof(req), sizeof(*resp));
1246 if (IS_ERR(xfer)) {
1247 ret = PTR_ERR(xfer);
1248 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1249 return ret;
1250 }
1251 req.dev_id = dev_id;
1252 req.clk_id = clk_id;
1253 req.parent_id = parent_id;
1254
1255 ret = ti_sci_do_xfer(info, xfer);
1256 if (ret) {
1257 dev_err(info->dev, "Mbox send fail %d\n", ret);
1258 return ret;
1259 }
1260
1261 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1262
1263 if (!ti_sci_is_response_ack(resp))
1264 return -ENODEV;
1265
1266 return ret;
1267}
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1281 u32 dev_id, u8 clk_id, u8 *parent_id)
1282{
1283 struct ti_sci_msg_resp_get_clock_parent *resp;
1284 struct ti_sci_msg_req_get_clock_parent req;
1285 struct ti_sci_info *info;
1286 struct ti_sci_xfer *xfer;
1287 int ret = 0;
1288
1289 if (IS_ERR(handle))
1290 return PTR_ERR(handle);
1291 if (!handle || !parent_id)
1292 return -EINVAL;
1293
1294 info = handle_to_ti_sci_info(handle);
1295
1296 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1297 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1298 (u32 *)&req, sizeof(req), sizeof(*resp));
1299 if (IS_ERR(xfer)) {
1300 ret = PTR_ERR(xfer);
1301 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1302 return ret;
1303 }
1304 req.dev_id = dev_id;
1305 req.clk_id = clk_id;
1306
1307 ret = ti_sci_do_xfer(info, xfer);
1308 if (ret) {
1309 dev_err(info->dev, "Mbox send fail %d\n", ret);
1310 return ret;
1311 }
1312
1313 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1314
1315 if (!ti_sci_is_response_ack(resp))
1316 ret = -ENODEV;
1317 else
1318 *parent_id = resp->parent_id;
1319
1320 return ret;
1321}
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1335 u32 dev_id, u8 clk_id,
1336 u8 *num_parents)
1337{
1338 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1339 struct ti_sci_msg_req_get_clock_num_parents req;
1340 struct ti_sci_info *info;
1341 struct ti_sci_xfer *xfer;
1342 int ret = 0;
1343
1344 if (IS_ERR(handle))
1345 return PTR_ERR(handle);
1346 if (!handle || !num_parents)
1347 return -EINVAL;
1348
1349 info = handle_to_ti_sci_info(handle);
1350
1351 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1352 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1353 (u32 *)&req, sizeof(req), sizeof(*resp));
1354 if (IS_ERR(xfer)) {
1355 ret = PTR_ERR(xfer);
1356 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1357 return ret;
1358 }
1359 req.dev_id = dev_id;
1360 req.clk_id = clk_id;
1361
1362 ret = ti_sci_do_xfer(info, xfer);
1363 if (ret) {
1364 dev_err(info->dev, "Mbox send fail %d\n", ret);
1365 return ret;
1366 }
1367
1368 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1369 xfer->tx_message.buf;
1370
1371 if (!ti_sci_is_response_ack(resp))
1372 ret = -ENODEV;
1373 else
1374 *num_parents = resp->num_parents;
1375
1376 return ret;
1377}
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
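/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request
 * @min_freq:	The minimum allowable frequency in Hz
 * @target_freq:	The target clock frequency in Hz
 * @max_freq:	The maximum allowable frequency in Hz
 * @match_freq:	Frequency match in Hz response
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */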
1398static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1399 u32 dev_id, u8 clk_id, u64 min_freq,
1400 u64 target_freq, u64 max_freq,
1401 u64 *match_freq)
1402{
1403 struct ti_sci_msg_resp_query_clock_freq *resp;
1404 struct ti_sci_msg_req_query_clock_freq req;
1405 struct ti_sci_info *info;
1406 struct ti_sci_xfer *xfer;
1407 int ret = 0;
1408
1409 if (IS_ERR(handle))
1410 return PTR_ERR(handle);
1411 if (!handle || !match_freq)
1412 return -EINVAL;
1413
1414 info = handle_to_ti_sci_info(handle);
1415
1416 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1417 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1418 (u32 *)&req, sizeof(req), sizeof(*resp));
1419 if (IS_ERR(xfer)) {
1420 ret = PTR_ERR(xfer);
1421 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1422 return ret;
1423 }
1424 req.dev_id = dev_id;
1425 req.clk_id = clk_id;
1426 req.min_freq_hz = min_freq;
1427 req.target_freq_hz = target_freq;
1428 req.max_freq_hz = max_freq;
1429
1430 ret = ti_sci_do_xfer(info, xfer);
1431 if (ret) {
1432 dev_err(info->dev, "Mbox send fail %d\n", ret);
1433 return ret;
1434 }
1435
1436 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1437
1438 if (!ti_sci_is_response_ack(resp))
1439 ret = -ENODEV;
1440 else
1441 *match_freq = resp->freq_hz;
1442
1443 return ret;
1444}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
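/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for the clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request
 * @min_freq:	The minimum allowable frequency in Hz
 * @target_freq:	The target clock frequency in Hz
 * @max_freq:	The maximum allowable frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */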
1464static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1465 u32 dev_id, u8 clk_id, u64 min_freq,
1466 u64 target_freq, u64 max_freq)
1467{
1468 struct ti_sci_msg_req_set_clock_freq req;
1469 struct ti_sci_msg_hdr *resp;
1470 struct ti_sci_info *info;
1471 struct ti_sci_xfer *xfer;
1472 int ret = 0;
1473
1474 if (IS_ERR(handle))
1475 return PTR_ERR(handle);
1476 if (!handle)
1477 return -EINVAL;
1478
1479 info = handle_to_ti_sci_info(handle);
1480
1481 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1482 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1483 (u32 *)&req, sizeof(req), sizeof(*resp));
1484 if (IS_ERR(xfer)) {
1485 ret = PTR_ERR(xfer);
1486 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1487 return ret;
1488 }
1489 req.dev_id = dev_id;
1490 req.clk_id = clk_id;
1491 req.min_freq_hz = min_freq;
1492 req.target_freq_hz = target_freq;
1493 req.max_freq_hz = max_freq;
1494
1495 ret = ti_sci_do_xfer(info, xfer);
1496 if (ret) {
1497 dev_err(info->dev, "Mbox send fail %d\n", ret);
1498 return ret;
1499 }
1500
1501 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1502
1503 if (!ti_sci_is_response_ack(resp))
1504 return -ENODEV;
1505
1506 return ret;
1507}
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
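/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */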
1520static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1521 u32 dev_id, u8 clk_id, u64 *freq)
1522{
1523 struct ti_sci_msg_resp_get_clock_freq *resp;
1524 struct ti_sci_msg_req_get_clock_freq req;
1525 struct ti_sci_info *info;
1526 struct ti_sci_xfer *xfer;
1527 int ret = 0;
1528
1529 if (IS_ERR(handle))
1530 return PTR_ERR(handle);
1531 if (!handle || !freq)
1532 return -EINVAL;
1533
1534 info = handle_to_ti_sci_info(handle);
1535
1536 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1537 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1538 (u32 *)&req, sizeof(req), sizeof(*resp));
1539 if (IS_ERR(xfer)) {
1540 ret = PTR_ERR(xfer);
1541 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1542 return ret;
1543 }
1544 req.dev_id = dev_id;
1545 req.clk_id = clk_id;
1546
1547 ret = ti_sci_do_xfer(info, xfer);
1548 if (ret) {
1549 dev_err(info->dev, "Mbox send fail %d\n", ret);
1550 return ret;
1551 }
1552
1553 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1554
1555 if (!ti_sci_is_response_ack(resp))
1556 ret = -ENODEV;
1557 else
1558 *freq = resp->freq_hz;
1559
1560 return ret;
1561}
1562
1563
1564
1565
1566
1567
1568
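/**
 * ti_sci_cmd_core_reboot() - Command to request a system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */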
1569static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1570{
1571 struct ti_sci_msg_req_reboot req;
1572 struct ti_sci_msg_hdr *resp;
1573 struct ti_sci_info *info;
1574 struct ti_sci_xfer *xfer;
1575 int ret = 0;
1576
1577 if (IS_ERR(handle))
1578 return PTR_ERR(handle);
1579 if (!handle)
1580 return -EINVAL;
1581
1582 info = handle_to_ti_sci_info(handle);
1583
1584 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1585 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1586 (u32 *)&req, sizeof(req), sizeof(*resp));
1587 if (IS_ERR(xfer)) {
1588 ret = PTR_ERR(xfer);
1589 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1590 return ret;
1591 }
1592 req.domain = 0;
1593
1594 ret = ti_sci_do_xfer(info, xfer);
1595 if (ret) {
1596 dev_err(info->dev, "Mbox send fail %d\n", ret);
1597 return ret;
1598 }
1599
1600 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1601
1602 if (!ti_sci_is_response_ack(resp))
1603 return -ENODEV;
1604
1605 return ret;
1606}
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
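/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host; a resource is uniquely identified
 *			       by type and subtype.
 * @handle:		pointer to TI SCI handle
 * @dev_id:		TISCI device ID
 * @subtype:		Resource assignment subtype being requested from
 *			the given device
 * @s_host:		Host processing entity to which the resources are
 *			allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */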
1622static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1623 u32 dev_id, u8 subtype, u8 s_host,
1624 u16 *range_start, u16 *range_num)
1625{
1626 struct ti_sci_msg_resp_get_resource_range *resp;
1627 struct ti_sci_msg_req_get_resource_range req;
1628 struct ti_sci_xfer *xfer;
1629 struct ti_sci_info *info;
1630 int ret = 0;
1631
1632 if (IS_ERR(handle))
1633 return PTR_ERR(handle);
1634 if (!handle)
1635 return -EINVAL;
1636
1637 info = handle_to_ti_sci_info(handle);
1638
1639 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1640 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1641 (u32 *)&req, sizeof(req), sizeof(*resp));
1642 if (IS_ERR(xfer)) {
1643 ret = PTR_ERR(xfer);
1644 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1645 return ret;
1646 }
1647
1648 req.secondary_host = s_host;
1649 req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1650 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1651
1652 ret = ti_sci_do_xfer(info, xfer);
1653 if (ret) {
1654 dev_err(info->dev, "Mbox send fail %d\n", ret);
1655 goto fail;
1656 }
1657
1658 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1659 if (!ti_sci_is_response_ack(resp)) {
1660 ret = -ENODEV;
1661 } else if (!resp->range_start && !resp->range_num) {
1662 ret = -ENODEV;
1663 } else {
1664 *range_start = resp->range_start;
1665 *range_num = resp->range_num;
	}
1667
1668fail:
1669 return ret;
1670}
1671
1672static int __maybe_unused
1673ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1674 u32 dev_id, u8 subtype,
1675 u16 *range_start, u16 *range_num)
1676{
1677 struct ti_sci_resource_static_data *data;
1678 int i = 0;
1679
1680 while (1) {
1681 data = &rm_static_data[i];
1682
1683 if (!data->dev_id)
1684 return -EINVAL;
1685
1686 if (data->dev_id != dev_id || data->subtype != subtype) {
1687 i++;
1688 continue;
1689 }
1690
1691 *range_start = data->range_start;
1692 *range_num = data->range_num;
1693
1694 return 0;
1695 }
1696
1697 return -EINVAL;
1698}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
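/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
 *				   host that owns this TI SCI interface.
 * @handle:		pointer to TI SCI handle
 * @dev_id:		TISCI device ID
 * @subtype:		Resource assignment subtype being requested
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * The _from_shost variant below queries the range on behalf of a specified
 * secondary host instead.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */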
1712static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1713 u32 dev_id, u8 subtype,
1714 u16 *range_start, u16 *range_num)
1715{
1716 return ti_sci_get_resource_range(handle, dev_id, subtype,
1717 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1718 range_start, range_num);
1719}
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734static
1735int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1736 u32 dev_id, u8 subtype, u8 s_host,
1737 u16 *range_start, u16 *range_num)
1738{
1739 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1740 range_start, range_num);
1741}
1742
1743
1744
1745
1746
1747
1748
1749
1750
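/**
 * ti_sci_cmd_query_msmc() - Command to query currently available MSMC memory
 * @handle:	pointer to TI SCI handle
 * @msmc_start:	MSMC start address
 * @msmc_end:	MSMC end address
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */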
1751static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1752 u64 *msmc_start, u64 *msmc_end)
1753{
1754 struct ti_sci_msg_resp_query_msmc *resp;
1755 struct ti_sci_msg_hdr req;
1756 struct ti_sci_info *info;
1757 struct ti_sci_xfer *xfer;
1758 int ret = 0;
1759
1760 if (IS_ERR(handle))
1761 return PTR_ERR(handle);
1762 if (!handle)
1763 return -EINVAL;
1764
1765 info = handle_to_ti_sci_info(handle);
1766
1767 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1768 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1769 (u32 *)&req, sizeof(req), sizeof(*resp));
1770 if (IS_ERR(xfer)) {
1771 ret = PTR_ERR(xfer);
1772 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1773 return ret;
1774 }
1775
1776 ret = ti_sci_do_xfer(info, xfer);
1777 if (ret) {
1778 dev_err(info->dev, "Mbox send fail %d\n", ret);
1779 return ret;
1780 }
1781
1782 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1783
1784 if (!ti_sci_is_response_ack(resp))
1785 return -ENODEV;
1786
1787 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1788 resp->msmc_start_low;
1789 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1790 resp->msmc_end_low;
1791
1792 return ret;
1793}
1794
1795
1796
1797
1798
1799
1800
1801
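/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */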
1802static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1803 u8 proc_id)
1804{
1805 struct ti_sci_msg_req_proc_request req;
1806 struct ti_sci_msg_hdr *resp;
1807 struct ti_sci_info *info;
1808 struct ti_sci_xfer *xfer;
1809 int ret = 0;
1810
1811 if (IS_ERR(handle))
1812 return PTR_ERR(handle);
1813 if (!handle)
1814 return -EINVAL;
1815
1816 info = handle_to_ti_sci_info(handle);
1817
1818 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1819 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1820 (u32 *)&req, sizeof(req), sizeof(*resp));
1821 if (IS_ERR(xfer)) {
1822 ret = PTR_ERR(xfer);
1823 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1824 return ret;
1825 }
1826 req.processor_id = proc_id;
1827
1828 ret = ti_sci_do_xfer(info, xfer);
1829 if (ret) {
1830 dev_err(info->dev, "Mbox send fail %d\n", ret);
1831 return ret;
1832 }
1833
1834 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1835
1836 if (!ti_sci_is_response_ack(resp))
1837 ret = -ENODEV;
1838
1839 return ret;
1840}
1841
1842
1843
1844
1845
1846
1847
1848
1849static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1850 u8 proc_id)
1851{
1852 struct ti_sci_msg_req_proc_release req;
1853 struct ti_sci_msg_hdr *resp;
1854 struct ti_sci_info *info;
1855 struct ti_sci_xfer *xfer;
1856 int ret = 0;
1857
1858 if (IS_ERR(handle))
1859 return PTR_ERR(handle);
1860 if (!handle)
1861 return -EINVAL;
1862
1863 info = handle_to_ti_sci_info(handle);
1864
1865 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1866 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1867 (u32 *)&req, sizeof(req), sizeof(*resp));
1868 if (IS_ERR(xfer)) {
1869 ret = PTR_ERR(xfer);
1870 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1871 return ret;
1872 }
1873 req.processor_id = proc_id;
1874
1875 ret = ti_sci_do_xfer(info, xfer);
1876 if (ret) {
1877 dev_err(info->dev, "Mbox send fail %d\n", ret);
1878 return ret;
1879 }
1880
1881 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1882
1883 if (!ti_sci_is_response_ack(resp))
1884 ret = -ENODEV;
1885
1886 return ret;
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
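/**
 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
 *				control to another host
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */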
1899static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1900 u8 proc_id, u8 host_id)
1901{
1902 struct ti_sci_msg_req_proc_handover req;
1903 struct ti_sci_msg_hdr *resp;
1904 struct ti_sci_info *info;
1905 struct ti_sci_xfer *xfer;
1906 int ret = 0;
1907
1908 if (IS_ERR(handle))
1909 return PTR_ERR(handle);
1910 if (!handle)
1911 return -EINVAL;
1912
1913 info = handle_to_ti_sci_info(handle);
1914
1915 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1916 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1917 (u32 *)&req, sizeof(req), sizeof(*resp));
1918 if (IS_ERR(xfer)) {
1919 ret = PTR_ERR(xfer);
1920 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1921 return ret;
1922 }
1923 req.processor_id = proc_id;
1924 req.host_id = host_id;
1925
1926 ret = ti_sci_do_xfer(info, xfer);
1927 if (ret) {
1928 dev_err(info->dev, "Mbox send fail %d\n", ret);
1929 return ret;
1930 }
1931
1932 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1933
1934 if (!ti_sci_is_response_ack(resp))
1935 ret = -ENODEV;
1936
1937 return ret;
1938}
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
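/**
 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
 *				    configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */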
1950static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1951 u8 proc_id, u64 bootvector,
1952 u32 config_flags_set,
1953 u32 config_flags_clear)
1954{
1955 struct ti_sci_msg_req_set_proc_boot_config req;
1956 struct ti_sci_msg_hdr *resp;
1957 struct ti_sci_info *info;
1958 struct ti_sci_xfer *xfer;
1959 int ret = 0;
1960
1961 if (IS_ERR(handle))
1962 return PTR_ERR(handle);
1963 if (!handle)
1964 return -EINVAL;
1965
1966 info = handle_to_ti_sci_info(handle);
1967
1968 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1969 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1970 (u32 *)&req, sizeof(req), sizeof(*resp));
1971 if (IS_ERR(xfer)) {
1972 ret = PTR_ERR(xfer);
1973 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1974 return ret;
1975 }
1976 req.processor_id = proc_id;
1977 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1978 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1979 TISCI_ADDR_HIGH_SHIFT;
1980 req.config_flags_set = config_flags_set;
1981 req.config_flags_clear = config_flags_clear;
1982
1983 ret = ti_sci_do_xfer(info, xfer);
1984 if (ret) {
1985 dev_err(info->dev, "Mbox send fail %d\n", ret);
1986 return ret;
1987 }
1988
1989 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1990
1991 if (!ti_sci_is_response_ack(resp))
1992 ret = -ENODEV;
1993
1994 return ret;
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
2008 u8 proc_id, u32 control_flags_set,
2009 u32 control_flags_clear)
2010{
2011 struct ti_sci_msg_req_set_proc_boot_ctrl req;
2012 struct ti_sci_msg_hdr *resp;
2013 struct ti_sci_info *info;
2014 struct ti_sci_xfer *xfer;
2015 int ret = 0;
2016
2017 if (IS_ERR(handle))
2018 return PTR_ERR(handle);
2019 if (!handle)
2020 return -EINVAL;
2021
2022 info = handle_to_ti_sci_info(handle);
2023
2024 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
2025 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2026 (u32 *)&req, sizeof(req), sizeof(*resp));
2027 if (IS_ERR(xfer)) {
2028 ret = PTR_ERR(xfer);
2029 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2030 return ret;
2031 }
2032 req.processor_id = proc_id;
2033 req.control_flags_set = control_flags_set;
2034 req.control_flags_clear = control_flags_clear;
2035
2036 ret = ti_sci_do_xfer(info, xfer);
2037 if (ret) {
2038 dev_err(info->dev, "Mbox send fail %d\n", ret);
2039 return ret;
2040 }
2041
2042 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2043
2044 if (!ti_sci_is_response_ack(resp))
2045 ret = -ENODEV;
2046
2047 return ret;
2048}
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
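/**
 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
 *				       image, then set the processor
 *				       configuration flags
 * @handle:	Pointer to TI SCI handle
 * @image_addr:	Memory address of the payload image and certificate; updated
 *		if the image is relocated during authentication
 * @image_size:	Updated with the final size of the image after authentication
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */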
2062static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
2063 u64 *image_addr, u32 *image_size)
2064{
2065 struct ti_sci_msg_req_proc_auth_boot_image req;
2066 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
2067 struct ti_sci_info *info;
2068 struct ti_sci_xfer *xfer;
2069 int ret = 0;
2070
2071 if (IS_ERR(handle))
2072 return PTR_ERR(handle);
2073 if (!handle)
2074 return -EINVAL;
2075
2076 info = handle_to_ti_sci_info(handle);
2077
2078 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
2079 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2080 (u32 *)&req, sizeof(req), sizeof(*resp));
2081 if (IS_ERR(xfer)) {
2082 ret = PTR_ERR(xfer);
2083 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2084 return ret;
2085 }
2086 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
2087 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
2088 TISCI_ADDR_HIGH_SHIFT;
2089
2090 ret = ti_sci_do_xfer(info, xfer);
2091 if (ret) {
2092 dev_err(info->dev, "Mbox send fail %d\n", ret);
2093 return ret;
2094 }
2095
2096 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
2097
2098 if (!ti_sci_is_response_ack(resp))
2099 return -ENODEV;
2100
2101 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
2102 (((u64)resp->image_addr_high <<
2103 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2104 *image_size = resp->image_size;
2105
2106 return ret;
2107}
2108
2109
2110
2111
2112
2113
2114
2115
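/**
 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */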
2116static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
2117 u8 proc_id, u64 *bv, u32 *cfg_flags,
2118 u32 *ctrl_flags, u32 *sts_flags)
2119{
2120 struct ti_sci_msg_resp_get_proc_boot_status *resp;
2121 struct ti_sci_msg_req_get_proc_boot_status req;
2122 struct ti_sci_info *info;
2123 struct ti_sci_xfer *xfer;
2124 int ret = 0;
2125
2126 if (IS_ERR(handle))
2127 return PTR_ERR(handle);
2128 if (!handle)
2129 return -EINVAL;
2130
2131 info = handle_to_ti_sci_info(handle);
2132
2133 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
2134 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2135 (u32 *)&req, sizeof(req), sizeof(*resp));
2136 if (IS_ERR(xfer)) {
2137 ret = PTR_ERR(xfer);
2138 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2139 return ret;
2140 }
2141 req.processor_id = proc_id;
2142
2143 ret = ti_sci_do_xfer(info, xfer);
2144 if (ret) {
2145 dev_err(info->dev, "Mbox send fail %d\n", ret);
2146 return ret;
2147 }
2148
2149 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2150 xfer->tx_message.buf;
2151
2152 if (!ti_sci_is_response_ack(resp))
2153 return -ENODEV;
2154 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2155 (((u64)resp->bootvector_high <<
2156 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2157 *cfg_flags = resp->config_flags;
2158 *ctrl_flags = resp->control_flags;
2159 *sts_flags = resp->status_flags;
2160
2161 return ret;
2162}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
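/**
 * ti_sci_proc_wait_boot_status_no_wait() - Helper to wait for a processor
 *		boot status without requesting or waiting for a response
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
 * @num_wait_iterations:	Total number of iterations to check before
 *				timing out
 * @num_match_iterations:	Number of consecutive matches required to
 *				confirm the status
 * @delay_per_iteration_us:	Delay in microseconds between iterations
 * @delay_before_iterations_us:	Delay in microseconds before starting the
 *				iterations
 * @status_flags_1_set_all_wait:	Wait until all of these bits are set
 * @status_flags_1_set_any_wait:	Wait until any of these bits is set
 * @status_flags_1_clr_all_wait:	Wait until all of these bits are cleared
 * @status_flags_1_clr_any_wait:	Wait until any of these bits is cleared
 *
 * Return: 0 if the message was sent, else return appropriate error.
 */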
2199static int
2200ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2201 u8 proc_id,
2202 u8 num_wait_iterations,
2203 u8 num_match_iterations,
2204 u8 delay_per_iteration_us,
2205 u8 delay_before_iterations_us,
2206 u32 status_flags_1_set_all_wait,
2207 u32 status_flags_1_set_any_wait,
2208 u32 status_flags_1_clr_all_wait,
2209 u32 status_flags_1_clr_any_wait)
2210{
2211 struct ti_sci_msg_req_wait_proc_boot_status req;
2212 struct ti_sci_info *info;
2213 struct ti_sci_xfer *xfer;
2214 int ret = 0;
2215
2216 if (IS_ERR(handle))
2217 return PTR_ERR(handle);
2218 if (!handle)
2219 return -EINVAL;
2220
2221 info = handle_to_ti_sci_info(handle);
2222
2223 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2224 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2225 (u32 *)&req, sizeof(req), 0);
2226 if (IS_ERR(xfer)) {
2227 ret = PTR_ERR(xfer);
2228 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2229 return ret;
2230 }
2231 req.processor_id = proc_id;
2232 req.num_wait_iterations = num_wait_iterations;
2233 req.num_match_iterations = num_match_iterations;
2234 req.delay_per_iteration_us = delay_per_iteration_us;
2235 req.delay_before_iterations_us = delay_before_iterations_us;
2236 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2237 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2238 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2239 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2240
2241 ret = ti_sci_do_xfer(info, xfer);
2242 if (ret)
2243 dev_err(info->dev, "Mbox send fail %d\n", ret);
2244
2245 return ret;
2246}
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
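/**
 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shut down a core without
 *		requesting or waiting for a response
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Queues a wait for the core to reach WFE/WFI followed by a power-off
 * request, without waiting for either to complete.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */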
2258static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2259 u8 proc_id)
2260{
2261 int ret;
2262 struct ti_sci_info *info;
2263
2264 if (IS_ERR(handle))
2265 return PTR_ERR(handle);
2266 if (!handle)
2267 return -EINVAL;
2268
2269 info = handle_to_ti_sci_info(handle);
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
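	/*
	 * Send the core boot status wait message, waiting for the core to
	 * reach either WFE or WFI, without requesting or waiting for a
	 * response.
	 */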
2280 ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2281 U8_MAX, 100, U8_MAX, U8_MAX,
2282 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2283 0, 0);
2284 if (ret) {
2285 dev_err(info->dev, "Sending core %u wait message fail %d\n",
2286 proc_id, ret);
2287 return ret;
2288 }
2289
2290
2291
2292
2293
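	/*
	 * Now queue the power-off request for the core, again without
	 * requesting or waiting for a response.
	 */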
2294 ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2295 MSG_DEVICE_SW_STATE_AUTO_OFF);
2296 if (ret)
2297 dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2298 proc_id, ret);
2299
2300 return ret;
2301}
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
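/**
 * ti_sci_cmd_ring_config() - Configure a Navigator Subsystem ring
 * @handle:	pointer to TI SCI handle
 * @valid_params: Bitfield defining validity of the ring configuration
 *		  parameters
 * @nav_id:	Device ID of the Navigator Subsystem the ring belongs to
 * @index:	Ring index
 * @addr_lo:	Ring base address, low 32 bits
 * @addr_hi:	Ring base address, high 32 bits
 * @count:	Number of ring elements
 * @mode:	Ring mode
 * @size:	Ring element size
 * @order_id:	Ring bus order ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */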
2320static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2321 u32 valid_params, u16 nav_id, u16 index,
2322 u32 addr_lo, u32 addr_hi, u32 count,
2323 u8 mode, u8 size, u8 order_id)
2324{
2325 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2326 struct ti_sci_msg_rm_ring_cfg_req req;
2327 struct ti_sci_xfer *xfer;
2328 struct ti_sci_info *info;
2329 int ret = 0;
2330
2331 if (IS_ERR(handle))
2332 return PTR_ERR(handle);
2333 if (!handle)
2334 return -EINVAL;
2335
2336 info = handle_to_ti_sci_info(handle);
2337
2338 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2339 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2340 (u32 *)&req, sizeof(req), sizeof(*resp));
2341 if (IS_ERR(xfer)) {
2342 ret = PTR_ERR(xfer);
2343 dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2344 return ret;
2345 }
2346 req.valid_params = valid_params;
2347 req.nav_id = nav_id;
2348 req.index = index;
2349 req.addr_lo = addr_lo;
2350 req.addr_hi = addr_hi;
2351 req.count = count;
2352 req.mode = mode;
2353 req.size = size;
2354 req.order_id = order_id;
2355
2356 ret = ti_sci_do_xfer(info, xfer);
2357 if (ret) {
2358 dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2359 goto fail;
2360 }
2361
2362 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2363
2364 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2365
2366fail:
2367 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2368 return ret;
2369}
2370
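/**
 * ti_sci_cmd_rm_psil_pair() - Pair a PSI-L source thread to a destination
 *			       thread
 * @handle:	pointer to TI SCI handle
 * @nav_id:	Device ID of the Navigator Subsystem used for pairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */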
2371static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2372 u32 nav_id, u32 src_thread, u32 dst_thread)
2373{
2374 struct ti_sci_msg_hdr *resp;
2375 struct ti_sci_msg_psil_pair req;
2376 struct ti_sci_xfer *xfer;
2377 struct ti_sci_info *info;
2378 int ret = 0;
2379
2380 if (IS_ERR(handle))
2381 return PTR_ERR(handle);
2382 if (!handle)
2383 return -EINVAL;
2384
2385 info = handle_to_ti_sci_info(handle);
2386
2387 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2388 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2389 (u32 *)&req, sizeof(req), sizeof(*resp));
2390 if (IS_ERR(xfer)) {
2391 ret = PTR_ERR(xfer);
2392 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2393 return ret;
2394 }
2395 req.nav_id = nav_id;
2396 req.src_thread = src_thread;
2397 req.dst_thread = dst_thread;
2398
2399 ret = ti_sci_do_xfer(info, xfer);
2400 if (ret) {
2401 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2402 goto fail;
2403 }
2404
2405 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2406 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2407
2408fail:
	dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
		nav_id, src_thread, dst_thread, ret);
2411 return ret;
2412}
2413
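/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair a PSI-L source thread from a
 *				 destination thread
 * @handle:	pointer to TI SCI handle
 * @nav_id:	Device ID of the Navigator Subsystem used for unpairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */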
2414static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2415 u32 nav_id, u32 src_thread, u32 dst_thread)
2416{
2417 struct ti_sci_msg_hdr *resp;
2418 struct ti_sci_msg_psil_unpair req;
2419 struct ti_sci_xfer *xfer;
2420 struct ti_sci_info *info;
2421 int ret = 0;
2422
2423 if (IS_ERR(handle))
2424 return PTR_ERR(handle);
2425 if (!handle)
2426 return -EINVAL;
2427
2428 info = handle_to_ti_sci_info(handle);
2429
2430 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2431 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2432 (u32 *)&req, sizeof(req), sizeof(*resp));
2433 if (IS_ERR(xfer)) {
2434 ret = PTR_ERR(xfer);
2435 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2436 return ret;
2437 }
2438 req.nav_id = nav_id;
2439 req.src_thread = src_thread;
2440 req.dst_thread = dst_thread;
2441
2442 ret = ti_sci_do_xfer(info, xfer);
2443 if (ret) {
2444 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2445 goto fail;
2446 }
2447
2448 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2449 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2450
2451fail:
	dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
		src_thread, dst_thread, ret);
2454 return ret;
2455}
2456
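/**
 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP transmit channel
 * @handle:	Pointer to TI SCI handle
 * @params:	Pointer to the TX channel configuration parameters
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */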
2457static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2458 const struct ti_sci_handle *handle,
2459 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2460{
2461 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2462 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2463 struct ti_sci_xfer *xfer;
2464 struct ti_sci_info *info;
2465 int ret = 0;
2466
2467 if (IS_ERR(handle))
2468 return PTR_ERR(handle);
2469 if (!handle)
2470 return -EINVAL;
2471
2472 info = handle_to_ti_sci_info(handle);
2473
2474 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2475 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2476 (u32 *)&req, sizeof(req), sizeof(*resp));
2477 if (IS_ERR(xfer)) {
2478 ret = PTR_ERR(xfer);
2479 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2480 return ret;
2481 }
2482 req.valid_params = params->valid_params;
2483 req.nav_id = params->nav_id;
2484 req.index = params->index;
2485 req.tx_pause_on_err = params->tx_pause_on_err;
2486 req.tx_filt_einfo = params->tx_filt_einfo;
2487 req.tx_filt_pswords = params->tx_filt_pswords;
2488 req.tx_atype = params->tx_atype;
2489 req.tx_chan_type = params->tx_chan_type;
2490 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2491 req.tx_fetch_size = params->tx_fetch_size;
2492 req.tx_credit_count = params->tx_credit_count;
2493 req.txcq_qnum = params->txcq_qnum;
2494 req.tx_priority = params->tx_priority;
2495 req.tx_qos = params->tx_qos;
2496 req.tx_orderid = params->tx_orderid;
2497 req.fdepth = params->fdepth;
2498 req.tx_sched_priority = params->tx_sched_priority;
2499 req.tx_burst_size = params->tx_burst_size;
2500 req.tx_tdtype = params->tx_tdtype;
2501 req.extended_ch_type = params->extended_ch_type;
2502
2503 ret = ti_sci_do_xfer(info, xfer);
2504 if (ret) {
2505 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2506 goto fail;
2507 }
2508
2509 resp =
2510 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2511 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2512
2513fail:
	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2515 return ret;
2516}
2517
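/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP receive channel
 * @handle:	Pointer to TI SCI handle
 * @params:	Pointer to the RX channel configuration parameters
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */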
2518static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2519 const struct ti_sci_handle *handle,
2520 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2521{
2522 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2523 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2524 struct ti_sci_xfer *xfer;
2525 struct ti_sci_info *info;
2526 int ret = 0;
2527
2528 if (IS_ERR(handle))
2529 return PTR_ERR(handle);
2530 if (!handle)
2531 return -EINVAL;
2532
2533 info = handle_to_ti_sci_info(handle);
2534
2535 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2536 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2537 (u32 *)&req, sizeof(req), sizeof(*resp));
2538 if (IS_ERR(xfer)) {
2539 ret = PTR_ERR(xfer);
2540 dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2541 return ret;
2542 }
2543
2544 req.valid_params = params->valid_params;
2545 req.nav_id = params->nav_id;
2546 req.index = params->index;
2547 req.rx_fetch_size = params->rx_fetch_size;
2548 req.rxcq_qnum = params->rxcq_qnum;
2549 req.rx_priority = params->rx_priority;
2550 req.rx_qos = params->rx_qos;
2551 req.rx_orderid = params->rx_orderid;
2552 req.rx_sched_priority = params->rx_sched_priority;
2553 req.flowid_start = params->flowid_start;
2554 req.flowid_cnt = params->flowid_cnt;
2555 req.rx_pause_on_err = params->rx_pause_on_err;
2556 req.rx_atype = params->rx_atype;
2557 req.rx_chan_type = params->rx_chan_type;
2558 req.rx_ignore_short = params->rx_ignore_short;
2559 req.rx_ignore_long = params->rx_ignore_long;
2560
2561 ret = ti_sci_do_xfer(info, xfer);
2562 if (ret) {
2563 dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2564 goto fail;
2565 }
2566
2567 resp =
2568 (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2569 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2570
2571fail:
2572 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2573 return ret;
2574}
2575
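/**
 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure a UDMAP receive flow
 * @handle:	Pointer to TI SCI handle
 * @params:	Pointer to the receive flow configuration parameters
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */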
2576static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2577 const struct ti_sci_handle *handle,
2578 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2579{
2580 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2581 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2582 struct ti_sci_xfer *xfer;
2583 struct ti_sci_info *info;
2584 int ret = 0;
2585
2586 if (IS_ERR(handle))
2587 return PTR_ERR(handle);
2588 if (!handle)
2589 return -EINVAL;
2590
2591 info = handle_to_ti_sci_info(handle);
2592
2593 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2594 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2595 (u32 *)&req, sizeof(req), sizeof(*resp));
2596 if (IS_ERR(xfer)) {
2597 ret = PTR_ERR(xfer);
2598 dev_err(info->dev, "RX_FL_CFG: Message alloc failed(%d)\n",
2599 ret);
2600 return ret;
2601 }
2602
2603 req.valid_params = params->valid_params;
2604 req.nav_id = params->nav_id;
2605 req.flow_index = params->flow_index;
2606 req.rx_einfo_present = params->rx_einfo_present;
2607 req.rx_psinfo_present = params->rx_psinfo_present;
2608 req.rx_error_handling = params->rx_error_handling;
2609 req.rx_desc_type = params->rx_desc_type;
2610 req.rx_sop_offset = params->rx_sop_offset;
2611 req.rx_dest_qnum = params->rx_dest_qnum;
2612 req.rx_src_tag_hi = params->rx_src_tag_hi;
2613 req.rx_src_tag_lo = params->rx_src_tag_lo;
2614 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2615 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2616 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2617 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2618 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2619 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2620 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2621 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2622 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2623 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2624 req.rx_ps_location = params->rx_ps_location;
2625
2626 ret = ti_sci_do_xfer(info, xfer);
2627 if (ret) {
2628 dev_err(info->dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2629 goto fail;
2630 }
2631
2632 resp =
2633 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2634 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2635
2636fail:
2637 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2638 return ret;
2639}
2640
/**
 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
 * @handle:	Pointer to TI SCI handle.
 * @region:	Region configuration to program, including the permission
 *		slots and the start/end addresses.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
2648static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2649 const struct ti_sci_msg_fwl_region *region)
2650{
2651 struct ti_sci_msg_fwl_set_firewall_region_req req;
2652 struct ti_sci_msg_hdr *resp;
2653 struct ti_sci_info *info;
2654 struct ti_sci_xfer *xfer;
2655 int ret = 0;
2656
2657 if (IS_ERR(handle))
2658 return PTR_ERR(handle);
2659 if (!handle)
2660 return -EINVAL;
2661
2662 info = handle_to_ti_sci_info(handle);
2663
2664 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2665 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2666 (u32 *)&req, sizeof(req), sizeof(*resp));
2667 if (IS_ERR(xfer)) {
2668 ret = PTR_ERR(xfer);
2669 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2670 return ret;
2671 }
2672
2673 req.fwl_id = region->fwl_id;
2674 req.region = region->region;
2675 req.n_permission_regs = region->n_permission_regs;
2676 req.control = region->control;
2677 req.permissions[0] = region->permissions[0];
2678 req.permissions[1] = region->permissions[1];
2679 req.permissions[2] = region->permissions[2];
2680 req.start_address = region->start_address;
2681 req.end_address = region->end_address;
2682
2683 ret = ti_sci_do_xfer(info, xfer);
2684 if (ret) {
2685 dev_err(info->dev, "Mbox send fail %d\n", ret);
2686 return ret;
2687 }
2688
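	/*
	 * The response is received into the same secure proxy buffer that
	 * carried the request, hence the cast of tx_message.buf below.
	 */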
2689 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2690
2691 if (!ti_sci_is_response_ack(resp))
2692 return -ENODEV;
2693
2694 return 0;
2695}
2696
/**
 * ti_sci_cmd_get_fwl_region() - Request for reading back a firewall region
 * @handle:	Pointer to TI SCI handle.
 * @region:	Region to query; on success the configuration reported by
 *		firmware is written back into this structure.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
2704static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2705 struct ti_sci_msg_fwl_region *region)
2706{
2707 struct ti_sci_msg_fwl_get_firewall_region_req req;
2708 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2709 struct ti_sci_info *info;
2710 struct ti_sci_xfer *xfer;
2711 int ret = 0;
2712
2713 if (IS_ERR(handle))
2714 return PTR_ERR(handle);
2715 if (!handle)
2716 return -EINVAL;
2717
2718 info = handle_to_ti_sci_info(handle);
2719
2720 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2721 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2722 (u32 *)&req, sizeof(req), sizeof(*resp));
2723 if (IS_ERR(xfer)) {
2724 ret = PTR_ERR(xfer);
2725 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2726 return ret;
2727 }
2728
2729 req.fwl_id = region->fwl_id;
2730 req.region = region->region;
2731 req.n_permission_regs = region->n_permission_regs;
2732
2733 ret = ti_sci_do_xfer(info, xfer);
2734 if (ret) {
2735 dev_err(info->dev, "Mbox send fail %d\n", ret);
2736 return ret;
2737 }
2738
2739 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2740
2741 if (!ti_sci_is_response_ack(resp))
2742 return -ENODEV;
2743
2744 region->fwl_id = resp->fwl_id;
2745 region->region = resp->region;
2746 region->n_permission_regs = resp->n_permission_regs;
2747 region->control = resp->control;
2748 region->permissions[0] = resp->permissions[0];
2749 region->permissions[1] = resp->permissions[1];
2750 region->permissions[2] = resp->permissions[2];
2751 region->start_address = resp->start_address;
2752 region->end_address = resp->end_address;
2753
2754 return 0;
2755}
2756
/**
 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
 * @handle:	Pointer to TI SCI handle.
 * @owner:	Owner change request; on success the owner privid and
 *		permission bits reported by firmware are filled in.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
2764static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2765 struct ti_sci_msg_fwl_owner *owner)
2766{
2767 struct ti_sci_msg_fwl_change_owner_info_req req;
2768 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2769 struct ti_sci_info *info;
2770 struct ti_sci_xfer *xfer;
2771 int ret = 0;
2772
2773 if (IS_ERR(handle))
2774 return PTR_ERR(handle);
2775 if (!handle)
2776 return -EINVAL;
2777
2778 info = handle_to_ti_sci_info(handle);
2779
2780 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2781 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2782 (u32 *)&req, sizeof(req), sizeof(*resp));
2783 if (IS_ERR(xfer)) {
2784 ret = PTR_ERR(xfer);
2785 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2786 return ret;
2787 }
2788
2789 req.fwl_id = owner->fwl_id;
2790 req.region = owner->region;
2791 req.owner_index = owner->owner_index;
2792
2793 ret = ti_sci_do_xfer(info, xfer);
2794 if (ret) {
2795 dev_err(info->dev, "Mbox send fail %d\n", ret);
2796 return ret;
2797 }
2798
2799 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2800
2801 if (!ti_sci_is_response_ack(resp))
2802 return -ENODEV;
2803
2804 owner->fwl_id = resp->fwl_id;
2805 owner->region = resp->region;
2806 owner->owner_index = resp->owner_index;
2807 owner->owner_privid = resp->owner_privid;
2808 owner->owner_permission_bits = resp->owner_permission_bits;
2809
2810 return ret;
2811}
2812
/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI instance private data
 */
2817static void ti_sci_setup_ops(struct ti_sci_info *info)
2818{
2819 struct ti_sci_ops *ops = &info->handle.ops;
2820 struct ti_sci_board_ops *bops = &ops->board_ops;
2821 struct ti_sci_dev_ops *dops = &ops->dev_ops;
2822 struct ti_sci_clk_ops *cops = &ops->clk_ops;
2823 struct ti_sci_core_ops *core_ops = &ops->core_ops;
2824 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2825 struct ti_sci_proc_ops *pops = &ops->proc_ops;
2826 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2827 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2828 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2829 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
2830
2831 bops->board_config = ti_sci_cmd_set_board_config;
2832 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2833 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2834 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
2835
2836 dops->get_device = ti_sci_cmd_get_device;
2837 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2838 dops->idle_device = ti_sci_cmd_idle_device;
2839 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2840 dops->put_device = ti_sci_cmd_put_device;
2841 dops->is_valid = ti_sci_cmd_dev_is_valid;
2842 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2843 dops->is_idle = ti_sci_cmd_dev_is_idle;
2844 dops->is_stop = ti_sci_cmd_dev_is_stop;
2845 dops->is_on = ti_sci_cmd_dev_is_on;
2846 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2847 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2848 dops->get_device_resets = ti_sci_cmd_get_device_resets;
2849 dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
2850
2851 cops->get_clock = ti_sci_cmd_get_clock;
2852 cops->idle_clock = ti_sci_cmd_idle_clock;
2853 cops->put_clock = ti_sci_cmd_put_clock;
2854 cops->is_auto = ti_sci_cmd_clk_is_auto;
2855 cops->is_on = ti_sci_cmd_clk_is_on;
2856 cops->is_off = ti_sci_cmd_clk_is_off;
2857
2858 cops->set_parent = ti_sci_cmd_clk_set_parent;
2859 cops->get_parent = ti_sci_cmd_clk_get_parent;
2860 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2861
2862 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2863 cops->set_freq = ti_sci_cmd_clk_set_freq;
2864 cops->get_freq = ti_sci_cmd_clk_get_freq;
2865
2866 core_ops->reboot_device = ti_sci_cmd_core_reboot;
2867 core_ops->query_msmc = ti_sci_cmd_query_msmc;
2868
2869 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2870 rm_core_ops->get_range_from_shost =
2871 ti_sci_cmd_get_resource_range_from_shost;
2872
2873 pops->proc_request = ti_sci_cmd_proc_request;
2874 pops->proc_release = ti_sci_cmd_proc_release;
2875 pops->proc_handover = ti_sci_cmd_proc_handover;
2876 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2877 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2878 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2879 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
2880 pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
2881
2882 rops->config = ti_sci_cmd_ring_config;
2883
2884 psilops->pair = ti_sci_cmd_rm_psil_pair;
2885 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2886
2887 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2888 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2889 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2890
2891 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2892 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2893 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
2894}
2895
/**
 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
 * @sci_dev:	Pointer to the system firmware device
 *
 * Return: pointer to the handle if successful, else an error pointer on
 *	   invalid input.
 */
2903const
2904struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2905{
2906 if (!sci_dev)
2907 return ERR_PTR(-EINVAL);
2908
2909 struct ti_sci_info *info = dev_get_priv(sci_dev);
2910
2911 if (!info)
2912 return ERR_PTR(-EINVAL);
2913
2914 struct ti_sci_handle *handle = &info->handle;
2915
2916 if (!handle)
2917 return ERR_PTR(-EINVAL);
2918
2919 return handle;
2920}
2921
/**
 * ti_sci_get_handle() - Get the TI SCI handle for a child device
 * @dev:	Pointer to the device requesting the handle; its parent must
 *		be the system firmware device.
 *
 * Return: pointer to the handle if successful, else an error pointer.
 */
2929const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2930{
2931 if (!dev)
2932 return ERR_PTR(-EINVAL);
2933
2934 struct udevice *sci_dev = dev_get_parent(dev);
2935
2936 return ti_sci_get_handle_from_sysfw(sci_dev);
2937}
2938
/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using a DT phandle
 * @dev:	Device whose node carries the phandle property
 * @property:	Property name containing the phandle to the TISCI node
 *
 * Return: pointer to the handle if successful, else an appropriate error
 *	   pointer.
 */
2946const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2947 const char *property)
2948{
2949 struct ti_sci_info *entry, *info = NULL;
	u32 phandle;
	int err;
2951 ofnode node;
2952
2953 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2954 if (err)
2955 return ERR_PTR(err);
2956
2957 node = ofnode_get_by_phandle(phandle);
2958 if (!ofnode_valid(node))
2959 return ERR_PTR(-EINVAL);
2960
2961 list_for_each_entry(entry, &ti_sci_list, list)
2962 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2963 info = entry;
2964 break;
2965 }
2966
2967 if (!info)
2968 return ERR_PTR(-ENODEV);
2969
2970 return &info->handle;
2971}
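
/*
 * Illustrative usage only (not part of the driver): a client with a
 * "ti,sci" phandle in its node could fetch the handle and power up a
 * device roughly as follows; the property name and device ID 42 are
 * placeholders for this sketch.
 *
 *	const struct ti_sci_handle *sci;
 *
 *	sci = ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 *	ret = sci->ops.dev_ops.get_device(sci, 42);
 */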
2972
/**
 * ti_sci_of_to_info() - Generate private data from the device tree
 * @dev:	Corresponding system controller interface device
 * @info:	Pointer to driver specific private data
 *
 * Return: 0 if all goes well, else an appropriate error value.
 */
2980static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2981{
2982 int ret;
2983
2984 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2985 if (ret) {
2986 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2987 __func__, ret);
2988 return ret;
2989 }
2990
2991 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2992 if (ret) {
2993 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2994 __func__, ret);
2995 return ret;
2996 }
2997
2998
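	/* The notify channel is optional; log at debug level and continue */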
2999 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
3000 if (ret) {
3001 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
3002 __func__, ret);
3003 }
3004
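	/*
	 * Host ID comes from DT with the per-SoC default as fallback;
	 * "ti,secure-host" enables the secure transport header handling.
	 */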
3005 info->host_id = dev_read_u32_default(dev, "ti,host-id",
3006 info->desc->default_host_id);
3007
3008 info->is_secure = dev_read_bool(dev, "ti,secure-host");
3009
3010 return 0;
3011}
3012
/**
 * ti_sci_probe() - Basic probe
 * @dev:	Corresponding system controller interface device
 *
 * Return: 0 if all goes well, else an appropriate error value.
 */
3019static int ti_sci_probe(struct udevice *dev)
3020{
3021 struct ti_sci_info *info;
3022 int ret;
3023
3024 debug("%s(dev=%p)\n", __func__, dev);
3025
3026 info = dev_get_priv(dev);
3027 info->desc = (void *)dev_get_driver_data(dev);
3028
3029 ret = ti_sci_of_to_info(dev, info);
3030 if (ret) {
3031 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
3032 return ret;
3033 }
3034
3035 info->dev = dev;
3036 info->seq = 0xA;
3037
3038 list_add_tail(&info->list, &ti_sci_list);
3039 ti_sci_setup_ops(info);
3040
3041 ret = ti_sci_cmd_get_revision(&info->handle);
3042
3043 INIT_LIST_HEAD(&info->dev_list);
3044
3045 return ret;
3046}
3047
/**
 * ti_sci_dm_probe() - Basic probe for the DM firmware variant
 * @dev:	Corresponding system controller interface device
 *
 * Return: 0 if all goes well, else an appropriate error value.
 */
3054static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
3055{
3056 struct ti_sci_rm_core_ops *rm_core_ops;
3057 struct ti_sci_rm_udmap_ops *udmap_ops;
3058 struct ti_sci_rm_ringacc_ops *rops;
3059 struct ti_sci_rm_psil_ops *psilops;
3060 struct ti_sci_ops *ops;
3061 struct ti_sci_info *info;
3062 int ret;
3063
3064 debug("%s(dev=%p)\n", __func__, dev);
3065
3066 info = dev_get_priv(dev);
3067 info->desc = (void *)dev_get_driver_data(dev);
3068
3069 ret = ti_sci_of_to_info(dev, info);
3070 if (ret) {
3071 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
3072 return ret;
3073 }
3074
3075 info->dev = dev;
3076 info->seq = 0xA;
3077
3078 list_add_tail(&info->list, &ti_sci_list);
3079
3080 ops = &info->handle.ops;
3081
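	/*
	 * The DM firmware variant takes resource ranges from the static
	 * data tables rather than querying them over TISCI, and only the
	 * RM ops needed for early boot are populated.
	 */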
3082 rm_core_ops = &ops->rm_core_ops;
3083 rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
3084
3085 rops = &ops->rm_ring_ops;
3086 rops->config = ti_sci_cmd_ring_config;
3087
3088 psilops = &ops->rm_psil_ops;
3089 psilops->pair = ti_sci_cmd_rm_psil_pair;
3090 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
3091
3092 udmap_ops = &ops->rm_udmap_ops;
3093 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
3094 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
3095 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3096
3097 return ret;
3098}
3099
/**
 * ti_sci_get_free_resource() - Get a free resource from a TISCI resource range
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource index if one is free, else TI_SCI_RESOURCE_NULL.
 */
3106u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3107{
3108 u16 set, free_bit;
3109
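	/* Scan each range set for the first free index and mark it used */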
3110 for (set = 0; set < res->sets; set++) {
3111 free_bit = find_first_zero_bit(res->desc[set].res_map,
3112 res->desc[set].num);
3113 if (free_bit != res->desc[set].num) {
3114 set_bit(free_bit, res->desc[set].res_map);
3115 return res->desc[set].start + free_bit;
3116 }
3117 }
3118
3119 return TI_SCI_RESOURCE_NULL;
3120}
3121
/**
 * ti_sci_release_resource() - Release a resource back to a TISCI resource range
 * @res:	Pointer to the TISCI resource
 * @id:		Resource index to release
 */
3126void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3127{
3128 u16 set;
3129
3130 for (set = 0; set < res->sets; set++) {
3131 if (res->desc[set].start <= id &&
3132 (res->desc[set].num + res->desc[set].start) > id)
3133 clear_bit(id - res->desc[set].start,
3134 res->desc[set].res_map);
3135 }
3136}
3137
/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @of_prop:	Property name holding the list of resource subtype ids
 *
 * Allocates and initializes a ti_sci_resource structure with one range set
 * per subtype listed in @of_prop. Client drivers can then call
 * ti_sci_get_free_resource()/ti_sci_release_resource() on the result.
 *
 * Return: Pointer to ti_sci_resource if all went well, else an appropriate
 *	   error pointer.
 */
3152struct ti_sci_resource *
3153devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3154 struct udevice *dev, u32 dev_id, char *of_prop)
3155{
3156 u32 resource_subtype;
3157 struct ti_sci_resource *res;
3158 bool valid_set = false;
3159 int sets, i, ret;
3160 u32 *temp;
3161
3162 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3163 if (!res)
3164 return ERR_PTR(-ENOMEM);
3165
3166 sets = dev_read_size(dev, of_prop);
3167 if (sets < 0) {
3168 dev_err(dev, "%s resource type ids not available\n", of_prop);
3169 return ERR_PTR(sets);
3170 }
	temp = malloc(sets);
	if (!temp)
		return ERR_PTR(-ENOMEM);
	sets /= sizeof(u32);
	res->sets = sets;

	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc) {
		free(temp);
		return ERR_PTR(-ENOMEM);
	}

	ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
	if (ret) {
		free(temp);
		return ERR_PTR(-EINVAL);
	}
3183
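	/*
	 * Query the range for every subtype; a missing range is tolerated
	 * as long as at least one set resolves.
	 */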
3184 for (i = 0; i < res->sets; i++) {
3185 resource_subtype = temp[i];
3186 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3187 resource_subtype,
3188 &res->desc[i].start,
3189 &res->desc[i].num);
3190 if (ret) {
3191 dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
3192 dev_id, resource_subtype,
3193 handle_to_ti_sci_info(handle)->host_id);
3194 res->desc[i].start = 0;
3195 res->desc[i].num = 0;
3196 continue;
3197 }
3198
3199 valid_set = true;
3200 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
3201 dev_id, resource_subtype, res->desc[i].start,
3202 res->desc[i].num);
3203
3204 res->desc[i].res_map =
3205 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3206 sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map) {
			free(temp);
			return ERR_PTR(-ENOMEM);
		}
	}

	free(temp);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);
3215}
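
/*
 * Illustrative usage only (not part of the driver): a DMA client could
 * pull its ring range out of a DT property and then allocate/release
 * indices from the returned bitmaps. The property name and dev_id below
 * are placeholders for this sketch.
 *
 *	struct ti_sci_resource *rings;
 *	u16 ring;
 *
 *	rings = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					    "ti,sci-rm-range-gp-rings");
 *	if (IS_ERR(rings))
 *		return PTR_ERR(rings);
 *	ring = ti_sci_get_free_resource(rings);
 *	if (ring == TI_SCI_RESOURCE_NULL)
 *		return -ENODEV;
 *	...
 *	ti_sci_release_resource(rings, ring);
 */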
3216
/* Description for K2G */
3218static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3219 .default_host_id = 2,
3220
3221 .max_rx_timeout_ms = 10000,
3222
3223 .max_msgs = 20,
3224 .max_msg_size = 64,
3225};
3226
/* Description for AM654 */
3228static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3229 .default_host_id = 12,
3230
3231 .max_rx_timeout_ms = 10000,
3232
3233 .max_msgs = 20,
3234 .max_msg_size = 60,
3235};
3236
/* Description for the J721E R5 device manager (DM) firmware interface */
3238static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
3239 .default_host_id = 3,
3240 .max_rx_timeout_ms = 10000,
3241 .max_msgs = 20,
3242 .max_msg_size = 60,
3243};
3244
3245static const struct udevice_id ti_sci_ids[] = {
3246 {
3247 .compatible = "ti,k2g-sci",
3248 .data = (ulong)&ti_sci_pmmc_k2g_desc
3249 },
3250 {
3251 .compatible = "ti,am654-sci",
3252 .data = (ulong)&ti_sci_pmmc_am654_desc
3253 },
3254 { },
3255};
3256
3257static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
3258 {
3259 .compatible = "ti,j721e-dm-sci",
3260 .data = (ulong)&ti_sci_dm_j721e_desc
3261 },
3262 { },
3263};
3264
3265U_BOOT_DRIVER(ti_sci) = {
3266 .name = "ti_sci",
3267 .id = UCLASS_FIRMWARE,
3268 .of_match = ti_sci_ids,
3269 .probe = ti_sci_probe,
3270 .priv_auto = sizeof(struct ti_sci_info),
3271};
3272
3273#if IS_ENABLED(CONFIG_K3_DM_FW)
3274U_BOOT_DRIVER(ti_sci_dm) = {
3275 .name = "ti_sci_dm",
3276 .id = UCLASS_FIRMWARE,
3277 .of_match = ti_sci_dm_ids,
3278 .probe = ti_sci_dm_probe,
3279 .priv_auto = sizeof(struct ti_sci_info),
3280};
3281#endif
3282