// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	The index into this bitmap is also used as the message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with the SoC (in milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data (in bytes) per message that can be
 *	handled
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing an SCMI channel information
 *
 * @cl: Mailbox client
 * @chan: Transmit/receive mailbox channel
 * @payload: Transmit/receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing an SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @minfo: Message (transfer) info
 * @tx_idr: IDR object mapping protocol id to channel info pointer
 * @protocols_imp: List of protocols implemented, at most MAX_PROTOCOLS_IMP
 *	elements, provided by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info minfo;
	struct idr tx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * Layout of the shared memory area used by the mailbox transport. The SCMI
 * specification requires all parameters, message headers, element
 * identifiers etc. to be expressed in little endian format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	/* SCMI_ERR_MAX evaluates to -9, not -11, so bound by the map size */
	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to the appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* Are we even expecting it? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);
	/* Is the message of valid length? */
	if (xfer->rx.len > info->desc->max_msg_size) {
		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
			xfer->rx.len, info->desc->max_msg_size);
		return;
	}

	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed command header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: SCMI entity handle
 *
 * Helper function used by the various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function holds a spinlock to maintain integrity of internal data
 * structures.
 *
 * Return: pointer to the allocated xfer if all went fine, else an ERR_PTR.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}

/**
 * scmi_xfer_put() - Release a message
 *
 * @handle: SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

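/**
 * scmi_xfer_poll_done() - Check whether the shared memory reports the given
 *	transfer as completed
 *
 * @cinfo: SCMI channel info
 * @xfer: message being polled for
 *
 * Return: true if the channel holds the token of @xfer and reports the
 * channel as free or in error, false otherwise.
 */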
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

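/* True once the transfer completes or the polling deadline @stop passes */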
static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, the transmit error code if the
 *	send failed, else 0 if all went well.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: the protocol layer queues transfers by itself, but the
	 * mailbox framework still has to be kicked after the message has
	 * been received, hence the explicit txdone notification here.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}

/**
 * scmi_xfer_get_init() - Allocate and initialise one message
 *
 * @handle: SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using scmi_xfer_get() and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to the message, else the
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds the returned version of the protocol.
 *
 * Return: 0 if all went fine, else the appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

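/**
 * scmi_setup_protocol_implemented() - Record the list of protocols
 *	implemented by the platform
 *
 * @handle: SCMI entity handle
 * @prot_imp: array of protocol identifiers, as discovered by the base
 *	protocol
 */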
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

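/**
 * scmi_is_protocol_implemented() - Check if a protocol is implemented
 *
 * @handle: SCMI entity handle
 * @prot_id: protocol identifier to look for
 *
 * Return: true if @prot_id appears in the list registered via
 * scmi_setup_protocol_implemented(), false otherwise.
 */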
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to the device for which we want the SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * usage counting is expected to be maintained by the caller of the SCMI
 * protocol library. scmi_handle_put() must be balanced with a successful
 * scmi_handle_get().
 *
 * Return: pointer to the handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework;
 * usage counting is expected to be maintained by the caller of the SCMI
 * protocol library. scmi_handle_put() must be balanced with a successful
 * scmi_handle_get().
 *
 * Return: 0 if successfully released, -EINVAL if @handle is NULL
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

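/**
 * scmi_xfer_info_init() - Pre-allocate the transfer pool and its bookkeeping
 *
 * @sinfo: SCMI instance information
 *
 * Allocates max_msg transfer descriptors, their receive buffers and the
 * bitmap used to track which sequence numbers are in flight.
 *
 * Return: 0 on success, else a negative error code.
 */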
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

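/* Returns 0 if the node has a parsable "mboxes" property, else an error */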
static int scmi_mailbox_check(struct device_node *np)
{
	struct of_phandle_args arg;

	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
}

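/* idr_for_each() callback: release a mailbox channel and drop its idr entry */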
static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}

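/**
 * scmi_remove() - Platform driver remove callback
 *
 * @pdev: platform device being removed
 *
 * Refuses to unbind while protocol devices still hold the handle, otherwise
 * frees all mailbox channels.
 *
 * Return: 0 on success, -EBUSY if the instance still has users.
 */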
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	return ret;
}

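/**
 * scmi_mbox_chan_setup() - Initialise the mailbox channel for a protocol
 *
 * @info: SCMI instance information
 * @dev: device requesting the channel (SCMI instance or protocol device)
 * @prot_id: protocol identifier the channel is registered under
 *
 * If @dev does not provide its own "mboxes"/"shmem" properties, the channel
 * already registered for the base protocol is reused.
 *
 * Return: 0 on success, else a negative error code.
 */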
static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;

	if (scmi_mailbox_check(np)) {
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = scmi_tx_prepare;
	cl->tx_block = false;
	cl->knows_txdone = true;

	shmem = of_parse_phandle(np, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
		return -EADDRNOTAVAIL;
	}

	cinfo->chan = mbox_request_channel(cl, 0);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI Tx mailbox\n");
		return ret;
	}

idr_alloc:
	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

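/*
 * Create a scmi_device for @prot_id and give it its own (or the shared)
 * mailbox channel; on failure the device is destroyed again and the
 * protocol is simply skipped.
 */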
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

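/**
 * scmi_probe() - Platform driver probe callback
 *
 * @pdev: platform device for the SCMI instance
 *
 * Sets up the transfer pool and the base-protocol mailbox channel, queries
 * the platform via the base protocol, and then creates one scmi_device per
 * implemented protocol child node.
 *
 * Return: 0 on success, else a negative error code.
 */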
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_match_device(scmi_of_match, dev)->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}

static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");