1
2
3
4
5
6#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
7
8#include <linux/atomic.h>
9#include <linux/cpu_pm.h>
10#include <linux/delay.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/iopoll.h>
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_irq.h>
19#include <linux/of_platform.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/wait.h>
24
25#include <soc/qcom/cmd-db.h>
26#include <soc/qcom/tcs.h>
27#include <dt-bindings/soc/qcom,rpmh-rsc.h>
28
29#include "rpmh-internal.h"
30
31#define CREATE_TRACE_POINTS
32#include "trace-rpmh.h"
33
34#define RSC_DRV_TCS_OFFSET 672
35#define RSC_DRV_CMD_OFFSET 20
36
37
38#define DRV_SOLVER_CONFIG 0x04
39#define DRV_HW_SOLVER_MASK 1
40#define DRV_HW_SOLVER_SHIFT 24
41
42
43#define DRV_PRNT_CHLD_CONFIG 0x0C
44#define DRV_NUM_TCS_MASK 0x3F
45#define DRV_NUM_TCS_SHIFT 6
46#define DRV_NCPT_MASK 0x1F
47#define DRV_NCPT_SHIFT 27
48
49
50#define RSC_DRV_IRQ_ENABLE 0x00
51#define RSC_DRV_IRQ_STATUS 0x04
52#define RSC_DRV_IRQ_CLEAR 0x08
53
54
55
56
57
58
59
60
61#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
62#define RSC_DRV_CONTROL 0x14
63#define RSC_DRV_STATUS 0x18
64#define RSC_DRV_CMD_ENABLE 0x1C
65
66
67
68
69
70
71
72#define RSC_DRV_CMD_MSGID 0x30
73#define RSC_DRV_CMD_ADDR 0x34
74#define RSC_DRV_CMD_DATA 0x38
75#define RSC_DRV_CMD_STATUS 0x3C
76#define RSC_DRV_CMD_RESP_DATA 0x40
77
78#define TCS_AMC_MODE_ENABLE BIT(16)
79#define TCS_AMC_MODE_TRIGGER BIT(24)
80
81
82#define CMD_MSGID_LEN 8
83#define CMD_MSGID_RESP_REQ BIT(8)
84#define CMD_MSGID_WRITE BIT(16)
85#define CMD_STATUS_ISSUED BIT(8)
86#define CMD_STATUS_COMPL BIT(16)
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/*
 * tcs_reg_addr() - Compute the iomem address of a per-TCS register.
 * @drv:    The RSC controller.
 * @reg:    Register offset within one TCS (e.g. RSC_DRV_CMD_ENABLE).
 * @tcs_id: TCS index within this DRV.
 *
 * TCS register banks are laid out RSC_DRV_TCS_OFFSET bytes apart
 * starting at drv->tcs_base.
 */
static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}
147
/*
 * tcs_cmd_addr() - Compute the iomem address of a per-command register.
 * @drv:    The RSC controller.
 * @reg:    Register offset within one command slot (e.g. RSC_DRV_CMD_ADDR).
 * @tcs_id: TCS index within this DRV.
 * @cmd_id: Command slot index within the TCS.
 *
 * Command slots are RSC_DRV_CMD_OFFSET bytes apart within a TCS.
 */
static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}
153
/* Read a per-command register; relaxed (no barrier) is enough here. */
static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
159
/* Read a per-TCS register; relaxed (no barrier) is enough here. */
static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
164
/* Write a per-command register without any barrier or read-back. */
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
170
/* Write a per-TCS register without any barrier or read-back. */
static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
176
/*
 * write_tcs_reg_sync() - Write a per-TCS register and confirm the write
 * landed by reading the value back before returning.
 * @drv:    The RSC controller.
 * @reg:    Register offset within one TCS.
 * @tcs_id: TCS index within this DRV.
 * @data:   Value to write.
 *
 * Uses writel()/readl() (with barriers) rather than the relaxed variants.
 */
static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value.  Use a counter rather than
	 * ktime for timeout in case this can be called after timekeeping
	 * stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}
196
197
198
199
200
201
202
203
204
205
206
207
208
209static void tcs_invalidate(struct rsc_drv *drv, int type)
210{
211 int m;
212 struct tcs_group *tcs = &drv->tcs[type];
213
214
215 if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
216 return;
217
218 for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
219 write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
220
221 bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
222}
223
224
225
226
227
228
229
230
231
/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when
 * this function is called, since the cleared TCSes could otherwise be
 * reprogrammed immediately.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}
237
238
239
240
241
242
243
244
245
246
247
248static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
249 const struct tcs_request *msg)
250{
251 int type;
252 struct tcs_group *tcs;
253
254 switch (msg->state) {
255 case RPMH_ACTIVE_ONLY_STATE:
256 type = ACTIVE_TCS;
257 break;
258 case RPMH_WAKE_ONLY_STATE:
259 type = WAKE_TCS;
260 break;
261 case RPMH_SLEEP_STATE:
262 type = SLEEP_TCS;
263 break;
264 default:
265 return ERR_PTR(-EINVAL);
266 }
267
268
269
270
271
272
273
274
275 tcs = &drv->tcs[type];
276 if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
277 tcs = &drv->tcs[WAKE_TCS];
278
279 return tcs;
280}
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
299 int tcs_id)
300{
301 struct tcs_group *tcs;
302 int i;
303
304 for (i = 0; i < TCS_TYPE_NR; i++) {
305 tcs = &drv->tcs[i];
306 if (tcs->mask & BIT(tcs_id))
307 return tcs->req[tcs_id - tcs->offset];
308 }
309
310 return NULL;
311}
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger; if false then just untrigger.
 *
 * In the normal case this is called with trigger=true to start a transfer:
 * it first un-triggers/disables the TCS (clearing out state from the last
 * transfer), then enables AMC mode and sets the trigger.
 *
 * When a wake TCS was borrowed for an active-only transfer, tcs_tx_done()
 * calls this with trigger=false so the TCS is left un-triggered before it
 * is used for wake purposes again.
 *
 * NOTE: the exact write order below (trigger cleared before mode enable,
 * sync write-backs in between) follows the hardware's requirements —
 * do not reorder.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * While clearing, ensure that the AMC mode trigger is cleared first
	 * and only then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}
353
354
355
356
357
358
359
360
361
362
363static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
364{
365 u32 data;
366
367 data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
368 if (enable)
369 data |= BIT(tcs_id);
370 else
371 data &= ~BIT(tcs_id);
372 writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
373}
374
375
376
377
378
379
380
381
382
383
384
385static irqreturn_t tcs_tx_done(int irq, void *p)
386{
387 struct rsc_drv *drv = p;
388 int i, j, err = 0;
389 unsigned long irq_status;
390 const struct tcs_request *req;
391 struct tcs_cmd *cmd;
392
393 irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
394
395 for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
396 req = get_req_from_tcs(drv, i);
397 if (WARN_ON(!req))
398 goto skip;
399
400 err = 0;
401 for (j = 0; j < req->num_cmds; j++) {
402 u32 sts;
403
404 cmd = &req->cmds[j];
405 sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
406 if (!(sts & CMD_STATUS_ISSUED) ||
407 ((req->wait_for_compl || cmd->wait) &&
408 !(sts & CMD_STATUS_COMPL))) {
409 pr_err("Incomplete request: %s: addr=%#x data=%#x",
410 drv->name, cmd->addr, cmd->data);
411 err = -EIO;
412 }
413 }
414
415 trace_rpmh_tx_done(drv, i, req, err);
416
417
418
419
420
421
422 if (!drv->tcs[ACTIVE_TCS].num_tcs)
423 __tcs_set_trigger(drv, i, false);
424skip:
425
426 write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
427 writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
428 spin_lock(&drv->lock);
429 clear_bit(i, drv->tcs_in_use);
430
431
432
433
434
435 if (!drv->tcs[ACTIVE_TCS].num_tcs)
436 enable_tcs_irq(drv, i, false);
437 spin_unlock(&drv->lock);
438 wake_up(&drv->tcs_wait);
439 if (req)
440 rpmh_tx_done(req, err);
441 }
442
443 return IRQ_HANDLED;
444}
445
446
447
448
449
450
451
452
453
454
455
/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send.
 *
 * Programs MSGID/ADDR/DATA for each command in @msg into consecutive
 * command slots starting at @cmd_id, then ORs the new slots into the
 * TCS's CMD_ENABLE register. Used for all transfer types (active,
 * sleep, wake); triggering (for active) is done separately.
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid;
	u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
	u32 cmd_enable = 0;
	struct tcs_cmd *cmd;
	int i, j;

	/* Convert all commands to RR when the request has wait_for_compl set */
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		msgid = cmd_msgid;

		/*
		 * Additionally, if the cmd->wait is set, make the command
		 * response reqd even if the overall request was fire-n-forget.
		 */
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
	}

	/* OR in the new slots; sleep/wake TCSes accumulate across requests */
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: The tcs_group the new message would be sent on.
 * @msg: The message we want to send.
 *
 * Walks every in-use TCS in the group (per the "tcs_in_use" bitmap) and
 * compares the addresses it is currently processing (read back from the
 * hardware CMD_ENABLE / CMD_ADDR registers) against the addresses in @msg.
 * The hardware cannot handle two outstanding requests to the same address.
 *
 * Callers hold drv->lock, which protects "tcs_in_use".
 *
 * Return: 0 if nothing in flight conflicts, or -EBUSY on a conflict.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int j, k;
	int i = tcs->offset;

	for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, i, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}
530
531
532
533
534
535
536
537
538
539
540static int find_free_tcs(struct tcs_group *tcs)
541{
542 const struct rsc_drv *drv = tcs->drv;
543 unsigned long i;
544 unsigned long max = tcs->offset + tcs->num_tcs;
545
546 i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
547 if (i >= max)
548 return -EBUSY;
549
550 return i;
551}
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
/**
 * claim_tcs_for_req() - Claim a TCS for a request if it's sendable now.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Callers hold drv->lock.
 *
 * Return: The global ID of a free TCS, -EBUSY if a conflicting request is
 * already in flight or no TCS is free.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in flight or being processed.
	 */
	int busy = check_for_req_inflight(drv, tcs, msg);

	return busy ? busy : find_free_tcs(tcs);
}
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
/**
 * rpmh_rsc_send_data() - Write and trigger an (active-type) message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * May block (sleeps on drv->tcs_wait) until a TCS is free, so must not be
 * called from atomic context.
 *
 * Return: 0 on success, or the error from get_tcs_for_msg() (-EINVAL for
 * an unknown msg->state).
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in the selected
		 * re-purposed TCS to avoid triggering them. tcs->slots is
		 * cleaned separately via rpmh_rsc_invalidate().
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger with the last line
	 *   of __tcs_set_trigger().
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
666 int *tcs_id, int *cmd_id)
667{
668 int slot, offset;
669 int i = 0;
670
671
672 do {
673 slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
674 i, msg->num_cmds, 0);
675 if (slot >= tcs->num_tcs * tcs->ncpt)
676 return -ENOMEM;
677 i += tcs->ncpt;
678 } while (slot + msg->num_cmds - 1 >= i);
679
680 bitmap_set(tcs->slots, slot, msg->num_cmds);
681
682 offset = slot / tcs->ncpt;
683 *tcs_id = offset + tcs->offset;
684 *cmd_id = slot % tcs->ncpt;
685
686 return 0;
687}
688
689
690
691
692
693
694
695
696
697
698
699
700
701
/**
 * rpmh_rsc_write_ctrl_data() - Write a sleep/wake message to its TCS.
 * @drv: The controller.
 * @msg: The sleep/wake data to be written.
 *
 * Finds free command slots in the appropriate sleep/wake group and
 * programs the message there. The hardware itself sends these messages
 * later; nothing is triggered here.
 *
 * Return: 0 on success, -EINVAL for an unknown msg->state, or -ENOMEM if
 * no slots were free.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	int tcs_id = 0, cmd_id = 0;
	struct tcs_group *tcs;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find a free slot in the group and program the message there */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (ret)
		return ret;

	__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return 0;
}
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
736{
737 unsigned long set;
738 const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
739 unsigned long max;
740
741
742
743
744
745
746
747 if (!tcs->num_tcs)
748 tcs = &drv->tcs[WAKE_TCS];
749
750 max = tcs->offset + tcs->num_tcs;
751 set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
752
753 return set < max;
754}
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
/**
 * rpmh_rsc_cpu_pm_callback() - Flush sleep/wake state when the last CPU
 * enters a low-power mode.
 * @nfb:    Pointer to the rsc_pm notifier block in "struct rsc_drv".
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused.
 *
 * Tracks how many CPUs are in PM via drv->cpus_in_pm. When the counter
 * suggests we are the last CPU going down, flush cached sleep/wake state
 * to the hardware via rpmh_flush(). If the controller is still busy (or
 * the flush fails), veto the power-down with NOTIFY_BAD.
 *
 * Return: NOTIFY_OK, NOTIFY_BAD (veto PM entry), or NOTIFY_DONE for
 * actions we don't handle.
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * Not the last CPU: nothing to flush yet.
		 *
		 * NOTE(review): this compares against num_online_cpus(),
		 * which presumably can change concurrently with this
		 * notifier; the double-check after a failed flush below
		 * appears to exist to cover that race — confirm against
		 * the cpu_pm framework's guarantees.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* A CPU came back (or never left); undo the count. */
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab drv->lock and write out
	 * the sleep/wake commands to the RPMH hardware. Using trylock:
	 * if someone else holds the lock (e.g. an active transfer being
	 * set up), another CPU must be active, so just allow PM entry.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up and using the controller */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
836
/*
 * rpmh_probe_tcs_config() - Read TCS layout from DT and hardware config
 * registers and populate drv->tcs[] / drv->num_tcs.
 *
 * Returns 0 on success or a negative errno on bad/missing DT properties
 * or inconsistent configuration.
 */
static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv, void __iomem *base)
{
	/* (type, count) pairs parsed from the "qcom,tcs-config" property */
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	/* Per-DRV TCS count is in a field selected by this DRV's id */
	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	/* Number of commands-per-TCS, shared by all TCS types */
	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	/* "qcom,tcs-config" must be exactly one (type, count) pair per type */
	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		/* Reject duplicate entries for the same type */
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		/* CONTROL TCSes get no position in the global TCS space */
		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		/* Totals must fit in both the hardware and tcs->mask */
		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		/* Groups occupy consecutive ranges of the global TCS ids */
		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}
908
/*
 * rpmh_rsc_probe() - Probe an RSC controller: map registers, parse TCS
 * configuration, request the TX-done IRQ, optionally hook CPU PM
 * notifications, and populate child devices.
 */
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	void __iomem *base;

	/*
	 * Even though rpmh-rsc doesn't directly use cmd-db, all of its
	 * children do. Doing the check here saves each child from it.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
									ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	/* The register resource is named after this DRV's id, e.g. "drv-2" */
	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	base = devm_platform_ioremap_resource_byname(pdev, drv_id);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = rpmh_probe_tcs_config(pdev, drv, base);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM notifications are only needed when there is no HW solver:
	 * with a HW solver the controller handles low-power state on its
	 * own and we never need to flush from the last CPU down.
	 */
	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
		cpu_pm_register_notifier(&drv->rsc_pm);
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + RSC_DRV_IRQ_ENABLE);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	return devm_of_platform_populate(&pdev->dev);
}
990
static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		.name = "rpmh",
		.of_match_table = rpmh_drv_match,
		/* No remove callback; disallow manual unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
/* Register early (arch_initcall) so RPMh consumers can probe against it */
arch_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");
1014