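/*
 * Qualcomm Shared Memory State Machine (SMSM) driver.
 *
 * Exposes the local host's 32-bit shared state word through the
 * qcom_smem_state framework and turns changes in the remote hosts' state
 * words into nested interrupts, one irq_domain per remote entry.
 */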
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
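
/*
 * The state lives in two SMEM items: SMEM_SMSM_SHARED_STATE is a vector with
 * one 32-bit state word per entry, and SMEM_SMSM_CPU_INTR_MASK is a
 * num_entries x num_hosts matrix of masks, where element [entry][host]
 * selects which bits of that entry's state word the given host wants to be
 * notified about. SMEM_SMSM_SIZE_INFO, when present, describes the actual
 * number of entries and hosts.
 */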
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;
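
/**
 * struct qcom_smsm - driver context for the local SMSM instance
 * @dev:		device pointer
 * @local_host:		entry/host id of the local processor
 * @num_hosts:		number of hosts in the interrupt mask matrix
 * @num_entries:	number of shared state entries
 * @local_state:	pointer to the local host's state word in SMEM
 * @subscription:	the local entry's row of the interrupt mask matrix,
 *			i.e. which bits of the local state each host wants
 * @state:		qcom_smem_state handle for outgoing state updates
 * @lock:		spinlock serializing updates of @local_state
 * @entries:		context for each of the @num_entries entries
 * @hosts:		IPC descriptors for each of the @num_hosts hosts
 */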
struct qcom_smsm {
	struct device *dev;

	u32 local_host;

	u32 num_hosts;
	u32 num_entries;

	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;

	spinlock_t lock;

	struct smsm_entry *entries;
	struct smsm_host *hosts;
};
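
/**
 * struct smsm_entry - per-entry context for a remote host's state word
 * @smsm:		back-reference to the driver context
 * @domain:		irq_domain exposing the 32 state bits as interrupts
 * @irq_enabled:	bitmap of currently unmasked state bits
 * @irq_rising:		bitmap of bits configured for rising edge delivery
 * @irq_falling:	bitmap of bits configured for falling edge delivery
 * @last_value:		state word value seen at the previous interrupt
 * @remote_state:	pointer to the entry's state word in SMEM
 * @subscription:	the entry's row of the interrupt mask matrix
 */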
struct smsm_entry {
	struct qcom_smsm *smsm;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	u32 last_value;

	u32 *remote_state;
	u32 *subscription;
};
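
/**
 * struct smsm_host - IPC mailbox used to kick a remote host
 * @ipc_regmap:	syscon regmap of the IPC register block
 * @ipc_offset:	offset of the IPC register within @ipc_regmap
 * @ipc_bit:	bit to write in order to raise the outgoing interrupt
 */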
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;
};
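
/**
 * smsm_update_bits() - change the masked bits of the local state word
 * @data:	qcom_smsm context, as registered with the smem_state framework
 * @mask:	bits to update
 * @value:	new value of the masked bits
 *
 * Updates the local host's state word in SMEM and kicks every remote host
 * that has subscribed to one of the changed bits.
 */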
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the update is visible before kicking any remote hosts */
	wmb();

	/* Signal any hosts subscribed to the changed bits */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (val & changes && hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};
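
/**
 * smsm_intr() - cascading interrupt handler for a remote entry
 * @irq:	incoming interrupt from the remote host
 * @data:	smsm_entry context for the remote host
 *
 * Compares the remote state word with the previously seen value and delivers
 * a nested interrupt for every enabled bit that changed in an enabled
 * direction (rising and/or falling).
 */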
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned int i;
	int irq_pin;
	u32 changed;
	u32 val;

	val = readl(entry->remote_state);
	changed = val ^ entry->last_value;
	entry->last_value = val;

	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}
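
/**
 * smsm_mask_irq() - unsubscribe from changes of the given state bit
 * @irqd:	irq_data of the interrupt to mask
 *
 * Clears the local host's bit in the remote entry's subscription mask, so the
 * remote host no longer kicks us for this bit, and drops the bit from the
 * enabled bitmap.
 */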
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}
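
/**
 * smsm_unmask_irq() - subscribe to changes of the given state bit
 * @irqd:	irq_data of the interrupt to unmask
 *
 * Marks the bit as enabled and sets the local host's bit in the remote
 * entry's subscription mask, so the remote host kicks us when it changes.
 */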
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}
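
/**
 * smsm_set_irq_type() - select rising and/or falling edge delivery for a bit
 * @irqd:	irq_data of the interrupt
 * @type:	new trigger type, which must include at least one edge
 */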
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name = "smsm",
	.irq_mask = smsm_mask_irq,
	.irq_unmask = smsm_unmask_irq,
	.irq_set_type = smsm_set_irq_type,
};
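
/**
 * smsm_irq_map() - irq_domain map callback for a state-bit interrupt
 * @d:		irq_domain backing the entry
 * @irq:	Linux irq number being mapped
 * @hw:		hardware bit number within the state word
 */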
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
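
/**
 * smsm_parse_ipc() - parse the outgoing IPC register for a host
 * @smsm:	smsm driver context
 * @host_id:	index of the host to parse
 *
 * Looks up the "qcom,ipc-N" property, which holds a phandle to a syscon
 * followed by the register offset and bit used to kick that host. A missing
 * property is not an error; it just means the host cannot be signalled.
 */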
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned int host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;

	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
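
/**
 * smsm_inbound_entry() - set up an entry representing a remote host
 * @smsm:	smsm driver context
 * @entry:	entry context to initialize
 * @node:	device tree node describing the entry
 *
 * Requests the incoming interrupt from the remote host and registers an
 * irq_domain exposing the 32 bits of its state word.
 */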
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
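
/**
 * smsm_get_size_info() - read the number of entries and hosts from SMEM
 * @smsm:	smsm driver context
 *
 * Reads the optional SMEM_SMSM_SIZE_INFO item. If it is absent or malformed,
 * the defaults of 8 entries and 3 hosts are used.
 */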
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
		if (PTR_ERR(info) != -EPROBE_DEFER)
			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
		return PTR_ERR(info);
	} else if (IS_ERR(info) || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	/* Find the child node describing the local, outgoing state */
	for_each_child_of_node(pdev->dev.of_node, local_node) {
		if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
			break;
	}
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	/* Parse the outgoing IPC properties for each host */
	for (id = 0; id < smsm->num_hosts; id++) {
		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			return ret;
	}

	/* Acquire the main SMSM state vector */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		return ret;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		return PTR_ERR(states);
	}

	/* Acquire the matrix of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		return ret;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		return PTR_ERR(intr_mask);
	}

	/* Set up the references to the local state and subscription row */
	smsm->local_state = states + smsm->local_host;
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(smsm->state);
	}

	/* Register handlers for remote processor entries of interest */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Set up the subscription pointer and clear any stale kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);

	return 0;

unwind_interfaces:
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return ret;
}

static int qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned int id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return 0;
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove = qcom_smsm_remove,
	.driver = {
		.name = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");