// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 *          Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

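/*
 * Register map as used by this driver: each of the two processors owns a
 * bank of core registers (XCR: interrupt enable, XMR: channel mask,
 * XSCR: channel set/clear, XTOYSR: "this processor to the other" status),
 * the banks being IPCC_PROC_OFFST apart. In the mask and status registers
 * the low half-word carries the RX ("channel occupied") bits and the high
 * half-word the TX ("channel free") bits. HWCFGR and VER are shared
 * configuration and version registers.
 */
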
#define IPCC_XCR		0x000
#define XCR_RXOIE		BIT(0)
#define XCR_TXOIE		BIT(16)

#define IPCC_XMR		0x004
#define IPCC_XSCR		0x008
#define IPCC_XTOYSR		0x00c

#define IPCC_PROC_OFFST		0x010

#define IPCC_HWCFGR		0x3f0
#define IPCFGR_CHAN_MASK	GENMASK(7, 0)

#define IPCC_VER		0x3f4
#define VER_MINREV_MASK		GENMASK(3, 0)
#define VER_MAJREV_MASK		GENMASK(7, 4)

#define RX_BIT_MASK		GENMASK(15, 0)
#define RX_BIT_CHAN(chan)	BIT(chan)
#define TX_BIT_SHIFT		16
#define TX_BIT_MASK		GENMASK(31, 16)
#define TX_BIT_CHAN(chan)	BIT(TX_BIT_SHIFT + (chan))

#define STM32_MAX_PROCS		2

enum {
	IPCC_IRQ_RX,
	IPCC_IRQ_TX,
	IPCC_IRQ_NUM,
};

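/**
 * struct stm32_ipcc - STM32 IPCC mailbox controller context
 * @controller:	mailbox controller registered with the framework
 * @reg_base:	base address of the IPCC registers
 * @reg_proc:	base address of the local processor's register bank
 * @clk:	IPCC peripheral clock
 * @irqs:	rx and tx interrupt lines
 * @wkp:	dedicated wakeup interrupt (when "wakeup-source" is set)
 * @proc_id:	local processor id (0 or 1)
 * @n_chans:	number of channels reported by the hardware
 * @xcr:	XCR content saved across suspend/resume
 * @xmr:	XMR content saved across suspend/resume
 */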
struct stm32_ipcc {
	struct mbox_controller controller;
	void __iomem *reg_base;
	void __iomem *reg_proc;
	struct clk *clk;
	int irqs[IPCC_IRQ_NUM];
	int wkp;
	u32 proc_id;
	u32 n_chans;
	u32 xcr;
	u32 xmr;
};

static inline void stm32_ipcc_set_bits(void __iomem *reg, u32 mask)
{
	writel_relaxed(readl_relaxed(reg) | mask, reg);
}

static inline void stm32_ipcc_clr_bits(void __iomem *reg, u32 mask)
{
	writel_relaxed(readl_relaxed(reg) & ~mask, reg);
}

static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
{
	struct stm32_ipcc *ipcc = data;
	struct device *dev = ipcc->controller.dev;
	u32 status, mr, tosr, chan;
	irqreturn_t ret = IRQ_NONE;
	int proc_offset;

	/* read 'channel occupied' status from the other processor's bank */
	proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
	tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
	mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

	/* search for unmasked 'channel occupied' */
	status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);

	for (chan = 0; chan < ipcc->n_chans; chan++) {
		if (!(status & (1 << chan)))
			continue;

		dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);

		mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);

		/* clear the 'channel occupied' flag */
		stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR,
				    RX_BIT_CHAN(chan));

		ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
{
	struct stm32_ipcc *ipcc = data;
	struct device *dev = ipcc->controller.dev;
	u32 status, mr, tosr, chan;
	irqreturn_t ret = IRQ_NONE;

	tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
	mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);

	/* search for unmasked 'channel free' */
	status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);

	for (chan = 0; chan < ipcc->n_chans; chan++) {
		if (!(status & (1 << chan)))
			continue;

		dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);

		/* mask 'tx channel free' interrupt until the next send */
		stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
				    TX_BIT_CHAN(chan));

		mbox_chan_txdone(&ipcc->controller.chans[chan], 0);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
	unsigned int chan = (unsigned int)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);

	dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);

	/* set channel n occupied */
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, TX_BIT_CHAN(chan));

	/* unmask 'tx channel free' interrupt to get the txdone notification */
	stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan));

	return 0;
}

static int stm32_ipcc_startup(struct mbox_chan *link)
{
	unsigned int chan = (unsigned int)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);
	int ret;

	ret = clk_prepare_enable(ipcc->clk);
	if (ret) {
		dev_err(ipcc->controller.dev, "can not enable the clock\n");
		return ret;
	}

	/* unmask 'rx channel occupied' interrupt */
	stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan));

	return 0;
}

static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
	unsigned int chan = (unsigned int)link->con_priv;
	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
					       controller);

	/* mask rx and tx interrupts for this channel */
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));

	clk_disable_unprepare(ipcc->clk);
}

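/*
 * TX completion is interrupt driven: send_data() only marks the channel
 * occupied and unmasks its 'channel free' interrupt, and stm32_ipcc_tx_irq()
 * then reports txdone to the framework. The controller is therefore
 * registered with txdone_irq set in probe().
 */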
static const struct mbox_chan_ops stm32_ipcc_ops = {
	.send_data	= stm32_ipcc_send_data,
	.startup	= stm32_ipcc_startup,
	.shutdown	= stm32_ipcc_shutdown,
};

static int stm32_ipcc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct stm32_ipcc *ipcc;
	struct resource *res;
	unsigned int i;
	int ret;
	u32 ip_ver;
	static const char * const irq_name[] = {"rx", "tx"};
	irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};

	if (!np) {
		dev_err(dev, "No DT found\n");
		return -ENODEV;
	}

	ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
	if (!ipcc)
		return -ENOMEM;

	/* proc_id */
	if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
		dev_err(dev, "Missing st,proc-id\n");
		return -ENODEV;
	}

	if (ipcc->proc_id >= STM32_MAX_PROCS) {
		dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
		return -EINVAL;
	}

	/* regs */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ipcc->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ipcc->reg_base))
		return PTR_ERR(ipcc->reg_base);

	ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;

	/* clock */
	ipcc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ipcc->clk))
		return PTR_ERR(ipcc->clk);

	ret = clk_prepare_enable(ipcc->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* irqs */
	for (i = 0; i < IPCC_IRQ_NUM; i++) {
		ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
		if (ipcc->irqs[i] < 0) {
			dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
			ret = ipcc->irqs[i];
			goto err_clk;
		}

		ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
						irq_thread[i], IRQF_ONESHOT,
						dev_name(dev), ipcc);
		if (ret) {
			dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
			goto err_clk;
		}
	}

	/* mask all channel irqs, then enable the processor rx/tx irqs */
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_MASK | TX_BIT_MASK);
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE);

	/* wakeup */
	if (of_property_read_bool(np, "wakeup-source")) {
		ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
		if (ipcc->wkp < 0) {
			dev_err(dev, "could not get wakeup IRQ\n");
			ret = ipcc->wkp;
			goto err_clk;
		}

		device_init_wakeup(dev, true);
		ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
		if (ret) {
			dev_err(dev, "Failed to set wake up irq\n");
			goto err_init_wkp;
		}
	} else {
		device_init_wakeup(dev, false);
	}

	/* mailbox controller */
	ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
	ipcc->n_chans &= IPCFGR_CHAN_MASK;

	ipcc->controller.dev = dev;
	ipcc->controller.txdone_irq = true;
	ipcc->controller.ops = &stm32_ipcc_ops;
	ipcc->controller.num_chans = ipcc->n_chans;
	ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
					      sizeof(*ipcc->controller.chans),
					      GFP_KERNEL);
	if (!ipcc->controller.chans) {
		ret = -ENOMEM;
		goto err_irq_wkp;
	}

	for (i = 0; i < ipcc->controller.num_chans; i++)
		ipcc->controller.chans[i].con_priv = (void *)i;

	ret = mbox_controller_register(&ipcc->controller);
	if (ret)
		goto err_irq_wkp;

	platform_set_drvdata(pdev, ipcc);

	ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);

	dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
		 FIELD_GET(VER_MAJREV_MASK, ip_ver),
		 FIELD_GET(VER_MINREV_MASK, ip_ver),
		 ipcc->controller.num_chans, ipcc->proc_id);

	clk_disable_unprepare(ipcc->clk);
	return 0;

err_irq_wkp:
	if (ipcc->wkp)
		dev_pm_clear_wake_irq(dev);
err_init_wkp:
	device_init_wakeup(dev, false);
err_clk:
	clk_disable_unprepare(ipcc->clk);
	return ret;
}

static int stm32_ipcc_remove(struct platform_device *pdev)
{
	struct stm32_ipcc *ipcc = platform_get_drvdata(pdev);

	mbox_controller_unregister(&ipcc->controller);

	if (ipcc->wkp)
		dev_pm_clear_wake_irq(&pdev->dev);

	device_init_wakeup(&pdev->dev, false);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable)
{
	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
	unsigned int i;

	if (device_may_wakeup(dev))
		for (i = 0; i < IPCC_IRQ_NUM; i++)
			irq_set_irq_wake(ipcc->irqs[i], enable);
}

static int stm32_ipcc_suspend(struct device *dev)
{
	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

	/* save the mask and control registers, restored on resume */
	ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
	ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);

	stm32_ipcc_set_irq_wake(dev, true);

	return 0;
}

static int stm32_ipcc_resume(struct device *dev)
{
	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);

	stm32_ipcc_set_irq_wake(dev, false);

	writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
	writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
			 stm32_ipcc_suspend, stm32_ipcc_resume);

static const struct of_device_id stm32_ipcc_of_match[] = {
	{ .compatible = "st,stm32mp1-ipcc" },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);

static struct platform_driver stm32_ipcc_driver = {
	.driver = {
		.name = "stm32-ipcc",
		.pm = &stm32_ipcc_pm_ops,
		.of_match_table = stm32_ipcc_of_match,
	},
	.probe		= stm32_ipcc_probe,
	.remove		= stm32_ipcc_remove,
};

module_platform_driver(stm32_ipcc_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_DESCRIPTION("STM32 IPCC driver");
MODULE_LICENSE("GPL v2");