/*
 * Driver for the Freescale MPC512x LocalPlus Bus FIFO
 * (called SCLPC in the MPC512x Reference Manual).
 *
 * Author: Alexander Popov <alex.popov@linux.com>
 * (see MODULE_AUTHOR/MODULE_LICENSE at the bottom of this file)
 */
9#include <linux/interrupt.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/of_platform.h>
14#include <linux/of_address.h>
15#include <linux/of_irq.h>
16#include <asm/mpc5121.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19#include <linux/slab.h>
20#include <linux/dmaengine.h>
21#include <linux/dma-direction.h>
22#include <linux/dma-mapping.h>
23
24#define DRV_NAME "mpc512x_lpbfifo"
25
/*
 * One chip-select address window on the LocalPlus Bus.
 *
 * The field layout deliberately mirrors one quadruplet of the localbus
 * "ranges" device-tree property, so get_cs_ranges() can read the whole
 * property straight into an array of these (see the (u32 *) cast there).
 */
struct cs_range {
	u32 csnum;	/* chip-select number */
	u32 base;	/* offset within the chip select; must be 0 (checked) */
	u32 addr;	/* start physical address of the window */
	u32 size;	/* window size in bytes */
};
32
/*
 * Driver-global state. There is a single SCLPC/LPBFIFO instance, so all
 * state lives in this one static singleton, guarded by @lock.
 */
static struct lpbfifo_data {
	spinlock_t lock;			/* protects all fields below */
	phys_addr_t regs_phys;			/* register block bus address */
	resource_size_t regs_size;		/* register block size */
	struct mpc512x_lpbfifo __iomem *regs;	/* NULL once removed: checked
						 * by IRQ and DMA callbacks */
	int irq;				/* LPBFIFO interrupt number */
	struct cs_range *cs_ranges;		/* localbus CS windows */
	size_t cs_n;				/* number of CS windows */
	struct dma_chan *chan;			/* DMA channel for transfers */
	struct mpc512x_lpbfifo_request *req;	/* in-flight request or NULL */
	dma_addr_t ram_bus_addr;		/* mapped RAM side of the DMA */
	bool wait_lpbfifo_irq;			/* still awaiting FIFO IRQ */
	bool wait_lpbfifo_callback;		/* still awaiting DMA callback */
} lpbfifo;
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * LPBFIFO interrupt handler.
 *
 * Only RAM-to-peripheral (write) transfers enable the "normal completion"
 * interrupt (see mpc512x_lpbfifo_kick()), so an IRQ while a read — or no
 * request at all — is in flight is reported as bogus.
 *
 * A request is finished only after BOTH this IRQ and the DMA-engine
 * callback have fired; whichever comes second clears lpbfifo.req and
 * invokes the requester's callback. The spinlock is dropped before that
 * callback so it may legally submit a new request.
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
	struct device *dev = (struct device *)param;
	struct mpc512x_lpbfifo_request *req = NULL;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* Device is being removed: regs was cleared under the lock. */
	if (!lpbfifo.regs)
		goto end;

	req = lpbfifo.req;
	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
		dev_err(dev, "bogus LPBFIFO IRQ\n");
		goto end;
	}

	status = in_be32(&lpbfifo.regs->status);
	if (status != MPC512X_SCLPC_SUCCESS) {
		dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
		/* Put controller and FIFO back into a sane (reset) state. */
		out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
		goto end;
	}

	/* Acknowledge the successful-completion status. */
	out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

	lpbfifo.wait_lpbfifo_irq = false;

	/* DMA callback not yet run: it will finish the request. */
	if (lpbfifo.wait_lpbfifo_callback)
		goto end;

	/* Both events arrived: mark the FIFO idle. */
	lpbfifo.req = NULL;

	/* Unlock before calling out so the callback may resubmit. */
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;

 end:
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return IRQ_HANDLED;
}
110
111
112
113
114
/*
 * DMA-engine completion callback for the slave transfer submitted in
 * mpc512x_lpbfifo_kick().
 *
 * Unmaps the RAM buffer and, if the LPBFIFO IRQ half of the handshake is
 * already done (always true for reads, which never wait for the IRQ),
 * completes the request. As in the IRQ handler, the lock is released
 * before invoking the requester's callback.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
	unsigned long flags;
	struct mpc512x_lpbfifo_request *req = NULL;
	enum dma_data_direction dir;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* Device is being removed: regs was cleared under the lock. */
	if (!lpbfifo.regs) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	req = lpbfifo.req;
	if (!req) {
		pr_err("bogus LPBFIFO callback\n");
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	/* Release the mapping created in mpc512x_lpbfifo_kick(). */
	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;
	dma_unmap_single(lpbfifo.chan->device->dev,
			lpbfifo.ram_bus_addr, req->size, dir);

	lpbfifo.wait_lpbfifo_callback = false;

	if (!lpbfifo.wait_lpbfifo_irq) {
		/* Both events arrived: mark the FIFO idle. */
		lpbfifo.req = NULL;

		/* Unlock before calling out so the callback may resubmit. */
		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		if (req->callback)
			req->callback(req);
	} else {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
	}
}
157
158static int mpc512x_lpbfifo_kick(void)
159{
160 u32 bits;
161 bool no_incr = false;
162 u32 bpt = 32;
163 u32 cs = 0;
164 size_t i;
165 struct dma_device *dma_dev = NULL;
166 struct scatterlist sg;
167 enum dma_data_direction dir;
168 struct dma_slave_config dma_conf = {};
169 struct dma_async_tx_descriptor *dma_tx = NULL;
170 dma_cookie_t cookie;
171 int ret;
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189 if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
190 return -EINVAL;
191
192 if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
193 bpt = lpbfifo.req->portsize;
194 no_incr = true;
195 }
196
197 while (bpt > 1) {
198 if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
199 IS_ALIGNED(lpbfifo.req->size, bpt)) {
200 break;
201 }
202
203 if (no_incr)
204 return -EINVAL;
205
206 bpt >>= 1;
207 }
208 dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
209 dma_conf.src_maxburst = max(bpt, 0x4u) / 4;
210
211 for (i = 0; i < lpbfifo.cs_n; i++) {
212 phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
213 phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
214 phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
215 phys_addr_t access_end = access_start + lpbfifo.req->size;
216
217 if (access_start >= cs_start && access_end <= cs_end) {
218 cs = lpbfifo.cs_ranges[i].csnum;
219 break;
220 }
221 }
222 if (i == lpbfifo.cs_n)
223 return -EFAULT;
224
225
226 dma_dev = lpbfifo.chan->device;
227
228 if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
229 dir = DMA_TO_DEVICE;
230 dma_conf.direction = DMA_MEM_TO_DEV;
231 dma_conf.dst_addr = lpbfifo.regs_phys +
232 offsetof(struct mpc512x_lpbfifo, data_word);
233 } else {
234 dir = DMA_FROM_DEVICE;
235 dma_conf.direction = DMA_DEV_TO_MEM;
236 dma_conf.src_addr = lpbfifo.regs_phys +
237 offsetof(struct mpc512x_lpbfifo, data_word);
238 }
239 dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
240 dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
241
242
243 if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
244 ret = -EINVAL;
245 goto err_dma_prep;
246 }
247
248 sg_init_table(&sg, 1);
249
250 sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
251 lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
252 if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
253 return -EFAULT;
254
255 lpbfifo.ram_bus_addr = sg_dma_address(&sg);
256
257 sg_dma_len(&sg) = lpbfifo.req->size;
258
259 dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
260 1, dma_conf.direction, 0);
261 if (!dma_tx) {
262 ret = -ENOSPC;
263 goto err_dma_prep;
264 }
265 dma_tx->callback = mpc512x_lpbfifo_callback;
266 dma_tx->callback_param = NULL;
267
268
269 out_be32(&lpbfifo.regs->enable,
270 MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
271 out_be32(&lpbfifo.regs->enable, 0x0);
272
273
274
275
276
277
278
279
280 out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
281 out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));
282
283
284
285
286
287 out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);
288
289
290
291
292
293 bits = MPC512X_SCLPC_CS(cs);
294 if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
295 bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
296 if (no_incr)
297 bits |= MPC512X_SCLPC_DAI;
298 bits |= MPC512X_SCLPC_BPT(bpt);
299 out_be32(&lpbfifo.regs->ctrl, bits);
300
301
302 bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
303 if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
304 bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
305 else
306 lpbfifo.wait_lpbfifo_irq = false;
307
308 out_be32(&lpbfifo.regs->enable, bits);
309
310
311 bits = lpbfifo.req->size | MPC512X_SCLPC_START;
312 out_be32(&lpbfifo.regs->pkt_size, bits);
313
314
315 cookie = dma_tx->tx_submit(dma_tx);
316 if (dma_submit_error(cookie)) {
317 ret = -ENOSPC;
318 goto err_dma_submit;
319 }
320
321 return 0;
322
323 err_dma_submit:
324 out_be32(&lpbfifo.regs->enable,
325 MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
326 err_dma_prep:
327 dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
328 lpbfifo.req->size, dir);
329 return ret;
330}
331
332static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
333{
334 int ret = 0;
335
336 if (!lpbfifo.regs)
337 return -ENODEV;
338
339
340 if (lpbfifo.req)
341 return -EBUSY;
342
343 lpbfifo.wait_lpbfifo_irq = true;
344 lpbfifo.wait_lpbfifo_callback = true;
345 lpbfifo.req = req;
346
347 ret = mpc512x_lpbfifo_kick();
348 if (ret != 0)
349 lpbfifo.req = NULL;
350
351 return ret;
352}
353
354int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
355{
356 unsigned long flags;
357 int ret = 0;
358
359 spin_lock_irqsave(&lpbfifo.lock, flags);
360 ret = mpc512x_lpbfifo_submit_locked(req);
361 spin_unlock_irqrestore(&lpbfifo.lock, flags);
362
363 return ret;
364}
365EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
366
367
368
369
370
371
372static int get_cs_ranges(struct device *dev)
373{
374 int ret = -ENODEV;
375 struct device_node *lb_node;
376 const u32 *addr_cells_p;
377 const u32 *size_cells_p;
378 int proplen;
379 size_t i;
380
381 lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
382 if (!lb_node)
383 return ret;
384
385
386
387
388
389
390
391
392
393
394
395 addr_cells_p = of_get_property(lb_node, "#address-cells", NULL);
396 size_cells_p = of_get_property(lb_node, "#size-cells", NULL);
397 if (addr_cells_p == NULL || *addr_cells_p != 2 ||
398 size_cells_p == NULL || *size_cells_p != 1) {
399 goto end;
400 }
401
402 proplen = of_property_count_u32_elems(lb_node, "ranges");
403 if (proplen <= 0 || proplen % 4 != 0)
404 goto end;
405
406 lpbfifo.cs_n = proplen / 4;
407 lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
408 sizeof(struct cs_range), GFP_KERNEL);
409 if (!lpbfifo.cs_ranges)
410 goto end;
411
412 if (of_property_read_u32_array(lb_node, "ranges",
413 (u32 *)lpbfifo.cs_ranges, proplen) != 0) {
414 goto end;
415 }
416
417 for (i = 0; i < lpbfifo.cs_n; i++) {
418 if (lpbfifo.cs_ranges[i].base != 0)
419 goto end;
420 }
421
422 ret = 0;
423
424 end:
425 of_node_put(lb_node);
426 return ret;
427}
428
/*
 * Probe: acquire the "rx-tx" DMA channel, map the SCLPC register block,
 * reset the hardware, parse the localbus chip-select ranges, and install
 * the interrupt handler. The mem region and ioremap are devm-managed;
 * the DMA channel and IRQ mapping are unwound manually on error.
 */
static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
	struct resource r;
	int ret = 0;

	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
	spin_lock_init(&lpbfifo.lock);

	lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(lpbfifo.chan))
		return PTR_ERR(lpbfifo.chan);

	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.regs_phys = r.start;
	lpbfifo.regs_size = resource_size(&r);

	if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
					lpbfifo.regs_size, DRV_NAME)) {
		dev_err(&pdev->dev, "unable to request region\n");
		ret = -EBUSY;
		goto err0;
	}

	lpbfifo.regs = devm_ioremap(&pdev->dev,
					lpbfifo.regs_phys, lpbfifo.regs_size);
	if (!lpbfifo.regs) {
		dev_err(&pdev->dev, "mapping registers failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* Put the controller and the FIFO into a known (reset) state. */
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	if (get_cs_ranges(&pdev->dev) != 0) {
		dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!lpbfifo.irq) {
		dev_err(&pdev->dev, "mapping irq failed\n");
		ret = -ENODEV;
		goto err0;
	}

	if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
					DRV_NAME, &pdev->dev) != 0) {
		dev_err(&pdev->dev, "requesting irq failed\n");
		ret = -ENODEV;
		goto err1;
	}

	dev_info(&pdev->dev, "probe succeeded\n");
	return 0;

 err1:
	irq_dispose_mapping(lpbfifo.irq);
 err0:
	dma_release_channel(lpbfifo.chan);
	return ret;
}
497
/*
 * Remove: clear lpbfifo.regs under the lock first, so the IRQ handler
 * and the DMA callback (which both test it) treat any late invocation
 * as a no-op; then stop the DMA, reset the hardware via the saved
 * pointer, and release the IRQ and the DMA channel.
 */
static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
	struct mpc512x_lpbfifo __iomem *regs = NULL;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	regs = lpbfifo.regs;
	lpbfifo.regs = NULL;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	dma_dev->device_terminate_all(lpbfifo.chan);
	out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);

	return 0;
}
518
/* Device-tree match table: binds to the "sclpc" node. */
static const struct of_device_id mpc512x_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc512x-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
	.remove = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
	},
};

/* Registers the driver and generates module init/exit boilerplate. */
module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <alex.popov@linux.com>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");
539