1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/device.h>
27#include <linux/mutex.h>
28
29#include <mach/dma.h>
30#include <mach/hardware.h>
31
32#include "ucb1x00.h"
33
/*
 * Serialises access to the two registries below and to the per-chip
 * device lists hung off them.
 */
static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);	/* all registered ucb1x00_driver structures */
static LIST_HEAD(ucb1x00_devices);	/* all successfully probed ucb1x00 chips */
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
55{
56 unsigned long flags;
57
58 spin_lock_irqsave(&ucb->io_lock, flags);
59 ucb->io_dir |= out;
60 ucb->io_dir &= ~in;
61
62 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
63 spin_unlock_irqrestore(&ucb->io_lock, flags);
64}
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
83{
84 unsigned long flags;
85
86 spin_lock_irqsave(&ucb->io_lock, flags);
87 ucb->io_out |= set;
88 ucb->io_out &= ~clear;
89
90 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
91 spin_unlock_irqrestore(&ucb->io_lock, flags);
92}
93
94
95
96
97
98
99
100
101
102
103
104
105
106unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
107{
108 return ucb1x00_reg_read(ucb, UCB_IO_DATA);
109}
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136void ucb1x00_adc_enable(struct ucb1x00 *ucb)
137{
138 down(&ucb->adc_sem);
139
140 ucb->adc_cr |= UCB_ADC_ENA;
141
142 ucb1x00_enable(ucb);
143 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
144}
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/*
 * ucb1x00_adc_read - perform an ADC conversion on one channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: channel selection bits for the ADC control register
 * @sync: non-zero to gate the conversion on the external sync pulse
 *
 * Start a conversion and poll until the data-valid bit appears.
 * The cached ucb->adc_cr is OR'd in, so this relies on the caller
 * having called ucb1x00_adc_enable() first (which sets UCB_ADC_ENA
 * and takes the ADC semaphore).  May sleep between polls.
 *
 * Returns the raw conversion value.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
	unsigned int val;

	if (sync)
		adc_channel |= UCB_ADC_SYNC_ENA;

	/* select the channel first, then raise START in a second write */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

	/*
	 * Poll for completion, yielding for a jiffy between reads.
	 * NOTE(review): there is no timeout here — if the hardware
	 * never sets UCB_ADC_DAT_VAL this loops forever.
	 */
	for (;;) {
		val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
		if (val & UCB_ADC_DAT_VAL)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return UCB_ADC_DAT(val);
}
183
184
185
186
187
188
189
190void ucb1x00_adc_disable(struct ucb1x00 *ucb)
191{
192 ucb->adc_cr &= ~UCB_ADC_ENA;
193 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
194 ucb1x00_disable(ucb);
195
196 up(&ucb->adc_sem);
197}
198
199
200
201
202
203
204
205
206
/*
 * ucb1x00_irq - chip interrupt handler
 * @irqnr: interrupt number (unused)
 * @devid: the struct ucb1x00 passed to request_irq()
 *
 * Read the interrupt status register, acknowledge the latched bits,
 * and call the handler hooked (via ucb1x00_hook_irq()) for every bit
 * that is set.
 */
static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
{
	struct ucb1x00 *ucb = devid;
	struct ucb1x00_irq *irq;
	unsigned int isr, i;

	ucb1x00_enable(ucb);
	isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
	/* write the bits then zero: acknowledges the latched status */
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* bit i of isr dispatches to irq_handler[i]; stop early once isr empties */
	for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++)
		if (isr & 1 && irq->fn)
			irq->fn(i, irq->devid);
	ucb1x00_disable(ucb);

	return IRQ_HANDLED;
}
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
244{
245 struct ucb1x00_irq *irq;
246 int ret = -EINVAL;
247
248 if (idx < 16) {
249 irq = ucb->irq_handler + idx;
250 ret = -EBUSY;
251
252 spin_lock_irq(&ucb->lock);
253 if (irq->fn == NULL) {
254 irq->devid = devid;
255 irq->fn = fn;
256 ret = 0;
257 }
258 spin_unlock_irq(&ucb->lock);
259 }
260 return ret;
261}
262
263
264
265
266
267
268
269
270
271
272
273void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
274{
275 unsigned long flags;
276
277 if (idx < 16) {
278 spin_lock_irqsave(&ucb->lock, flags);
279
280 ucb1x00_enable(ucb);
281 if (edges & UCB_RISING) {
282 ucb->irq_ris_enbl |= 1 << idx;
283 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
284 }
285 if (edges & UCB_FALLING) {
286 ucb->irq_fal_enbl |= 1 << idx;
287 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
288 }
289 ucb1x00_disable(ucb);
290 spin_unlock_irqrestore(&ucb->lock, flags);
291 }
292}
293
294
295
296
297
298
299
300
301
302void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
303{
304 unsigned long flags;
305
306 if (idx < 16) {
307 spin_lock_irqsave(&ucb->lock, flags);
308
309 ucb1x00_enable(ucb);
310 if (edges & UCB_RISING) {
311 ucb->irq_ris_enbl &= ~(1 << idx);
312 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
313 }
314 if (edges & UCB_FALLING) {
315 ucb->irq_fal_enbl &= ~(1 << idx);
316 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
317 }
318 ucb1x00_disable(ucb);
319 spin_unlock_irqrestore(&ucb->lock, flags);
320 }
321}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
337{
338 struct ucb1x00_irq *irq;
339 int ret;
340
341 if (idx >= 16)
342 goto bad;
343
344 irq = ucb->irq_handler + idx;
345 ret = -ENOENT;
346
347 spin_lock_irq(&ucb->lock);
348 if (irq->devid == devid) {
349 ucb->irq_ris_enbl &= ~(1 << idx);
350 ucb->irq_fal_enbl &= ~(1 << idx);
351
352 ucb1x00_enable(ucb);
353 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
354 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
355 ucb1x00_disable(ucb);
356
357 irq->fn = NULL;
358 irq->devid = NULL;
359 ret = 0;
360 }
361 spin_unlock_irq(&ucb->lock);
362 return ret;
363
364bad:
365 printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx);
366 return -EINVAL;
367}
368
369static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
370{
371 struct ucb1x00_dev *dev;
372 int ret = -ENOMEM;
373
374 dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
375 if (dev) {
376 dev->ucb = ucb;
377 dev->drv = drv;
378
379 ret = drv->add(dev);
380
381 if (ret == 0) {
382 list_add(&dev->dev_node, &ucb->devs);
383 list_add(&dev->drv_node, &drv->devs);
384 } else {
385 kfree(dev);
386 }
387 }
388 return ret;
389}
390
/*
 * ucb1x00_remove_dev - undo a binding made by ucb1x00_add_dev()
 * @dev: the binding to tear down
 *
 * Call the driver's remove() hook first (so it can still reach the
 * chip through @dev), then unlink from both lists and free.  Caller
 * holds ucb1x00_mutex (all current callers do).
 */
static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
	dev->drv->remove(dev);
	list_del(&dev->dev_node);
	list_del(&dev->drv_node);
	kfree(dev);
}
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
/*
 * ucb1x00_detect_irq - autoprobe the CPU interrupt line used by the chip
 * @ucb: UCB1x00 structure describing chip
 *
 * Arm the kernel's IRQ autoprobe machinery, deliberately trigger an
 * ADC-complete interrupt, and let probe_irq_off() report which line
 * fired.  Returns the detected irq number, or NO_IRQ on failure
 * (probe_irq_off() returns 0 or a negative value when detection was
 * ambiguous — the caller checks against NO_IRQ).
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
	unsigned long mask;

	mask = probe_irq_on();
	if (!mask) {
		probe_irq_off(mask);
		return NO_IRQ;
	}

	/*
	 * Enable the ADC interrupt on both edges and flush any latched
	 * state (write bits, then zero, to acknowledge).
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Kick off an ADC conversion; its completion raises the
	 * interrupt we are probing for.
	 */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/*
	 * Busy-wait for the conversion to finish, then switch the ADC
	 * back off.  NOTE(review): no timeout on this spin.
	 */
	while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

	/*
	 * Mask the ADC interrupt again and acknowledge anything still
	 * latched.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Ask the autoprobe code which line(s) fired.
	 */
	return probe_irq_off(mask);
}
461
/*
 * ucb1x00_release - device-core release callback
 * @dev: embedded struct device of a ucb1x00 chip
 *
 * Invoked by the driver core when the last reference to the device
 * is dropped; frees the containing ucb1x00 structure.
 */
static void ucb1x00_release(struct device *dev)
{
	kfree(classdev_to_ucb1x00(dev));
}
467
/*
 * Device class for UCB1x00 chips.  dev_release lets the driver core
 * free the ucb1x00 structure when its last reference goes away.
 */
static struct class ucb1x00_class = {
	.name		= "ucb1x00",
	.dev_release	= ucb1x00_release,
};
472
473static int ucb1x00_probe(struct mcp *mcp)
474{
475 struct ucb1x00 *ucb;
476 struct ucb1x00_driver *drv;
477 unsigned int id;
478 int ret = -ENODEV;
479
480 mcp_enable(mcp);
481 id = mcp_reg_read(mcp, UCB_ID);
482
483 if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
484 printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
485 goto err_disable;
486 }
487
488 ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
489 ret = -ENOMEM;
490 if (!ucb)
491 goto err_disable;
492
493
494 ucb->dev.class = &ucb1x00_class;
495 ucb->dev.parent = &mcp->attached_device;
496 dev_set_name(&ucb->dev, "ucb1x00");
497
498 spin_lock_init(&ucb->lock);
499 spin_lock_init(&ucb->io_lock);
500 sema_init(&ucb->adc_sem, 1);
501
502 ucb->id = id;
503 ucb->mcp = mcp;
504 ucb->irq = ucb1x00_detect_irq(ucb);
505 if (ucb->irq == NO_IRQ) {
506 printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
507 ret = -ENODEV;
508 goto err_free;
509 }
510
511 ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
512 "UCB1x00", ucb);
513 if (ret) {
514 printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
515 ucb->irq, ret);
516 goto err_free;
517 }
518
519 mcp_set_drvdata(mcp, ucb);
520
521 ret = device_register(&ucb->dev);
522 if (ret)
523 goto err_irq;
524
525 INIT_LIST_HEAD(&ucb->devs);
526 mutex_lock(&ucb1x00_mutex);
527 list_add(&ucb->node, &ucb1x00_devices);
528 list_for_each_entry(drv, &ucb1x00_drivers, node) {
529 ucb1x00_add_dev(ucb, drv);
530 }
531 mutex_unlock(&ucb1x00_mutex);
532 goto out;
533
534 err_irq:
535 free_irq(ucb->irq, ucb);
536 err_free:
537 kfree(ucb);
538 err_disable:
539 mcp_disable(mcp);
540 out:
541 return ret;
542}
543
544static void ucb1x00_remove(struct mcp *mcp)
545{
546 struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
547 struct list_head *l, *n;
548
549 mutex_lock(&ucb1x00_mutex);
550 list_del(&ucb->node);
551 list_for_each_safe(l, n, &ucb->devs) {
552 struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
553 ucb1x00_remove_dev(dev);
554 }
555 mutex_unlock(&ucb1x00_mutex);
556
557 free_irq(ucb->irq, ucb);
558 device_unregister(&ucb->dev);
559}
560
561int ucb1x00_register_driver(struct ucb1x00_driver *drv)
562{
563 struct ucb1x00 *ucb;
564
565 INIT_LIST_HEAD(&drv->devs);
566 mutex_lock(&ucb1x00_mutex);
567 list_add(&drv->node, &ucb1x00_drivers);
568 list_for_each_entry(ucb, &ucb1x00_devices, node) {
569 ucb1x00_add_dev(ucb, drv);
570 }
571 mutex_unlock(&ucb1x00_mutex);
572 return 0;
573}
574
575void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
576{
577 struct list_head *n, *l;
578
579 mutex_lock(&ucb1x00_mutex);
580 list_del(&drv->node);
581 list_for_each_safe(l, n, &drv->devs) {
582 struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
583 ucb1x00_remove_dev(dev);
584 }
585 mutex_unlock(&ucb1x00_mutex);
586}
587
588static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
589{
590 struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
591 struct ucb1x00_dev *dev;
592
593 mutex_lock(&ucb1x00_mutex);
594 list_for_each_entry(dev, &ucb->devs, dev_node) {
595 if (dev->drv->suspend)
596 dev->drv->suspend(dev, state);
597 }
598 mutex_unlock(&ucb1x00_mutex);
599 return 0;
600}
601
602static int ucb1x00_resume(struct mcp *mcp)
603{
604 struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
605 struct ucb1x00_dev *dev;
606
607 mutex_lock(&ucb1x00_mutex);
608 list_for_each_entry(dev, &ucb->devs, dev_node) {
609 if (dev->drv->resume)
610 dev->drv->resume(dev);
611 }
612 mutex_unlock(&ucb1x00_mutex);
613 return 0;
614}
615
/* Hooks this core driver into the MCP bus layer. */
static struct mcp_driver ucb1x00_driver = {
	.drv = {
		.name = "ucb1x00",
	},
	.probe = ucb1x00_probe,
	.remove = ucb1x00_remove,
	.suspend = ucb1x00_suspend,
	.resume = ucb1x00_resume,
};
625
626static int __init ucb1x00_init(void)
627{
628 int ret = class_register(&ucb1x00_class);
629 if (ret == 0) {
630 ret = mcp_driver_register(&ucb1x00_driver);
631 if (ret)
632 class_unregister(&ucb1x00_class);
633 }
634 return ret;
635}
636
/*
 * Module unload: unregister from the MCP bus first (stops further
 * probes and removes existing devices), then drop the device class.
 */
static void __exit ucb1x00_exit(void)
{
	mcp_driver_unregister(&ucb1x00_driver);
	class_unregister(&ucb1x00_class);
}
642
643module_init(ucb1x00_init);
644module_exit(ucb1x00_exit);
645
646EXPORT_SYMBOL(ucb1x00_io_set_dir);
647EXPORT_SYMBOL(ucb1x00_io_write);
648EXPORT_SYMBOL(ucb1x00_io_read);
649
650EXPORT_SYMBOL(ucb1x00_adc_enable);
651EXPORT_SYMBOL(ucb1x00_adc_read);
652EXPORT_SYMBOL(ucb1x00_adc_disable);
653
654EXPORT_SYMBOL(ucb1x00_hook_irq);
655EXPORT_SYMBOL(ucb1x00_free_irq);
656EXPORT_SYMBOL(ucb1x00_enable_irq);
657EXPORT_SYMBOL(ucb1x00_disable_irq);
658
659EXPORT_SYMBOL(ucb1x00_register_driver);
660EXPORT_SYMBOL(ucb1x00_unregister_driver);
661
662MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
663MODULE_DESCRIPTION("UCB1x00 core driver");
664MODULE_LICENSE("GPL");
665