// SPDX-License-Identifier: GPL-2.0
/*
 * SH SPI bus driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

/* register offsets */
#define SPI_SH_TBR		0x00
#define SPI_SH_RBR		0x00
#define SPI_SH_CR1		0x08
#define SPI_SH_CR2		0x10
#define SPI_SH_CR3		0x18
#define SPI_SH_CR4		0x20
#define SPI_SH_CR5		0x28

/* CR1 bits */
#define SPI_SH_TBE		0x80
#define SPI_SH_TBF		0x40
#define SPI_SH_RBE		0x20
#define SPI_SH_RBF		0x10
#define SPI_SH_PFONRD		0x08
#define SPI_SH_SSDB		0x04
#define SPI_SH_SSD		0x02
#define SPI_SH_SSA		0x01

/* CR2 bits */
#define SPI_SH_RSTF		0x80
#define SPI_SH_LOOPBK		0x40
#define SPI_SH_CPOL		0x20
#define SPI_SH_CPHA		0x10
#define SPI_SH_L1M0		0x08

/* CR3: maximum receive byte count */
#define SPI_SH_MAX_BYTE		0xFF

/* CR4 bits */
#define SPI_SH_TBEI		0x80
#define SPI_SH_TBFI		0x40
#define SPI_SH_RBEI		0x20
#define SPI_SH_RBFI		0x10
#define SPI_SH_WPABRT		0x04
#define SPI_SH_SSS		0x01

/* remaining control bits (not used by this driver) */
#define SPI_SH_P1L0		0x80
#define SPI_SH_PP1L0		0x40
#define SPI_SH_MUXI		0x20
#define SPI_SH_MUXIRQ		0x10

#define SPI_SH_FIFO_SIZE	32
#define SPI_SH_SEND_TIMEOUT	(3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT	(HZ >> 3)

#undef DEBUG

struct spi_sh_data {
        void __iomem *addr;
        int irq;
        struct spi_master *master;
        struct list_head queue;         /* pending spi_message list */
        struct work_struct ws;
        unsigned long cr1;              /* CR1 status bits latched by the IRQ handler */
        wait_queue_head_t wait;
        spinlock_t lock;                /* protects queue */
        int width;                      /* register access width: 8 or 32 */
};

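/*
 * Register accessors. The controller may be mapped for either 8-bit or
 * 32-bit access (ss->width, chosen from the platform resource type in
 * probe); the 8-bit mapping uses the register offsets divided by four.
 */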
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
                         unsigned long offset)
{
        if (ss->width == 8)
                iowrite8(data, ss->addr + (offset >> 2));
        else if (ss->width == 32)
                iowrite32(data, ss->addr + offset);
}

static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
        if (ss->width == 8)
                return ioread8(ss->addr + (offset >> 2));
        else if (ss->width == 32)
                return ioread32(ss->addr + offset);
        else
                return 0;
}

static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
                           unsigned long offset)
{
        unsigned long tmp;

        tmp = spi_sh_read(ss, offset);
        tmp |= val;
        spi_sh_write(ss, tmp, offset);
}

static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
                             unsigned long offset)
{
        unsigned long tmp;

        tmp = spi_sh_read(ss, offset);
        tmp &= ~val;
        spi_sh_write(ss, tmp, offset);
}

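/* Pulse the FIFO-reset bit in CR2 to drop any stale transmit/receive data. */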
static void clear_fifo(struct spi_sh_data *ss)
{
        spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
        spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}

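/*
 * Busy-wait helpers: poll CR1 every 10us and give up with -ETIMEDOUT after
 * 100000 iterations (roughly one second).
 */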
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
        int timeout = 100000;

        while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
                udelay(10);
                if (timeout-- < 0)
                        return -ETIMEDOUT;
        }
        return 0;
}

static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
        int timeout = 100000;

        while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
                udelay(10);
                if (timeout-- < 0)
                        return -ETIMEDOUT;
        }
        return 0;
}

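/*
 * Transmit one spi_transfer. The FIFO is filled in chunks of up to
 * SPI_SH_FIFO_SIZE bytes; between chunks the TBE interrupt is enabled in CR4
 * and the caller sleeps until the IRQ handler latches TBE into ss->cr1.
 * A WPABRT flag in CR4 aborts the transfer with -EIO. On the last transfer
 * of the message the slave-select bits in CR1 are updated and the routine
 * waits once more for the FIFO to drain.
 */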
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
                       struct spi_transfer *t)
{
        int i, retval = 0;
        int remain = t->len;
        int cur_len;
        unsigned char *data;
        long ret;

        if (t->len)
                spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

        data = (unsigned char *)t->tx_buf;
        while (remain > 0) {
                cur_len = min(SPI_SH_FIFO_SIZE, remain);
                for (i = 0; i < cur_len &&
                                !(spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) &&
                                !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
                                i++)
                        spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);

                if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
                        /* write-protect abort flagged: give up on this transfer */
                        spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
                        retval = -EIO;
                        break;
                }

                cur_len = i;

                remain -= cur_len;
                data += cur_len;

                if (remain > 0) {
                        ss->cr1 &= ~SPI_SH_TBE;
                        spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
                        ret = wait_event_interruptible_timeout(ss->wait,
                                                ss->cr1 & SPI_SH_TBE,
                                                SPI_SH_SEND_TIMEOUT);
                        if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
                                printk(KERN_ERR "%s: timeout\n", __func__);
                                return -ETIMEDOUT;
                        }
                }
        }

        if (list_is_last(&t->transfer_list, &mesg->transfers)) {
                spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
                spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

                ss->cr1 &= ~SPI_SH_TBE;
                spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
                ret = wait_event_interruptible_timeout(ss->wait,
                                        ss->cr1 & SPI_SH_TBE,
                                        SPI_SH_SEND_TIMEOUT);
                if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
                        printk(KERN_ERR "%s: timeout\n", __func__);
                        return -ETIMEDOUT;
                }
        }

        return retval;
}

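/*
 * Receive one spi_transfer. CR3 is loaded with the number of bytes to clock
 * in (capped at SPI_SH_MAX_BYTE). While a full FIFO's worth of data remains,
 * the RBF interrupt is enabled and the caller sleeps until the IRQ handler
 * reports it; the FIFO is then drained one byte at a time through RBR.
 * CR3 is reset afterwards, clearing the FIFO first for long transfers.
 */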
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
                          struct spi_transfer *t)
{
        int i;
        int remain = t->len;
        int cur_len;
        unsigned char *data;
        long ret;

        if (t->len > SPI_SH_MAX_BYTE)
                spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
        else
                spi_sh_write(ss, t->len, SPI_SH_CR3);

        spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
        spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

        spi_sh_wait_write_buffer_empty(ss);

        data = (unsigned char *)t->rx_buf;
        while (remain > 0) {
                if (remain >= SPI_SH_FIFO_SIZE) {
                        ss->cr1 &= ~SPI_SH_RBF;
                        spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
                        ret = wait_event_interruptible_timeout(ss->wait,
                                                ss->cr1 & SPI_SH_RBF,
                                                SPI_SH_RECEIVE_TIMEOUT);
                        if (ret == 0 &&
                            spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
                                printk(KERN_ERR "%s: timeout\n", __func__);
                                return -ETIMEDOUT;
                        }
                }

                cur_len = min(SPI_SH_FIFO_SIZE, remain);
                for (i = 0; i < cur_len; i++) {
                        if (spi_sh_wait_receive_buffer(ss))
                                break;
                        data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
                }

                remain -= cur_len;
                data += cur_len;
        }

        /* reset the CR3 byte count (clear the FIFO first for long transfers) */
        if (t->len > SPI_SH_MAX_BYTE) {
                clear_fifo(ss);
                spi_sh_write(ss, 1, SPI_SH_CR3);
        } else {
                spi_sh_write(ss, 0, SPI_SH_CR3);
        }

        return 0;
}

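/*
 * Workqueue handler: drains ss->queue, running the send and/or receive half
 * of every transfer in each queued message and then completing the message.
 * The queue lock is dropped while the hardware is driven and re-taken to
 * pick up the next message; once the queue is empty the slave-select bits
 * are released and the FIFO is cleared.
 */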
static void spi_sh_work(struct work_struct *work)
{
        struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
        struct spi_message *mesg;
        struct spi_transfer *t;
        unsigned long flags;
        int ret;

        pr_debug("%s: enter\n", __func__);

        spin_lock_irqsave(&ss->lock, flags);
        while (!list_empty(&ss->queue)) {
                mesg = list_entry(ss->queue.next, struct spi_message, queue);
                list_del_init(&mesg->queue);

                spin_unlock_irqrestore(&ss->lock, flags);
                list_for_each_entry(t, &mesg->transfers, transfer_list) {
                        pr_debug("tx_buf = %p, rx_buf = %p\n",
                                 t->tx_buf, t->rx_buf);
                        pr_debug("len = %d, delay_usecs = %d\n",
                                 t->len, t->delay_usecs);

                        if (t->tx_buf) {
                                ret = spi_sh_send(ss, mesg, t);
                                if (ret < 0)
                                        goto error;
                        }
                        if (t->rx_buf) {
                                ret = spi_sh_receive(ss, mesg, t);
                                if (ret < 0)
                                        goto error;
                        }
                        mesg->actual_length += t->len;
                }
                spin_lock_irqsave(&ss->lock, flags);

                mesg->status = 0;
                if (mesg->complete)
                        mesg->complete(mesg->context);
        }

        clear_fifo(ss);
        spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
        udelay(100);

        spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
                         SPI_SH_CR1);

        clear_fifo(ss);

        spin_unlock_irqrestore(&ss->lock, flags);

        return;

error:
        mesg->status = ret;
        if (mesg->complete)
                mesg->complete(mesg->context);

        spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
                         SPI_SH_CR1);
        clear_fifo(ss);
}

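/*
 * Per-device setup: bring CR1 and CR3 to a known state, clear the FIFO and
 * set the low three bits of CR2. The CR2 value is presumably a clock-rate
 * setting (the driver does not document it), so treat the 0x07 write below
 * as hardware-specific configuration.
 */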
static int spi_sh_setup(struct spi_device *spi)
{
        struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

        pr_debug("%s: enter\n", __func__);

        spi_sh_write(ss, 0xfe, SPI_SH_CR1);
        spi_sh_write(ss, 0x00, SPI_SH_CR1);
        spi_sh_write(ss, 0x00, SPI_SH_CR3);

        clear_fifo(ss);

        spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
        udelay(10);

        return 0;
}

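/*
 * ->transfer() hook: queue the message under the spinlock and kick the
 * workqueue. The actual I/O happens later in spi_sh_work(); this call only
 * marks the message as -EINPROGRESS and returns immediately.
 */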
static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
        struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
        unsigned long flags;

        pr_debug("%s: enter\n", __func__);
        pr_debug("\tmode = %02x\n", spi->mode);

        spin_lock_irqsave(&ss->lock, flags);

        mesg->actual_length = 0;
        mesg->status = -EINPROGRESS;

        spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

        list_add_tail(&mesg->queue, &ss->queue);
        schedule_work(&ss->ws);

        spin_unlock_irqrestore(&ss->lock, flags);

        return 0;
}

static void spi_sh_cleanup(struct spi_device *spi)
{
        struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

        pr_debug("%s: enter\n", __func__);

        spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
                         SPI_SH_CR1);
}

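/*
 * Interrupt handler: latch the CR1 status bits (TBE/TBF/RBE/RBF) into
 * ss->cr1 for the sleeping sender/receiver, disable the corresponding
 * enable bits in CR4 and wake up any waiter.
 */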
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
        struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
        unsigned long cr1;

        cr1 = spi_sh_read(ss, SPI_SH_CR1);
        if (cr1 & SPI_SH_TBE)
                ss->cr1 |= SPI_SH_TBE;
        if (cr1 & SPI_SH_TBF)
                ss->cr1 |= SPI_SH_TBF;
        if (cr1 & SPI_SH_RBE)
                ss->cr1 |= SPI_SH_RBE;
        if (cr1 & SPI_SH_RBF)
                ss->cr1 |= SPI_SH_RBF;

        if (ss->cr1) {
                spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
                wake_up(&ss->wait);
        }

        return IRQ_HANDLED;
}

static int spi_sh_remove(struct platform_device *pdev)
{
        struct spi_sh_data *ss = platform_get_drvdata(pdev);

        spi_unregister_master(ss->master);
        flush_work(&ss->ws);
        free_irq(ss->irq, ss);

        return 0;
}

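/*
 * Probe: map the register window (the resource type selects 8-bit or 32-bit
 * access), request the interrupt and register the SPI master. The error
 * labels below undo what was already set up.
 */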
static int spi_sh_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct spi_master *master;
        struct spi_sh_data *ss;
        int ret, irq;

        /* get the memory-mapped register window */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "invalid resource\n");
                return -EINVAL;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
        if (master == NULL) {
                dev_err(&pdev->dev, "spi_alloc_master error.\n");
                return -ENOMEM;
        }

        ss = spi_master_get_devdata(master);
        platform_set_drvdata(pdev, ss);

        switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
        case IORESOURCE_MEM_8BIT:
                ss->width = 8;
                break;
        case IORESOURCE_MEM_32BIT:
                ss->width = 32;
                break;
        default:
                dev_err(&pdev->dev, "unsupported register access width\n");
                ret = -ENODEV;
                goto error1;
        }
        ss->irq = irq;
        ss->master = master;
        ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (ss->addr == NULL) {
                dev_err(&pdev->dev, "ioremap error.\n");
                ret = -ENOMEM;
                goto error1;
        }
        INIT_LIST_HEAD(&ss->queue);
        spin_lock_init(&ss->lock);
        INIT_WORK(&ss->ws, spi_sh_work);
        init_waitqueue_head(&ss->wait);

        ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq error\n");
                goto error1;
        }

        master->num_chipselect = 2;
        master->bus_num = pdev->id;
        master->setup = spi_sh_setup;
        master->transfer = spi_sh_transfer;
        master->cleanup = spi_sh_cleanup;

        ret = spi_register_master(master);
        if (ret < 0) {
                printk(KERN_ERR "spi_register_master error.\n");
                goto error3;
        }

        return 0;

error3:
        free_irq(irq, ss);
error1:
        spi_master_put(master);

        return ret;
}

static struct platform_driver spi_sh_driver = {
        .probe = spi_sh_probe,
        .remove = spi_sh_remove,
        .driver = {
                .name = "sh_spi",
        },
};
module_platform_driver(spi_sh_driver);

MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");