/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure before it is registered anywhere with the
 * spi core, or to call spi_dev_put() on failure so the reference
 * taken on the master is dropped again.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
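
/* Usage sketch (not from this file; the modalias and chip parameters are
 * illustrative only): an adapter driver that learns about a child device
 * out-of-band can allocate and register it by hand with this pair of calls.
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "spidev", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */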

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
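
/* Usage sketch (board file; the devices shown are hypothetical): boards
 * declare their hard-wired SPI devices once during early init, typically
 * from an arch_initcall, and the core matches them to controllers later.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mtd_dataflash",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 15 * 1000 * 1000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */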

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				/* estimate the wire time, then double it and
				 * add 100 ms of tolerance before timing out
				 */
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 100; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
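
/* Usage sketch (interrupt driven controller driver; the foo_* names are
 * hypothetical): transfer_one() starts the hardware and returns a positive
 * value, so the core above waits on master->xfer_completion with a timeout;
 * the IRQ handler then completes the in-flight transfer via
 * spi_finalize_current_transfer().
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_hw_start(master, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */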

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mutex_lock(&master->io_mutex);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&master->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	kthread_init_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	kthread_queue_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		kthread_queue_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi)) {
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_master *master,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(master, adev);
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};

/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	pm_suspend_ignore_children(&master->dev, true);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, initialize the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
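
/* Usage sketch (platform driver probe; struct foo_priv and
 * foo_transfer_one are hypothetical): the devres-managed variant pairs
 * naturally with spi_alloc_master(), and bus_num = -1 requests a
 * dynamically assigned bus number as handled in spi_register_master().
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->bus_num = -1;
 *	master->num_chipselect = 4;
 *	master->transfer_one = foo_transfer_one;
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		spi_master_put(master);
 *	return ret;
 */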

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_master to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
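
/* Usage sketch: code preparing a message can attach scratch data whose
 * lifetime matches that of the message.  With a NULL release callback the
 * data is simply kfree'd by spi_res_release() once the message completes.
 *
 *	void *scratch = spi_res_alloc(msg->spi, NULL, len, GFP_KERNEL);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	spi_res_add(msg, scratch);
 */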

/**
 * spi_res_release - release all spi resources for this message
 * @master:  the @spi_master
 * @message: the @spi_message
 */
void spi_res_release(struct spi_master *master,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(master, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_master *master,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(master, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after it is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* now insert the @insert new transfers, working in reverse
	 * order so that the list ends up in the correct sequence
	 */
	for (i = 0; i < insert; i++) {
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle the split-transfers in the individual entries:
	 * overwrite len and fill in tx_buf, rx_buf and their dma
	 * addresses with the correct offsets.
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @master:  the @spi_master for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum transfer size above which to apply this
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_master *master,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(
				master, msg, &xfer, maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
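
/* Usage sketch (controller driver; the 64 KiB cap and foo_* name are
 * illustrative): a prepare_message() hook can use this helper to respect
 * a hardware per-transfer length limit.
 *
 *	static int foo_prepare_message(struct spi_master *master,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */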

/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its current settings.  Programmers of new
 * device drivers should note that devices may be "hot" (connected at
 * runtime) whenever the system is running.
 *
 * Changes to the underlying configuration (mode, wordsize, speed) take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* check mode to prevent DUAL and QUAD being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
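
/*
 * Example (illustrative sketch only; foo_probe() is hypothetical): a
 * protocol driver typically overrides the defaults in its probe() and
 * then calls spi_setup() to validate them against the controller:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 */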

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/* Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
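
/*
 * Example (illustrative sketch only; struct foo_device and the foo_*()
 * names are hypothetical): an asynchronous submission supplies a callback
 * that runs when the message finishes.  The caller must keep the message
 * and its buffers alive until then.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_device *foo = context;
 *
 *		// foo->msg.status now holds zero or a negative errno
 *		complete(&foo->done);
 *	}
 *	...
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	ret = spi_async(foo->spi, &foo->msg);
 */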

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * This variant is for callers that already hold the bus lock taken via
 * spi_bus_lock(); unlike spi_async(), it does not check bus_lock_flag.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/**
 * spi_flash_read - read data from SPI flash
 * @spi: device from which data will be read
 * @msg: spi flash read message
 *
 * This function can be used for reading data from SPI flash.  Callers
 * must only use it on controllers that implement the ->spi_flash_read()
 * hook, since that hook is invoked unconditionally below.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_master *master = spi->master;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
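
/*
 * Example (illustrative sketch only, using just the message fields touched
 * above; a real caller such as a SPI NOR driver also fills in the opcode
 * and address fields of the message): issue an accelerated flash read with
 * a single-bit command phase and a quad data phase.
 *
 *	struct spi_flash_read_message msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.buf = buf;
 *	msg.len = len;
 *	msg.opcode_nbits = SPI_NBITS_SINGLE;
 *	msg.addr_nbits = SPI_NBITS_SINGLE;
 *	msg.data_nbits = SPI_NBITS_QUAD;
 *	ret = spi_flash_read(spi, &msg);
 */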

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->master->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->master->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
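
/*
 * Example (illustrative sketch only): a single full-duplex transfer built
 * with the spi_message helpers and submitted synchronously; tx and rx are
 * assumed to be DMA-safe (e.g. kmalloc'd) buffers of equal size.
 *
 *	struct spi_transfer t = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);
 */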

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
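
/*
 * Example (illustrative sketch only): a driver that must issue two
 * messages back to back, without another device getting onto the bus in
 * between, brackets them with the bus lock:
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->master);
 */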

/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
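
/*
 * Example (illustrative sketch only; 0x05 is a hypothetical "read status"
 * opcode): read one byte back after sending a one-byte command.  Neither
 * buffer needs to be DMA-safe, since the data is bounced through the
 * small buffer above.
 *
 *	u8 cmd = 0x05;
 *	u8 status;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &status, 1);
 */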

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
					     __spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not using spi_bus, so we find it with another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
			       __func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_master_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_master, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);