1
2
3
4
5
6
7#include <linux/kernel.h>
8#include <linux/device.h>
9#include <linux/init.h>
10#include <linux/cache.h>
11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h>
13#include <linux/mutex.h>
14#include <linux/of_device.h>
15#include <linux/of_irq.h>
16#include <linux/clk/clk-conf.h>
17#include <linux/slab.h>
18#include <linux/mod_devicetable.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/spi-mem.h>
21#include <linux/of_gpio.h>
22#include <linux/gpio/consumer.h>
23#include <linux/pm_runtime.h>
24#include <linux/pm_domain.h>
25#include <linux/property.h>
26#include <linux/export.h>
27#include <linux/sched/rt.h>
28#include <uapi/linux/sched/types.h>
29#include <linux/delay.h>
30#include <linux/kthread.h>
31#include <linux/ioport.h>
32#include <linux/acpi.h>
33#include <linux/highmem.h>
34#include <linux/idr.h>
35#include <linux/platform_data/x86/apple.h>
36
37#define CREATE_TRACE_POINTS
38#include <trace/events/spi.h>
39EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
40EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
41
42#include "internals.h"
43
44static DEFINE_IDR(spi_master_idr);
45
46static void spidev_release(struct device *dev)
47{
48 struct spi_device *spi = to_spi_device(dev);
49
50
51 if (spi->controller->cleanup)
52 spi->controller->cleanup(spi);
53
54 spi_controller_put(spi->controller);
55 kfree(spi->driver_override);
56 kfree(spi);
57}
58
59static ssize_t
60modalias_show(struct device *dev, struct device_attribute *a, char *buf)
61{
62 const struct spi_device *spi = to_spi_device(dev);
63 int len;
64
65 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
66 if (len != -ENODEV)
67 return len;
68
69 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
70}
71static DEVICE_ATTR_RO(modalias);
72
/*
 * sysfs "driver_override" store: pin this device to a specific driver
 * name, bypassing the normal modalias/OF/ACPI matching.  Writing an
 * empty string clears the override.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	/* trim the string at the first newline, if any */
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* keep room for a NUL terminator within one page */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* device_lock() serializes against concurrent show/store and probe */
	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* empty write: remove the override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	/* free the previous override outside any swap of the pointer */
	kfree(old);

	return count;
}
104
105static ssize_t driver_override_show(struct device *dev,
106 struct device_attribute *a, char *buf)
107{
108 const struct spi_device *spi = to_spi_device(dev);
109 ssize_t len;
110
111 device_lock(dev);
112 len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
113 device_unlock(dev);
114 return len;
115}
116static DEVICE_ATTR_RW(driver_override);
117
/*
 * For one statistics field, generate a pair of sysfs show functions and
 * device_attribute structures: one bound to the controller's statistics
 * block and one bound to the device's.  "file" is the sysfs file name;
 * both attributes are world-readable (0444).
 */
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}
142
/*
 * Generate the spi_statistics_<name>_show() helper, which formats a
 * single statistics field while holding the statistics spinlock, then
 * instantiate the controller and device sysfs attributes for it via
 * SPI_STATISTICS_ATTRS().
 */
#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

/* Common case: the sysfs file name is exactly the field name */
#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
159
/* Core message and transfer counters */
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

/* How messages were submitted: sync, sync executed inline, or async */
SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

/* Byte counters: total and per direction */
SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

/*
 * Power-of-two histogram of transfer lengths; the sysfs file name
 * carries the human-readable byte range, the last bucket is open-ended.
 */
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

/* Transfers that had to be split to honour the max transfer size */
SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
196
/* Per-device sysfs attributes: modalias and driver_override */
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

/* Per-device statistics, exposed under <device>/statistics/ */
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

/* All attribute groups attached to every SPI device on the bus */
static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
249
/* Per-controller statistics, exposed under <controller>/statistics/ */
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

/* Attribute groups attached to every SPI controller */
static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
291
292void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
293 struct spi_transfer *xfer,
294 struct spi_controller *ctlr)
295{
296 unsigned long flags;
297 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
298
299 if (l2len < 0)
300 l2len = 0;
301
302 spin_lock_irqsave(&stats->lock, flags);
303
304 stats->transfers++;
305 stats->transfer_bytes_histo[l2len]++;
306
307 stats->bytes += xfer->len;
308 if ((xfer->tx_buf) &&
309 (xfer->tx_buf != ctlr->dummy_tx))
310 stats->bytes_tx += xfer->len;
311 if ((xfer->rx_buf) &&
312 (xfer->rx_buf != ctlr->dummy_rx))
313 stats->bytes_rx += xfer->len;
314
315 spin_unlock_irqrestore(&stats->lock, flags);
316}
317EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
318
319
320
321
322
323static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
324 const struct spi_device *sdev)
325{
326 while (id->name[0]) {
327 if (!strcmp(sdev->modalias, id->name))
328 return id;
329 id++;
330 }
331 return NULL;
332}
333
334const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
335{
336 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
337
338 return spi_match_id(sdrv->id_table, sdev);
339}
340EXPORT_SYMBOL_GPL(spi_get_device_id);
341
/*
 * Bus match callback.  The order of the checks is the contract:
 * an explicit sysfs driver_override wins outright, then OF compatible
 * strings, then ACPI IDs, then the driver's spi_device_id table, and
 * finally a plain modalias vs. driver-name comparison.
 */
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and attempt an exact match */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
364
365static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
366{
367 const struct spi_device *spi = to_spi_device(dev);
368 int rc;
369
370 rc = acpi_device_uevent_modalias(dev, env);
371 if (rc != -ENODEV)
372 return rc;
373
374 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
375}
376
/* The SPI bus type, registered with the driver core at init time */
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
384
385
/*
 * Bus probe wrapper: apply DT-assigned clock defaults, resolve the
 * device's interrupt from the device tree, attach a PM domain, and only
 * then call the driver's probe().  The PM domain is detached again if
 * the driver's probe fails.
 */
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		/* propagate deferral so probe retries once the irqchip appears */
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* any other lookup failure: treat as "no interrupt" */
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	ret = sdrv->probe(spi);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}
414
415static int spi_drv_remove(struct device *dev)
416{
417 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
418 int ret;
419
420 ret = sdrv->remove(to_spi_device(dev));
421 dev_pm_domain_detach(dev, true);
422
423 return ret;
424}
425
426static void spi_drv_shutdown(struct device *dev)
427{
428 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
429
430 sdrv->shutdown(to_spi_device(dev));
431}
432
433
434
435
436
437
438
439
440
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Hooks the driver's optional probe/remove/shutdown methods into the
 * bus-level wrappers above and registers it with the driver core.
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
454
455
456
457
458
459
460
461
462
/* Board info registered before the matching controller shows up is kept
 * on board_list; each entry pairs the caller's spi_board_info with the
 * list linkage.
 */
struct boardinfo {
	struct list_head list;
	struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Protects add/del on the board_info list and the spi_controller list,
 * and the matching process between the two.
 */
static DEFINE_MUTEX(board_lock);
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
496{
497 struct spi_device *spi;
498
499 if (!spi_controller_get(ctlr))
500 return NULL;
501
502 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
503 if (!spi) {
504 spi_controller_put(ctlr);
505 return NULL;
506 }
507
508 spi->master = spi->controller = ctlr;
509 spi->dev.parent = &ctlr->dev;
510 spi->dev.bus = &spi_bus_type;
511 spi->dev.release = spidev_release;
512 spi->cs_gpio = -ENOENT;
513
514 spin_lock_init(&spi->statistics.lock);
515
516 device_initialize(&spi->dev);
517 return spi;
518}
519EXPORT_SYMBOL_GPL(spi_alloc_device);
520
521static void spi_dev_set_name(struct spi_device *spi)
522{
523 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
524
525 if (adev) {
526 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
527 return;
528 }
529
530 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
531 spi->chip_select);
532}
533
534static int spi_dev_check(struct device *dev, void *data)
535{
536 struct spi_device *spi = to_spi_device(dev);
537 struct spi_device *new_spi = data;
538
539 if (spi->controller == new_spi->controller &&
540 spi->chip_select == new_spi->chip_select)
541 return -EBUSY;
542 return 0;
543}
544
545
546
547
548
549
550
551
552
553
/**
 * spi_add_device - Add an spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion to spi_alloc_device(): validates the chipselect, names the
 * device, runs the controller's setup() and registers the device with
 * the driver core.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  The lock also serializes concurrent
	 * add() calls racing on the same chipselect.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* GPIO descriptors take precedence over legacy GPIO numbers */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
/**
 * spi_new_device - instantiate one new SPI device from board info
 * @ctlr: controller which will host the device
 * @chip: describes the device to instantiate
 *
 * Allocates an spi_device, copies the board-info fields into it, adds
 * any additional device properties and registers it on the bus.  On
 * any failure the partially built device (and its properties) are torn
 * down before returning.
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	/* modalias longer than the proxy field would be silently truncated */
	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	/* only remove properties we added above */
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
681
682
683
684
685
686
687
688
689void spi_unregister_device(struct spi_device *spi)
690{
691 if (!spi)
692 return;
693
694 if (spi->dev.of_node) {
695 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
696 of_node_put(spi->dev.of_node);
697 }
698 if (ACPI_COMPANION(&spi->dev))
699 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
700 device_unregister(&spi->dev);
701}
702EXPORT_SYMBOL_GPL(spi_unregister_device);
703
704static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
705 struct spi_board_info *bi)
706{
707 struct spi_device *dev;
708
709 if (ctlr->bus_num != bi->bus_num)
710 return;
711
712 dev = spi_new_device(ctlr, bi);
713 if (!dev)
714 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
715 bi->modalias);
716}
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 *
 * Board-init code calls this (typically from arch setup) to declare SPI
 * devices that can't be detected by probing.  The descriptors are
 * copied onto board_list and matched immediately against all
 * already-registered controllers; controllers registered later pick
 * them up from the list.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			/* NOTE(review): on failure the kcalloc'd array (and
			 * any earlier entries already linked onto board_list)
			 * cannot be freed here and is leaked; this is
			 * init-time-only code, but worth confirming.
			 */
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		/* match against every controller registered so far */
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
773
774
775
/* Assert or deassert the chipselect for @spi, via GPIO and/or the
 * controller's set_cs() method.
 */
static void spi_set_cs(struct spi_device *spi, bool enable)
{
	/* SPI_CS_HIGH inverts the usual active-low chipselect sense */
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		/*
		 * Honour the SPI_NO_CS flag and invert the enable line, as
		 * active low is default for SPI.  Execution paths that
		 * handle polarity inversion in gpiolib (such as device
		 * tree) will enforce active high using SPI_CS_HIGH,
		 * resulting in a double inversion through the logic above.
		 */
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod)
				gpiod_set_value_cansleep(spi->cs_gpiod,
							 !enable);
			else
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}
804
#ifdef CONFIG_HAS_DMA
/*
 * Build and DMA-map a scatterlist for @buf/@len on behalf of @dev.
 * Three buffer kinds are handled: vmalloc'ed and kmap'ed highmem
 * buffers are described page by page; ordinary lowmem buffers are
 * chunked only by the device's max segment size and the controller's
 * max DMA length.  Anything else (e.g. an address that is neither
 * vmalloc, kmap nor lowmem) is rejected with -EINVAL.
 */
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	/* is the buffer inside the persistent-kmap window? */
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		/* page-backed: one descriptor per page at most */
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum of
			 * the descriptor length, the remaining buffer
			 * length and what fits in the current page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;	/* zero mapped entries means failure */
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	/* dma_map_sg() may coalesce entries; record the mapped count */
	sgt->nents = ret;

	return 0;
}
886
887void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
888 struct sg_table *sgt, enum dma_data_direction dir)
889{
890 if (sgt->orig_nents) {
891 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
892 sg_free_table(sgt);
893 }
894}
895
/*
 * DMA-map every transfer in @msg that the controller elects to DMA,
 * per its can_dma() callback.  Mapping is done against the DMA
 * channels' devices when the controller has dedicated channels,
 * otherwise against the controller's parent device.
 *
 * NOTE(review): if mapping an rx buffer fails, only the current
 * transfer's tx mapping is undone; transfers mapped in earlier loop
 * iterations remain mapped on this error path — confirm callers
 * tolerate/clean this up.
 */
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}
943
/*
 * Reverse __spi_map_msg(): unmap every transfer the controller elected
 * to DMA, using the same device selection as the mapping did.  A no-op
 * when the message was never mapped or the controller can't DMA.
 */
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else
/* !CONFIG_HAS_DMA: message mapping/unmapping are no-ops */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
985
/*
 * Unmap a finished message.  Before unmapping, restore NULL in any
 * tx_buf/rx_buf that spi_map_msg() pointed at the controller's shared
 * dummy buffers, so callers never see the internal dummies.
 */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* restore the original (NULL) buffer pointers */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}
1004
/*
 * Prepare a message for transfer.  For controllers flagged MUST_RX /
 * MUST_TX, grow the shared dummy buffers to the largest buffer-less
 * transfer in the message and substitute them for missing tx/rx
 * buffers (the tx dummy is zero-filled so the wire carries zeros; the
 * rx dummy is scratch).  Then hand off to __spi_map_msg() for DMA
 * mapping.
 */
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		/* find the largest transfer missing each kind of buffer */
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
1056
1057static int spi_transfer_wait(struct spi_controller *ctlr,
1058 struct spi_message *msg,
1059 struct spi_transfer *xfer)
1060{
1061 struct spi_statistics *statm = &ctlr->statistics;
1062 struct spi_statistics *stats = &msg->spi->statistics;
1063 unsigned long long ms = 1;
1064
1065 if (spi_controller_is_slave(ctlr)) {
1066 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1067 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1068 return -EINTR;
1069 }
1070 } else {
1071 ms = 8LL * 1000LL * xfer->len;
1072 do_div(ms, xfer->speed_hz);
1073 ms += ms + 200;
1074
1075 if (ms > UINT_MAX)
1076 ms = UINT_MAX;
1077
1078 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1079 msecs_to_jiffies(ms));
1080
1081 if (ms == 0) {
1082 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1083 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1084 dev_err(&msg->spi->dev,
1085 "SPI transfer timed out\n");
1086 return -ETIMEDOUT;
1087 }
1088 }
1089
1090 return 0;
1091}
1092
1093static void _spi_transfer_delay_ns(u32 ns)
1094{
1095 if (!ns)
1096 return;
1097 if (ns <= 1000) {
1098 ndelay(ns);
1099 } else {
1100 u32 us = DIV_ROUND_UP(ns, 1000);
1101
1102 if (us <= 10)
1103 udelay(us);
1104 else
1105 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1106 }
1107}
1108
/* Apply the per-transfer chipselect-change delay, converting the
 * transfer's delay value from its declared unit into nanoseconds.
 */
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 delay = xfer->cs_change_delay;
	u32 unit = xfer->cs_change_delay_unit;
	u32 hz;

	/* nothing to do on 0 delay unless the unit is usecs, which has a
	 * non-zero default below
	 */
	if (!delay && unit != SPI_DELAY_UNIT_USECS)
		return;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		/* for compatibility: a zero usec delay means 10 us */
		if (!delay)
			delay = 10000;
		else
			delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* value is already in ns */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* delay is expressed in clock cycles; if the effective
		 * speed is not yet known, approximate with half the
		 * requested rate (clock must be <= the requested speed)
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of 10us\n",
			     xfer->cs_change_delay_unit);
		delay = 10000;
	}

	_spi_transfer_delay_ns(delay);
}
1146
1147
1148
1149
1150
1151
1152
1153
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			/* a positive return means "in progress": wait for
			 * the driver to call spi_finalize_current_transfer()
			 */
			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			/* buffer-less transfers are only legal when empty */
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			_spi_transfer_delay_ns(xfer->delay_usecs * 1000);

		/* cs_change on the final transfer means "leave CS asserted";
		 * mid-message it toggles CS with the configured delay
		 */
		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
1238
1239
1240
1241
1242
1243
1244
1245
1246
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt-driven
 * transfer has finished; wakes the waiter in spi_transfer_wait().
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * other contexts (see spi_flush_queue()); the handling at the top of the
 * function deals with concurrent invocation safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer to the kthread */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		/* Free scratch buffers used for unidirectional transfers */
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	/* Power up the hardware on the idle -> busy transition only */
	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
1407
1408
1409
1410
1411
1412static void spi_pump_messages(struct kthread_work *work)
1413{
1414 struct spi_controller *ctlr =
1415 container_of(work, struct spi_controller, pump_messages);
1416
1417 __spi_pump_messages(ctlr, true);
1418}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435static void spi_set_thread_rt(struct spi_controller *ctlr)
1436{
1437 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1438
1439 dev_info(&ctlr->dev,
1440 "will run message pump with realtime priority\n");
1441 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m);
1442}
1443
1444static int spi_init_queue(struct spi_controller *ctlr)
1445{
1446 ctlr->running = false;
1447 ctlr->busy = false;
1448
1449 kthread_init_worker(&ctlr->kworker);
1450 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1451 "%s", dev_name(&ctlr->dev));
1452 if (IS_ERR(ctlr->kworker_task)) {
1453 dev_err(&ctlr->dev, "failed to create message pump task\n");
1454 return PTR_ERR(ctlr->kworker_task);
1455 }
1456 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1457
1458
1459
1460
1461
1462
1463
1464
1465 if (ctlr->rt)
1466 spi_set_thread_rt(ctlr);
1467
1468 return 0;
1469}
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1495
1496
1497
1498
1499
1500
1501
1502
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	/* Kick the pump so the next queued message gets picked up */
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1536
/*
 * Mark the queue as running and kick the message pump.  Fails with
 * -EBUSY if the queue is already running or the hardware is busy.
 */
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}
1556
/*
 * Drain the message queue and mark it stopped.  Polls for up to ~5s
 * (500 * 10ms) waiting for queued messages to complete, then fails with
 * -EBUSY if the queue is still non-empty or the hardware still busy.
 */
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.  The lock is dropped
	 * around the sleep so the pump can make progress.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}
1590
1591static int spi_destroy_queue(struct spi_controller *ctlr)
1592{
1593 int ret;
1594
1595 ret = spi_stop_queue(ctlr);
1596
1597
1598
1599
1600
1601
1602
1603 if (ret) {
1604 dev_err(&ctlr->dev, "problem destroying queue\n");
1605 return ret;
1606 }
1607
1608 kthread_flush_worker(&ctlr->kworker);
1609 kthread_stop(ctlr->kworker_task);
1610
1611 return 0;
1612}
1613
/*
 * Queue @msg on the controller's message queue.
 * @need_pump: if true, kick the message pump when the controller is idle.
 *
 * Returns -ESHUTDOWN once the queue has been stopped, else 0.
 */
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
1637
1638
1639
1640
1641
1642
1643
1644
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to be queued
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
1649
1650static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1651{
1652 int ret;
1653
1654 ctlr->transfer = spi_queued_transfer;
1655 if (!ctlr->transfer_one_message)
1656 ctlr->transfer_one_message = spi_transfer_one_message;
1657
1658
1659 ret = spi_init_queue(ctlr);
1660 if (ret) {
1661 dev_err(&ctlr->dev, "problem initializing queue\n");
1662 goto err_init_queue;
1663 }
1664 ctlr->queued = true;
1665 ret = spi_start_queue(ctlr);
1666 if (ret) {
1667 dev_err(&ctlr->dev, "problem starting queue\n");
1668 goto err_start_queue;
1669 }
1670
1671 return 0;
1672
1673err_start_queue:
1674 spi_destroy_queue(ctlr);
1675err_init_queue:
1676 return ret;
1677}
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
/*
 * Process any queued messages synchronously in the calling context rather
 * than waiting for the message pump kthread.  Only meaningful for
 * controllers using the queued transfer path; a no-op otherwise.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}
1694
1695
1696
1697#if defined(CONFIG_OF)
/*
 * Parse the standard SPI device-tree properties of node @nc into @spi:
 * mode flags, bus widths, chip select ("reg") and maximum clock speed.
 * Returns 0 on success or a negative errno for malformed nodes.
 */
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;

	/*
	 * For descriptors associated with the device, polarity inversion is
	 * handled in the gpiolib, so all chip selects are "active high" in
	 * the logical sense, the gpiolib will invert the line if need be.
	 */
	if (ctlr->use_gpio_descriptors)
		spi->mode |= SPI_CS_HIGH;
	else if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD/OCTAL mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Slave controllers accept only a single child named "slave" */
	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
1796
/*
 * Allocate and register one spi_device described by device-tree node @nc.
 * On success the device holds a reference on @nc; on failure the partially
 * initialised device is released and an ERR_PTR() is returned.
 */
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
1842
1843
1844
1845
1846
1847
1848
1849
1850static void of_register_spi_devices(struct spi_controller *ctlr)
1851{
1852 struct spi_device *spi;
1853 struct device_node *nc;
1854
1855 if (!ctlr->dev.of_node)
1856 return;
1857
1858 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1859 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1860 continue;
1861 spi = of_register_spi_device(ctlr, nc);
1862 if (IS_ERR(spi)) {
1863 dev_warn(&ctlr->dev,
1864 "Failed to create SPI device for %pOF\n", nc);
1865 of_node_clear_flag(nc, OF_POPULATED);
1866 }
1867 }
1868}
1869#else
1870static void of_register_spi_devices(struct spi_controller *ctlr) { }
1871#endif
1872
1873#ifdef CONFIG_ACPI
/* Accumulator for SPI connection parameters parsed from ACPI resources */
struct acpi_spi_lookup {
	struct spi_controller 	*ctlr;		/* controller being enumerated */
	u32			max_speed_hz;	/* 0 until a speed is found */
	u32			mode;		/* SPI_* mode flags */
	int			irq;		/* -1 until an IRQ is found */
	u8			bits_per_word;
	u8			chip_select;
};
1882
/*
 * Parse the Apple-specific _DSM device properties found on x86 Apple
 * machines into @lookup.  No-op on other hardware.
 *
 * Each property is an ACPI buffer; length checks below follow the
 * firmware layout (spiSclkPeriod is a u32 period in ns, the rest are
 * u64 flags/values).
 */
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	/* Clock period in ns -> frequency in Hz */
	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	/* A zero spiBitOrder means LSB-first */
	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}
1911
/*
 * acpi_dev_get_resources() callback: fill the acpi_spi_lookup from a
 * SpiSerialBus resource (only if it targets this controller) or from
 * the first interrupt resource.
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			/* Ignore resources pointing at a different controller */
			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status) ||
			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
				return -ENODEV;

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
1969
/*
 * Create and register an spi_device for ACPI device @adev if its _CRS
 * describes a SPI connection on @ctlr.  Returns an acpi_status; most
 * failures deliberately return AE_OK so namespace walking continues.
 */
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* found SPI in _CRS but it points to another controller */
		return AE_OK;

	if (!lookup.max_speed_hz &&
	    !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	/* Without a speed the device cannot be a slave of this controller */
	if (!lookup.max_speed_hz)
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	spi->chip_select	= lookup.chip_select;

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	/* Fall back to a GpioInt if no Interrupt resource was found */
	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
2037
2038static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2039 void *data, void **return_value)
2040{
2041 struct spi_controller *ctlr = data;
2042 struct acpi_device *adev;
2043
2044 if (acpi_bus_get_device(handle, &adev))
2045 return AE_OK;
2046
2047 return acpi_register_spi_device(ctlr, adev);
2048}
2049
2050#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2051
2052static void acpi_register_spi_devices(struct spi_controller *ctlr)
2053{
2054 acpi_status status;
2055 acpi_handle handle;
2056
2057 handle = ACPI_HANDLE(ctlr->dev.parent);
2058 if (!handle)
2059 return;
2060
2061 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2062 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2063 acpi_spi_add_device, NULL, ctlr, NULL);
2064 if (ACPI_FAILURE(status))
2065 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2066}
2067#else
2068static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2069#endif
2070
2071static void spi_controller_release(struct device *dev)
2072{
2073 struct spi_controller *ctlr;
2074
2075 ctlr = container_of(dev, struct spi_controller, dev);
2076 kfree(ctlr);
2077}
2078
/* sysfs class for SPI master controllers ("/sys/class/spi_master") */
static struct class spi_master_class = {
	.name = "spi_master",
	.owner = THIS_MODULE,
	.dev_release = spi_controller_release,
	.dev_groups = spi_master_groups,
};
2085
2086#ifdef CONFIG_SPI_SLAVE
2087
2088
2089
2090
2091
2092int spi_slave_abort(struct spi_device *spi)
2093{
2094 struct spi_controller *ctlr = spi->controller;
2095
2096 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2097 return ctlr->slave_abort(ctlr);
2098
2099 return -ENOTSUPP;
2100}
2101EXPORT_SYMBOL_GPL(spi_slave_abort);
2102
/* device_find_child() match callback that accepts any child device. */
static int match_true(struct device *dev, void *data)
{
	return 1;
}
2107
2108static ssize_t spi_slave_show(struct device *dev,
2109 struct device_attribute *attr, char *buf)
2110{
2111 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2112 dev);
2113 struct device *child;
2114
2115 child = device_find_child(&ctlr->dev, NULL, match_true);
2116 return sprintf(buf, "%s\n",
2117 child ? to_spi_device(child)->modalias : NULL);
2118}
2119
/*
 * sysfs "slave" store: replace the registered slave device.  Writing a
 * driver name registers a new slave; writing "(null)" only removes the
 * current one.
 */
static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}
2159
/* Read/replace the registered slave device via sysfs */
static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

/* sysfs class for SPI slave controllers ("/sys/class/spi_slave") */
static struct class spi_slave_class = {
	.name = "spi_slave",
	.owner = THIS_MODULE,
	.dev_release = spi_controller_release,
	.dev_groups = spi_slave_groups,
};
2183#else
2184extern struct class spi_slave_class;
2185#endif
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata()
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(), and for
 * dropping the initial reference with spi_controller_put() on error.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller *ctlr;

	if (!dev)
		return NULL;

	/* Driver-private data lives directly behind the controller struct */
	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	/* -1 requests dynamic bus number allocation at registration time */
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2237
2238#ifdef CONFIG_OF
2239static int of_spi_register_master(struct spi_controller *ctlr)
2240{
2241 int nb, i, *cs;
2242 struct device_node *np = ctlr->dev.of_node;
2243
2244 if (!np)
2245 return 0;
2246
2247 nb = of_gpio_named_count(np, "cs-gpios");
2248 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2249
2250
2251 if (nb == 0 || nb == -ENOENT)
2252 return 0;
2253 else if (nb < 0)
2254 return nb;
2255
2256 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2257 GFP_KERNEL);
2258 ctlr->cs_gpios = cs;
2259
2260 if (!ctlr->cs_gpios)
2261 return -ENOMEM;
2262
2263 for (i = 0; i < ctlr->num_chipselect; i++)
2264 cs[i] = -ENOENT;
2265
2266 for (i = 0; i < nb; i++)
2267 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2268
2269 return 0;
2270}
2271#else
2272static int of_spi_register_master(struct spi_controller *ctlr)
2273{
2274 return 0;
2275}
2276#endif
2277
2278
2279
2280
2281
/*
 * spi_get_gpio_descs() - grab chip select GPIO descriptors for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 *
 * Fills ctlr->cs_gpiods and widens num_chipselect accordingly.  All
 * allocations are devm-managed against the controller device.
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;

	nb = gpiod_count(dev, "cs");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* No GPIOs at all is fine, else return the error */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
		}
	}

	return 0;
}
2333
2334static int spi_controller_check_ops(struct spi_controller *ctlr)
2335{
2336
2337
2338
2339
2340
2341
2342
2343 if (ctlr->mem_ops) {
2344 if (!ctlr->mem_ops->exec_op)
2345 return -EINVAL;
2346 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2347 !ctlr->transfer_one_message) {
2348 return -EINVAL;
2349 }
2350
2351 return 0;
2352}
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.
 *
 * This must be called from context that can sleep.  After a successful
 * return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int status;
	int id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num >= 0) {
		/* devices with a fixed bus num must check-in with the num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* allocate the bus num requested by the "spi" DT alias */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		/* dynamic allocation: start above the highest DT alias */
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_slave(ctlr)) {
		if (ctlr->use_gpio_descriptors) {
			status = spi_get_gpio_descs(ctlr);
			if (status)
				return status;
			/*
			 * A controller using GPIO descriptors always
			 * supports SPI_CS_HIGH if need be.
			 */
			ctlr->mode_bits |= SPI_CS_HIGH;
		} else {
			/* Legacy code path for GPIOs from DT */
			status = of_spi_register_master(ctlr);
			if (status)
				return status;
		}
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect)
		return -EINVAL;

	status = device_add(&ctlr->dev);
	if (status < 0) {
		/* free bus id */
		mutex_lock(&board_lock);
		idr_remove(&spi_master_idr, ctlr->bus_num);
		mutex_unlock(&board_lock);
		goto done;
	}
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, initialize the queue.  This will
	 * make sure the queue is initialized before we attempt to do any
	 * transfers.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			/* free bus id */
			mutex_lock(&board_lock);
			idr_remove(&spi_master_idr, ctlr->bus_num);
			mutex_unlock(&board_lock);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
2518
/* devres release callback: unregister the controller stored in *res. */
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537int devm_spi_register_controller(struct device *dev,
2538 struct spi_controller *ctlr)
2539{
2540 struct spi_controller **ptr;
2541 int ret;
2542
2543 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2544 if (!ptr)
2545 return -ENOMEM;
2546
2547 ret = spi_register_controller(ctlr);
2548 if (!ret) {
2549 *ptr = ctlr;
2550 devres_add(dev, ptr);
2551 } else {
2552 devres_free(ptr);
2553 }
2554
2555 return ret;
2556}
2557EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2558
/* device_for_each_child() helper: unregister one child SPI device. */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	/* Unregister children before the controller device itself */
	device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);

	/* free bus id, but only if it is still ours */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
2603
2604int spi_controller_suspend(struct spi_controller *ctlr)
2605{
2606 int ret;
2607
2608
2609 if (!ctlr->queued)
2610 return 0;
2611
2612 ret = spi_stop_queue(ctlr);
2613 if (ret)
2614 dev_err(&ctlr->dev, "queue stop failed\n");
2615
2616 return ret;
2617}
2618EXPORT_SYMBOL_GPL(spi_controller_suspend);
2619
2620int spi_controller_resume(struct spi_controller *ctlr)
2621{
2622 int ret;
2623
2624 if (!ctlr->queued)
2625 return 0;
2626
2627 ret = spi_start_queue(ctlr);
2628 if (ret)
2629 dev_err(&ctlr->dev, "queue restart failed\n");
2630
2631 return ret;
2632}
2633EXPORT_SYMBOL_GPL(spi_controller_resume);
2634
2635static int __spi_controller_match(struct device *dev, const void *data)
2636{
2637 struct spi_controller *ctlr;
2638 const u16 *bus_num = data;
2639
2640 ctlr = container_of(dev, struct spi_controller, dev);
2641 return ctlr->bus_num == *bus_num;
2642}
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656struct spi_controller *spi_busnum_to_master(u16 bus_num)
2657{
2658 struct device *dev;
2659 struct spi_controller *ctlr = NULL;
2660
2661 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2662 __spi_controller_match);
2663 if (dev)
2664 ctlr = container_of(dev, struct spi_controller, dev);
2665
2666 return ctlr;
2667}
2668EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688void *spi_res_alloc(struct spi_device *spi,
2689 spi_res_release_t release,
2690 size_t size, gfp_t gfp)
2691{
2692 struct spi_res *sres;
2693
2694 sres = kzalloc(sizeof(*sres) + size, gfp);
2695 if (!sres)
2696 return NULL;
2697
2698 INIT_LIST_HEAD(&sres->entry);
2699 sres->release = release;
2700
2701 return sres->data;
2702}
2703EXPORT_SYMBOL_GPL(spi_res_alloc);
2704
2705
2706
2707
2708
2709
2710void spi_res_free(void *res)
2711{
2712 struct spi_res *sres = container_of(res, struct spi_res, data);
2713
2714 if (!res)
2715 return;
2716
2717 WARN_ON(!list_empty(&sres->entry));
2718 kfree(sres);
2719}
2720EXPORT_SYMBOL_GPL(spi_res_free);
2721
2722
2723
2724
2725
2726
2727void spi_res_add(struct spi_message *message, void *res)
2728{
2729 struct spi_res *sres = container_of(res, struct spi_res, data);
2730
2731 WARN_ON(!list_empty(&sres->entry));
2732 list_add_tail(&sres->entry, &message->resources);
2733}
2734EXPORT_SYMBOL_GPL(spi_res_add);
2735
2736
2737
2738
2739
2740
2741void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2742{
2743 struct spi_res *res, *tmp;
2744
2745 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
2746 if (res->release)
2747 res->release(ctlr, message, res->data);
2748
2749 list_del(&res->entry);
2750
2751 kfree(res);
2752 }
2753}
2754EXPORT_SYMBOL_GPL(spi_res_release);
2755
2756
2757
2758
2759
/* Core release function for insert/removal of transfers */
static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * The undo of the list surgery happens automatically via the spi_res
 * release callback (__spi_replace_transfers_release) when the message
 * is finalized.
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res so it is cleaned up with
	 * the message */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* extradata follows the flexible inserted_transfers[] array */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replaced_transfers structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copies of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order so that list_add()
		 * after replaced_after produces them in forward order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to the list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted count so the release callback knows how many
	 * entries to unlink */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
2887
/* split a single transfer pointed to by *xferp into ceil(len/maxsize)
 * transfers of at most maxsize bytes each, updating *xferp to the last
 * inserted transfer so callers iterating the message skip the splits.
 */
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* calculate how many replacement transfers we need */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create the replacement: remove 1, insert count copies */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers.
	 * note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most other fields) -
	 * we just have to fix up len and the buffer pointers.
	 *
	 * the first transfer only needs its length modified, so it
	 * runs outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need their rx_buf/tx_buf (and dma addresses,
	 * for the deprecated is_dma_mapped interface) advanced too */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* advance buffers by the bytes covered so far */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* remaining length, capped at maxsize */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* set up xferp to the last entry we have inserted,
	 * so that the caller's list walk skips the already-split
	 * transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum size (in bytes) of each transfer
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation (0 on success, negative errno on error)
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * by __spi_split_transfer_maxsize, to avoid potential infinite
	 * loops (the split transfers themselves never exceed maxsize,
	 * so the walk resumes after them)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2990
2991
2992
2993
2994
2995
2996
2997static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2998 u8 bits_per_word)
2999{
3000 if (ctlr->bits_per_word_mask) {
3001
3002 if (bits_per_word > 32)
3003 return -EINVAL;
3004 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3005 return -EINVAL;
3006 }
3007
3008 return 0;
3009}
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031int spi_setup(struct spi_device *spi)
3032{
3033 unsigned bad_bits, ugly_bits;
3034 int status;
3035
3036
3037
3038 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3039 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3040 dev_err(&spi->dev,
3041 "setup: can not select dual and quad at the same time\n");
3042 return -EINVAL;
3043 }
3044
3045
3046 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3047 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3048 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3049 return -EINVAL;
3050
3051
3052
3053
3054
3055 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3056
3057
3058
3059 if (gpio_is_valid(spi->cs_gpio))
3060 bad_bits &= ~SPI_CS_HIGH;
3061 ugly_bits = bad_bits &
3062 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3063 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3064 if (ugly_bits) {
3065 dev_warn(&spi->dev,
3066 "setup: ignoring unsupported mode bits %x\n",
3067 ugly_bits);
3068 spi->mode &= ~ugly_bits;
3069 bad_bits &= ~ugly_bits;
3070 }
3071 if (bad_bits) {
3072 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3073 bad_bits);
3074 return -EINVAL;
3075 }
3076
3077 if (!spi->bits_per_word)
3078 spi->bits_per_word = 8;
3079
3080 status = __spi_validate_bits_per_word(spi->controller,
3081 spi->bits_per_word);
3082 if (status)
3083 return status;
3084
3085 if (!spi->max_speed_hz)
3086 spi->max_speed_hz = spi->controller->max_speed_hz;
3087
3088 if (spi->controller->setup)
3089 status = spi->controller->setup(spi);
3090
3091 spi_set_cs(spi, false);
3092
3093 if (spi->rt && !spi->controller->rt) {
3094 spi->controller->rt = true;
3095 spi_set_thread_rt(spi->controller);
3096 }
3097
3098 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3099 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3100 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3101 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3102 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3103 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3104 spi->bits_per_word, spi->max_speed_hz,
3105 status);
3106
3107 return status;
3108}
3109EXPORT_SYMBOL_GPL(spi_setup);
3110
3111
3112
3113
3114
3115
3116
3117
3118void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
3119 u8 inactive_dly)
3120{
3121 if (spi->controller->set_cs_timing)
3122 spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
3123}
3124EXPORT_SYMBOL_GPL(spi_set_cs_timing);
3125
/* Validate a message against device and controller capabilities and
 * fill in per-transfer defaults before it is queued. Returns 0 or a
 * negative errno; on success message->status is set to -EINPROGRESS.
 */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod ||
					  gpio_is_valid(spi->cs_gpio))) {
		size_t maxsize;
		int ret;

		/* one word is at most this many bytes */
		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/* Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/* check transfer tx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx_nbits matches the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}

		/* check transfer rx_nbits against the same rules */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (xfer->word_delay_usecs < spi->word_delay_usecs)
			xfer->word_delay_usecs = spi->word_delay_usecs;
	}

	message->status = -EINPROGRESS;

	return 0;
}
3269
3270static int __spi_async(struct spi_device *spi, struct spi_message *message)
3271{
3272 struct spi_controller *ctlr = spi->controller;
3273
3274
3275
3276
3277
3278 if (!ctlr->transfer)
3279 return -ENOTSUPP;
3280
3281 message->spi = spi;
3282
3283 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3284 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3285
3286 trace_spi_message_submit(message);
3287
3288 return ctlr->transfer(spi, message);
3289}
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322int spi_async(struct spi_device *spi, struct spi_message *message)
3323{
3324 struct spi_controller *ctlr = spi->controller;
3325 int ret;
3326 unsigned long flags;
3327
3328 ret = __spi_validate(spi, message);
3329 if (ret != 0)
3330 return ret;
3331
3332 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3333
3334 if (ctlr->bus_lock_flag)
3335 ret = -EBUSY;
3336 else
3337 ret = __spi_async(spi, message);
3338
3339 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3340
3341 return ret;
3342}
3343EXPORT_SYMBOL_GPL(spi_async);
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3377{
3378 struct spi_controller *ctlr = spi->controller;
3379 int ret;
3380 unsigned long flags;
3381
3382 ret = __spi_validate(spi, message);
3383 if (ret != 0)
3384 return ret;
3385
3386 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3387
3388 ret = __spi_async(spi, message);
3389
3390 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3391
3392 return ret;
3393
3394}
3395EXPORT_SYMBOL_GPL(spi_async_locked);
3396
3397
3398
3399
3400
3401
3402
3403
/* message->complete callback used by __spi_sync(): signals the
 * on-stack completion the submitter is waiting on */
static void spi_complete(void *arg)
{
	struct completion *done = arg;

	complete(done);
}
3408
/* Common implementation of spi_sync() and spi_sync_locked(): submit the
 * message and block until it completes. Caller guarantees exclusive
 * access (bus_lock_mutex or spi_bus_lock()).
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		/* queue without kicking the worker thread; we may get to
		 * pump it ourselves below */
		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	/* the on-stack completion must not outlive this function */
	message->context = NULL;
	return status;
}
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484int spi_sync(struct spi_device *spi, struct spi_message *message)
3485{
3486 int ret;
3487
3488 mutex_lock(&spi->controller->bus_lock_mutex);
3489 ret = __spi_sync(spi, message);
3490 mutex_unlock(&spi->controller->bus_lock_mutex);
3491
3492 return ret;
3493}
3494EXPORT_SYMBOL_GPL(spi_sync);
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * Like spi_sync() but without taking bus_lock_mutex - the caller must
 * already hold the bus lock via spi_bus_lock().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that should be locked for exclusive bus access
 * Context: can sleep
 *
 * While the bus is locked, only spi_*_locked() calls may submit
 * messages; plain spi_async() fails with -EBUSY.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	/* block other synchronous users first ... */
	mutex_lock(&ctlr->bus_lock_mutex);

	/* ... then raise the flag under the spinlock so in-flight
	 * spi_async() submissions see it consistently */
	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* the mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that was locked for exclusive bus access
 * Context: can sleep
 *
 * Releases the bus lock taken by spi_bus_lock(); ordinary spi_async()
 * submissions may proceed again afterwards.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	/* clear the flag before dropping the mutex taken in
	 * spi_bus_lock() */
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
3571
3572
/* portable code must never pass more than 32 bytes to
 * spi_write_then_read() */
#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)

/* shared bounce buffer for spi_write_then_read(); allocated once in
 * spi_init() and guarded by that function's local mutex */
static u8 *buf;
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  Parameters are always
 * copied through a small bounce buffer; portable code should never use
 * this for more than 32 bytes.  Performance-sensitive or bulk transfer
 * code should instead use spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/* Use the pre-allocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it's a convenience interface), but we can keep heap
	 * costs out of the hot path unless someone else is already using
	 * the shared buffer or the transfer is too large for it.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* tx data at the start of the bounce buffer, rx right after it */
	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	/* release whichever buffer we used */
	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
3651
3652
3653
3654#if IS_ENABLED(CONFIG_OF)
3655static int __spi_of_device_match(struct device *dev, const void *data)
3656{
3657 return dev->of_node == data;
3658}
3659
3660
3661struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3662{
3663 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3664 __spi_of_device_match);
3665 return dev ? to_spi_device(dev) : NULL;
3666}
3667EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3668#endif
3669
3670#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3671static int __spi_of_controller_match(struct device *dev, const void *data)
3672{
3673 return dev->of_node == data;
3674}
3675
3676
3677static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3678{
3679 struct device *dev;
3680
3681 dev = class_find_device(&spi_master_class, NULL, node,
3682 __spi_of_controller_match);
3683 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3684 dev = class_find_device(&spi_slave_class, NULL, node,
3685 __spi_of_controller_match);
3686 if (!dev)
3687 return NULL;
3688
3689
3690 return container_of(dev, struct spi_controller, dev);
3691}
3692
/* OF dynamic-reconfig notifier: register/unregister spi_devices as
 * device-tree nodes appear under (or vanish from) SPI controllers.
 */
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		/* the new node's parent must be one of our controllers */
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		/* already populated? */
		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		/* drop the reference taken by the find above */
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}
3742
/* registered with of_reconfig_notifier_register() in spi_init() */
static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
3746#else
3747extern struct notifier_block spi_of_notifier;
3748#endif
3749
3750#if IS_ENABLED(CONFIG_ACPI)
3751static int spi_acpi_controller_match(struct device *dev, const void *data)
3752{
3753 return ACPI_COMPANION(dev->parent) == data;
3754}
3755
/* bus_find_device() predicate: match an spi_device by ACPI companion */
static int spi_acpi_device_match(struct device *dev, const void *data)
{
	const struct acpi_device *adev = data;

	return ACPI_COMPANION(dev) == adev;
}
3760
3761static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3762{
3763 struct device *dev;
3764
3765 dev = class_find_device(&spi_master_class, NULL, adev,
3766 spi_acpi_controller_match);
3767 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3768 dev = class_find_device(&spi_slave_class, NULL, adev,
3769 spi_acpi_controller_match);
3770 if (!dev)
3771 return NULL;
3772
3773 return container_of(dev, struct spi_controller, dev);
3774}
3775
3776static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3777{
3778 struct device *dev;
3779
3780 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3781
3782 return dev ? to_spi_device(dev) : NULL;
3783}
3784
/* ACPI reconfiguration notifier: register/unregister spi_devices as
 * ACPI devices are hot-added/removed under SPI controllers.
 */
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		/* the new device's parent must be one of our controllers */
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		/* drop the reference taken by the find above */
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		/* unregister, then drop the reference of the find */
		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}
3816
/* registered with acpi_reconfig_notifier_register() in spi_init() */
static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
3820#else
3821extern struct notifier_block spi_acpi_notifier;
3822#endif
3823
/* SPI core initialization: set up the bounce buffer, bus type, device
 * classes and (if configured) OF/ACPI hotplug notifiers. On failure,
 * the goto ladder unwinds in reverse order of registration.
 */
static int __init spi_init(void)
{
	int status;

	/* bounce buffer used by spi_write_then_read() */
	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	/* dynamic device (un)registration notifiers */
	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);
3874