// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2017 Linaro Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
					     struct device_attribute *attr, \
					     char *buf) \
{ \
	struct spi_controller *ctlr = container_of(dev, \
					 struct spi_controller, dev); \
	return spi_statistics_##field##_show(&ctlr->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_device *spi = to_spi_device(dev); \
	return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf) \
{ \
	unsigned long flags; \
	ssize_t len; \
	spin_lock_irqsave(&stat->lock, flags); \
	len = sprintf(buf, format_string, stat->field); \
	spin_unlock_irqrestore(&stat->lock, flags); \
	return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
				 "transfer_bytes_histo_" number, \
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

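/*
 * Note: each SPI_STATISTICS_SHOW() above expands to a pair of read-only
 * sysfs attributes, one per controller and one per device, collected under
 * a "statistics" directory by the attribute groups below, e.g.
 * /sys/class/spi_master/spi0/statistics/bytes and
 * /sys/bus/spi/devices/spi0.0/statistics/bytes (paths illustrative).
 */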
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
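/*
 * Worked example of the bucketing above: for a 20-byte transfer,
 * fls(20) = 5, so l2len = 4 and the transfer is counted in
 * transfer_bytes_histo[4], which is the "16-31" bucket published by
 * SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31") earlier in this file.
 */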

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	ret = sdrv->probe(spi);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
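/*
 * Typical protocol-driver usage (illustrative sketch only; the "foo" names
 * are hypothetical): fill in a struct spi_driver and let the
 * module_spi_driver() helper call spi_register_driver() and
 * spi_unregister_driver() at module init/exit:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver	= { .name = "foo" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */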

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information like GPIO assignments.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process; also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of any
 * embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
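/*
 * Board-file usage sketch (the "at25" chip and the numbers here are only
 * an example): declare the hard-wired devices in an __initdata table and
 * register it during board init:
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "at25",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *		},
 *	};
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */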

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	bool enable1 = enable;

	if (!spi->controller->set_cs_timing) {
		if (enable1)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		/*
		 * Honour the SPI_NO_CS flag and invert the enable line, as
		 * active low is default for SPI. Execution paths that handle
		 * polarity inversion in gpiolib (such as device tree) will
		 * enforce active high using the SPI_CS_HIGH, resulting in a
		 * double inversion through the code above.
		 */
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod)
				gpiod_set_value_cansleep(spi->cs_gpiod,
							 !enable);
			else
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (!spi->controller->set_cs_timing) {
		if (!enable1)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	unsigned long long ms = 1;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, xfer->speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= 1000) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, 1000);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}
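/*
 * Example of the dispatch above: a requested delay of 2500 ns exceeds
 * 1000 ns, so us = DIV_ROUND_UP(2500, 1000) = 3 and udelay(3) is used;
 * a 50000 ns delay gives us = 50 and falls through to
 * usleep_range(50, 55).
 */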

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is no effective speed known, then approximate
		 * by underestimating with half the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
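/*
 * Worked example for the SPI_DELAY_UNIT_SCK case above: with an effective
 * clock of 10 MHz, one cycle is DIV_ROUND_UP(1000000000, 10000000) = 100 ns,
 * so a delay value of 4 SCK cycles converts to 400 ns.
 */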

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(10000);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of 10us\n",
			     unit);
		_spi_transfer_delay_ns(10000);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message()
 * for drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
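/*
 * Note: controller drivers whose transfer_one() returns a positive value
 * (transfer still in progress) call spi_finalize_current_transfer() from
 * their completion interrupt handler; it is the counterpart of the wait on
 * ctlr->xfer_completion in spi_transfer_wait() above.
 */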

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @progress word count is greater than
 *			    or equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its TX buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @progress
 *			     exceeds or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);

/*
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime no matter what any other device on the
 * bus says. On the decrease the realtime priority doesn't get removed
 * from the thread.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the callers'
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. Is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/*
	 * For descriptors associated with the device, polarity inversion is
	 * handled in the gpiolib, so all chip selects are "active high" in
	 * the logical sense, the gpiolib will invert the line if need be.
	 */
	if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
	    ctlr->cs_gpiods[spi->chip_select])
		spi->mode |= SPI_CS_HIGH;

	/* Device speed */
	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
		spi->max_speed_hz = value;

	return 0;
}

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
};

static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status) ||
			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
				return -ENODEV;

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* found SPI in _CRS but it points to another controller */
		return AE_OK;

	if (!lookup.max_speed_hz &&
	    !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}


	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		|= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	spi->chip_select	= lookup.chip_select;

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;
	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
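/*
 * Controller-driver usage sketch (the "foo" driver, its priv struct and the
 * chip-select count are hypothetical): allocate a master with room for
 * driver-private data via the spi_alloc_master() wrapper, fill in the
 * methods, then register it:
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 */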

#ifdef CONFIG_OF
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	return 0;
}
#endif

/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;
	unsigned long native_cs_mask = 0;
	unsigned int num_cs_gpios = 0;

	nb = gpiod_count(dev, "cs");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* No GPIOs at all is fine, else return the error */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffz(native_cs_mask);
	if (num_cs_gpios && ctlr->max_native_cs &&
	    ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}
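
/*
 * Illustrative sketch (not part of this file): a controller driver opts in
 * to descriptor-based chip selects by setting use_gpio_descriptors before
 * registering; spi_register_controller() then calls spi_get_gpio_descs()
 * and the core drives cs_gpiods[] itself.  The max_native_cs value is a
 * hypothetical hardware limit.
 *
 *	ctlr->use_gpio_descriptors = true;
 *	ctlr->max_native_cs = 4;
 *	ret = spi_register_controller(ctlr);
 */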

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this is
	 * valid use case.
	 * If ->mem_ops is NULL, we request that at least one of the
	 * ->transfer_xxx() method be implemented.
	 */
	if (ctlr->mem_ops) {
		if (!ctlr->mem_ops->exec_op)
			return -EINVAL;
	} else if (!ctlr->transfer && !ctlr->transfer_one &&
		   !ctlr->transfer_one_message) {
		return -EINVAL;
	}

	return 0;
}

/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int status;
	int id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num >= 0) {
		/* devices with a fixed bus num must check-in with the num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			       ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* allocate dynamic bus number using Linux idr */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_slave(ctlr)) {
		if (ctlr->use_gpio_descriptors) {
			status = spi_get_gpio_descs(ctlr);
			if (status)
				goto free_bus_id;
			/*
			 * A controller using GPIO descriptors always
			 * supports SPI_CS_HIGH if need be.
			 */
			ctlr->mode_bits |= SPI_CS_HIGH;
		} else {
			/* Legacy code path for GPIOs from DT */
			status = of_spi_get_gpio_numbers(ctlr);
			if (status)
				goto free_bus_id;
		}
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect) {
		status = -EINVAL;
		goto free_bus_id;
	}

	status = device_add(&ctlr->dev);
	if (status < 0)
		goto free_bus_id;
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue.  Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto free_bus_id;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev:    device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
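
/*
 * Illustrative sketch (not part of this file) of a minimal probe() using
 * the managed variant; the foo_* names and values are hypothetical.  Note
 * the spi_controller_put() on failure, which drops the reference taken by
 * spi_alloc_master().
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		int ret;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *		ctlr->num_chipselect = 2;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		ret = devm_spi_register_controller(&pdev->dev, ctlr);
 *		if (ret)
 *			spi_controller_put(ctlr);
 *		return ret;
 *	}
 */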

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
	/* free bus id */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
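
/*
 * Illustrative sketch (not part of this file): controller drivers call
 * these helpers from their own system PM callbacks so that no new message
 * is dequeued while the hardware is down.  foo_suspend/foo_resume are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */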

static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_controller *ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
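
/*
 * Illustrative sketch (not part of this file): the returned controller is
 * refcounted via class_find_device(), so callers must drop the reference
 * when they are done with it.  The bus number 2 is hypothetical.
 *
 *	ctlr = spi_busnum_to_master(2);
 *	if (ctlr) {
 *		...
 *		put_device(&ctlr->dev);
 *	}
 */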

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the allocated data
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
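
/*
 * Illustrative sketch (not part of this file): a resource tied to a
 * message's lifetime is allocated with an optional release callback and
 * registered on the message; the core then invokes foo_release() and
 * frees the memory from spi_res_release() once the message finishes.
 * foo_release and struct foo_state are hypothetical.
 *
 *	void *data = spi_res_alloc(spi, foo_release,
 *				   sizeof(struct foo_state), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);
 */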

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after it is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copies of the given xfer with pointers adjusted */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay for all but the last transfer */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
			xfer->delay.value = 0;
		}
	}

	/* set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle the split - the first transfer just needs to be
	 * shortened, while all the others also need their buffer
	 * pointers adjusted:
	 * - rx_buf, tx_buf and the DMA addresses get an offset added
	 * - cs_change and delay were already cleared for all but the
	 *   last transfer by spi_replace_transfers()
	 * The first transfer keeps the original buffer pointers, so it
	 * is handled outside of the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum when to apply this
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
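
/*
 * Illustrative sketch (not part of this file): a controller with a small
 * FIFO can cap transfer sizes from its prepare_message() hook, letting the
 * core split oversized transfers before they reach the hardware.
 * FOO_FIFO_SIZE and foo_prepare_message are hypothetical.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, FOO_FIFO_SIZE,
 *						   GFP_KERNEL);
 *	}
 */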

/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/*
	 * Check mode to prevent that DUAL and QUAD set at the
	 * same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}

	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;

	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
	/*
	 * Nothing prevents us from working with active-high CS in case it
	 * is driven by a GPIO.
	 */
	if (gpio_is_valid(spi->cs_gpio))
		bad_bits &= ~SPI_CS_HIGH;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_get_sync(spi->controller->dev.parent);
		if (status < 0) {
			pm_runtime_put_noidle(spi->controller->dev.parent);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return positive value from pm_runtime_get,
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
		status = 0;

		spi_set_cs(spi, false);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false);
	}

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
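
/*
 * Illustrative sketch (not part of this file): a protocol driver
 * overriding the defaults from its probe() before issuing any I/O; the
 * values and the foo_probe name are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 */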

/**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
 * @setup: CS setup time specified via @spi_delay
 * @hold: CS hold time specified via @spi_delay
 * @inactive: CS inactive delay between transfers specified via @spi_delay
 *
 * Return: zero on success, else a negative error code.
 */
int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
		      struct spi_delay *hold, struct spi_delay *inactive)
{
	size_t len;

	if (spi->controller->set_cs_timing)
		return spi->controller->set_cs_timing(spi, setup, hold,
						      inactive);

	if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
	    (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
	    (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
		dev_err(&spi->dev,
			"Clock-cycle delays for CS not supported in SW mode\n");
		return -ENOTSUPP;
	}

	len = sizeof(struct spi_delay);

	/* copy delays to controller */
	if (setup)
		memcpy(&spi->controller->cs_setup, setup, len);
	else
		memset(&spi->controller->cs_setup, 0, len);

	if (hold)
		memcpy(&spi->controller->cs_hold, hold, len);
	else
		memset(&spi->controller->cs_hold, 0, len);

	if (inactive)
		memcpy(&spi->controller->cs_inactive, inactive, len);
	else
		memset(&spi->controller->cs_inactive, 0, len);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
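
/*
 * Illustrative sketch (not part of this file): requesting 50 ns of CS
 * setup and hold time (values hypothetical).  Controllers without a
 * set_cs_timing hook get the software fallback above, which rejects
 * SPI_DELAY_UNIT_SCK.
 *
 *	struct spi_delay setup = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
 *	struct spi_delay hold = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
 *
 *	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */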

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	/* use the larger of the per-transfer and per-device word delays */
	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod ||
					  gpio_is_valid(spi->cs_gpio))) {
		size_t maxsize;
		int ret;

		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer speed_hz and bits_per_word stay within the
	 * controller limits.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
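
/*
 * Illustrative sketch (not part of this file): an asynchronous request
 * completing into a driver callback; the message and its buffers must stay
 * allocated until foo_complete() has run.  The foo_* names are
 * hypothetical.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		if (priv->msg.status)
 *			dev_warn(&priv->spi->dev, "xfer failed\n");
 *	}
 *
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(priv->spi, &priv->msg);
 */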

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we can */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
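
/*
 * Illustrative sketch (not part of this file): a synchronous full-duplex
 * exchange built from one transfer; tx/rx/len are caller-provided and the
 * buffers must not be freed before spi_sync() returns.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_sync(spi, &msg);
 */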

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an
 * spi_bus_lock call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
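
/*
 * Illustrative sketch (not part of this file): an atomic sequence of
 * messages with the bus held exclusively; only the _locked transfer calls
 * may be used between lock and unlock.  msg1/msg2 are hypothetical.
 *
 *	spi_bus_lock(ctlr);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(ctlr);
 */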

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
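
/*
 * Illustrative sketch (not part of this file): reading one register of a
 * device that expects a command byte followed by a one-byte reply (the
 * command value 0x8f is hypothetical).
 *
 *	u8 cmd = 0x8f;
 *	u8 id;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &id, 1);
 */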

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned spi_device device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);