1
2
3
4
5
6
7#include <linux/kernel.h>
8#include <linux/device.h>
9#include <linux/init.h>
10#include <linux/cache.h>
11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h>
13#include <linux/mutex.h>
14#include <linux/of_device.h>
15#include <linux/of_irq.h>
16#include <linux/clk/clk-conf.h>
17#include <linux/slab.h>
18#include <linux/mod_devicetable.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/spi-mem.h>
21#include <linux/of_gpio.h>
22#include <linux/pm_runtime.h>
23#include <linux/pm_domain.h>
24#include <linux/property.h>
25#include <linux/export.h>
26#include <linux/sched/rt.h>
27#include <uapi/linux/sched/types.h>
28#include <linux/delay.h>
29#include <linux/kthread.h>
30#include <linux/ioport.h>
31#include <linux/acpi.h>
32#include <linux/highmem.h>
33#include <linux/idr.h>
34#include <linux/platform_data/x86/apple.h>
35
36#define CREATE_TRACE_POINTS
37#include <trace/events/spi.h>
38
39#include "internals.h"
40
/* IDR used to allocate/track dynamic SPI bus numbers for registered masters. */
static DEFINE_IDR(spi_master_idr);
42
43static void spidev_release(struct device *dev)
44{
45 struct spi_device *spi = to_spi_device(dev);
46
47
48 if (spi->controller->cleanup)
49 spi->controller->cleanup(spi);
50
51 spi_controller_put(spi->controller);
52 kfree(spi->driver_override);
53 kfree(spi);
54}
55
56static ssize_t
57modalias_show(struct device *dev, struct device_attribute *a, char *buf)
58{
59 const struct spi_device *spi = to_spi_device(dev);
60 int len;
61
62 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
63 if (len != -ENODEV)
64 return len;
65
66 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
67}
68static DEVICE_ATTR_RO(modalias);
69
/*
 * Store a driver name that forces the bus match for this device
 * (see spi_match_device()).  An empty write clears the override.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* must fit in one page when shown, including trailing newline */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* device_lock() serializes against driver_override_show() and bind */
	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* empty string: clear the override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);	/* free the replaced value outside the lock */

	return count;
}
101
102static ssize_t driver_override_show(struct device *dev,
103 struct device_attribute *a, char *buf)
104{
105 const struct spi_device *spi = to_spi_device(dev);
106 ssize_t len;
107
108 device_lock(dev);
109 len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
110 device_unlock(dev);
111 return len;
112}
113static DEVICE_ATTR_RW(driver_override);
114
/*
 * SPI_STATISTICS_ATTRS(field, file) generates two read-only sysfs
 * attributes for one statistics field: dev_attr_spi_controller_<field>
 * (backed by the controller's statistics) and dev_attr_spi_device_<field>
 * (backed by an individual device's statistics).  Both delegate to the
 * matching spi_statistics_<field>_show() helper.
 */
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}
139
/*
 * SPI_STATISTICS_SHOW_NAME() emits spi_statistics_<name>_show(), which
 * prints a single field under the statistics spinlock, and then
 * instantiates the controller/device attribute pair for it.
 * SPI_STATISTICS_SHOW() is the common case where the sysfs file name
 * is the field name itself.
 */
#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
156
/* Per-message / per-transfer event counters */
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

/* How messages were submitted (sync, sync handled inline, async) */
SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

/* Byte counters: total, and per direction (excluding dummy buffers) */
SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

/*
 * Transfer-size histogram: bucket @index counts transfers whose length
 * falls in the human-readable @number range used as the sysfs file name.
 */
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

/* Number of transfers split because of a controller max-size limit */
SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
193
/* Per-device sysfs attributes: modalias and driver_override */
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

/* Per-device "statistics/" directory contents */
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

/* All sysfs groups attached to every spi_device (via spi_bus_type) */
static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
246
/* Controller "statistics/" directory contents (mirrors the device set) */
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

/* sysfs groups attached to every registered controller */
static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
288
/**
 * spi_statistics_add_transfer_stats - account one transfer in a stats block
 * @stats: statistics block to update (a controller's or a device's)
 * @xfer:  the transfer being accounted
 * @ctlr:  controller, used to recognise its internal dummy buffers
 *
 * Bumps the transfer count, the log2-sized histogram bucket and the byte
 * counters.  Bytes moved through the controller's dummy buffers are not
 * counted as real rx/tx traffic.
 */
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	/* fls(0) == 0, so zero-length transfers land in the first bucket */
	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
315
316
317
318
319
320static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
321 const struct spi_device *sdev)
322{
323 while (id->name[0]) {
324 if (!strcmp(sdev->modalias, id->name))
325 return id;
326 id++;
327 }
328 return NULL;
329}
330
/**
 * spi_get_device_id - get the legacy spi_device_id entry matching a device
 * @sdev: device whose bound driver's id_table is searched
 *
 * Must only be called while a driver is bound to @sdev (it dereferences
 * sdev->dev.driver).  Returns NULL when no entry matches.
 */
const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	/*
	 * NOTE(review): sdrv->id_table may be NULL for OF/ACPI-only drivers,
	 * and spi_match_id() as written dereferences it unconditionally —
	 * confirm callers only use this with drivers that provide a table.
	 */
	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
338
/*
 * Bus match callback.  Match priority: sysfs driver_override first, then
 * OF compatible, then ACPI id, then the driver's legacy id table, and
 * finally a plain modalias vs. driver-name comparison.
 */
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
361
362static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
363{
364 const struct spi_device *spi = to_spi_device(dev);
365 int rc;
366
367 rc = acpi_device_uevent_modalias(dev, env);
368 if (rc != -ENODEV)
369 return rc;
370
371 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
372}
373
/* The "spi" bus type that all spi_devices and spi_drivers register on. */
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
381
382
/*
 * Bus probe shim: apply DT clock defaults, resolve the device IRQ from
 * the OF node, attach the PM domain, then call the driver's probe().
 */
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		/* IRQ controller not ready yet: retry the probe later */
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;	/* no valid IRQ mapping */
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	ret = sdrv->probe(spi);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}
411
412static int spi_drv_remove(struct device *dev)
413{
414 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
415 int ret;
416
417 ret = sdrv->remove(to_spi_device(dev));
418 dev_pm_domain_detach(dev, true);
419
420 return ret;
421}
422
423static void spi_drv_shutdown(struct device *dev)
424{
425 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
426
427 sdrv->shutdown(to_spi_device(dev));
428}
429
430
431
432
433
434
435
436
437
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Wires the generic bus probe/remove/shutdown shims only for the
 * callbacks the driver actually provides, then registers with the
 * driver core.
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
450EXPORT_SYMBOL_GPL(__spi_register_driver);
451
452
453
454
455
456
457
458
459
/*
 * Board info registered early via spi_register_board_info() is kept on
 * board_list forever, so a controller (re)appearing later can still
 * instantiate its hard-wired devices.
 */
struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately, so device parameters can be filled in
 * before calling spi_add_device().
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * device, or spi_dev_put() to discard it without adding.
 *
 * Return: a pointer to the new device, or NULL on failure.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	/* Pin the controller for the lifetime of the child device */
	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;		/* no GPIO chipselect assigned yet */

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
517
518static void spi_dev_set_name(struct spi_device *spi)
519{
520 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
521
522 if (adev) {
523 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
524 return;
525 }
526
527 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
528 spi->chip_select);
529}
530
531static int spi_dev_check(struct device *dev, void *data)
532{
533 struct spi_device *spi = to_spi_device(dev);
534 struct spi_device *new_spi = data;
535
536 if (spi->controller == new_spi->controller &&
537 spi->chip_select == new_spi->chip_select)
538 return -EBUSY;
539 return 0;
540}
541
542
543
544
545
546
547
548
549
550
/**
 * spi_add_device - Add a spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device().  Devices allocated with
 * spi_alloc_device() can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Adopt the controller's GPIO chipselect for this line, if any */
	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention (not a
	 * problem for now), we can't fail until we can report the
	 * right chipselect.
	 */
	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
675
676
677
678
679
680
681
682
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister (may be NULL)
 *
 * Start making the passed SPI device vanish.  Normally this would be
 * handled by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	/* Allow the OF/ACPI node to be re-enumerated later */
	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
697
698static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
699 struct spi_board_info *bi)
700{
701 struct spi_device *dev;
702
703 if (ctlr->bus_num != bi->bus_num)
704 return;
705
706 dev = spi_new_device(ctlr, bi);
707 if (!dev)
708 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
709 bi->modalias);
710}
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created
 * later, after the relevant parent SPI controller (bus_num) is defined.
 * We keep this table of devices forever, so that reloading a controller
 * driver will not make Linux forget about these hard-wired devices.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			/*
			 * NOTE(review): on failure, the kcalloc()'ed array
			 * (and earlier duplicated properties) is leaked;
			 * entries from prior iterations are already on
			 * board_list, so they cannot simply be freed here.
			 */
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		/* Create devices on every already-registered controller */
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
767
768
769
/*
 * Drive the chipselect for @spi.  @enable is the logical state (asserted
 * or not); SPI_CS_HIGH inverts the physical polarity.  A valid GPIO
 * chipselect is driven directly, and the controller's set_cs() is also
 * invoked when it asked for that via SPI_MASTER_GPIO_SS, or exclusively
 * when no GPIO is available.
 */
static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		/* Honour the SPI_NO_CS flag */
		if (!(spi->mode & SPI_NO_CS))
			gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}
787
788#ifdef CONFIG_HAS_DMA
/*
 * Build a scatter table covering @buf and DMA-map it for @dir.  Handles
 * lowmem, vmalloc and (with CONFIG_HIGHMEM) kmap'ed buffers; the latter
 * two are mapped page by page, since virtually contiguous pages need not
 * be physically contiguous.
 */
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	/* Size each descriptor and count how many entries we need */
	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;	/* not a DMA-able address */
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;	/* dma_map_sg() returns 0 on failure */
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	/* dma_map_sg() may coalesce entries; record the mapped count */
	sgt->nents = ret;

	return 0;
}
869
870void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
871 struct sg_table *sgt, enum dma_data_direction dir)
872{
873 if (sgt->orig_nents) {
874 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
875 sg_free_table(sgt);
876 }
877}
878
/*
 * DMA-map every transfer in @msg that the controller's can_dma() accepts,
 * using the DMA channels' devices (or the controller's parent) for the
 * mapping.  Sets cur_msg_mapped so spi_unmap_msg() knows to undo it.
 */
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	/*
	 * NOTE(review): on a mid-message failure above, transfers mapped in
	 * earlier iterations stay mapped (cur_msg_mapped is never set, so
	 * spi_unmap_msg() won't clean them up) — worth confirming upstream.
	 */
	ctlr->cur_msg_mapped = true;

	return 0;
}
926
/* Reverse of __spi_map_msg(): unmap every DMA-able transfer in @msg. */
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	/* Nothing to do unless __spi_map_msg() actually mapped this message */
	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
955#else
/* !CONFIG_HAS_DMA: message mapping is a no-op. */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}
961
/* !CONFIG_HAS_DMA: message unmapping is a no-op. */
static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
967#endif
968
/*
 * Undo spi_map_msg(): drop any dummy-buffer substitution performed there,
 * then DMA-unmap the message.
 */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they
		 * are NULL (i.e. the dummy buffers were substituted by
		 * spi_map_msg()).
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}
987
/*
 * Prepare @msg for transfer: when the controller insists on having both
 * buffers (SPI_CONTROLLER_MUST_RX/TX), grow its dummy buffers to cover
 * the largest buffer-less transfer and substitute them in, then DMA-map
 * the whole message via __spi_map_msg().  spi_unmap_msg() undoes the
 * substitution.
 */
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		/* Find the largest transfer missing each direction */
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		/* Dummy tx is zeroed so that 0s are shifted out */
		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		/* Point buffer-less transfers at the dummies */
		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
1037
1038static int spi_transfer_wait(struct spi_controller *ctlr,
1039 struct spi_message *msg,
1040 struct spi_transfer *xfer)
1041{
1042 struct spi_statistics *statm = &ctlr->statistics;
1043 struct spi_statistics *stats = &msg->spi->statistics;
1044 unsigned long long ms = 1;
1045
1046 if (spi_controller_is_slave(ctlr)) {
1047 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1048 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1049 return -EINTR;
1050 }
1051 } else {
1052 ms = 8LL * 1000LL * xfer->len;
1053 do_div(ms, xfer->speed_hz);
1054 ms += ms + 200;
1055
1056 if (ms > UINT_MAX)
1057 ms = UINT_MAX;
1058
1059 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1060 msecs_to_jiffies(ms));
1061
1062 if (ms == 0) {
1063 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1064 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1065 dev_err(&msg->spi->dev,
1066 "SPI transfer timed out\n");
1067 return -ETIMEDOUT;
1068 }
1069 }
1070
1071 return 0;
1072}
1073
1074
1075
1076
1077
1078
1079
1080
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			/* transfer_one() > 0 means "wait for completion" */
			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			/* busy-wait for short delays, sleep otherwise */
			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			/* On the last transfer, leave CS asserted */
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
1171
1172
1173
1174
1175
1176
1177
1178
1179
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled
 * (it wakes spi_transfer_wait()).
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		/* Free the dummy buffers and unprepare hardware */
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	/* io_mutex serializes the actual hardware access */
	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
1335
1336
1337
1338
1339
1340static void spi_pump_messages(struct kthread_work *work)
1341{
1342 struct spi_controller *ctlr =
1343 container_of(work, struct spi_controller, pump_messages);
1344
1345 __spi_pump_messages(ctlr, true);
1346}
1347
1348static int spi_init_queue(struct spi_controller *ctlr)
1349{
1350 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1351
1352 ctlr->running = false;
1353 ctlr->busy = false;
1354
1355 kthread_init_worker(&ctlr->kworker);
1356 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1357 "%s", dev_name(&ctlr->dev));
1358 if (IS_ERR(ctlr->kworker_task)) {
1359 dev_err(&ctlr->dev, "failed to create message pump task\n");
1360 return PTR_ERR(ctlr->kworker_task);
1361 }
1362 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1363
1364
1365
1366
1367
1368
1369
1370
1371 if (ctlr->rt) {
1372 dev_info(&ctlr->dev,
1373 "will run message pump with realtime priority\n");
1374 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m);
1375 }
1376
1377 return 0;
1378}
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1391{
1392 struct spi_message *next;
1393 unsigned long flags;
1394
1395
1396 spin_lock_irqsave(&ctlr->queue_lock, flags);
1397 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1398 queue);
1399 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1400
1401 return next;
1402}
1403EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1404
1405
1406
1407
1408
1409
1410
1411
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	/* Snapshot the in-flight message under the queue lock */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	/* Undo a successful ->prepare_message(); errors are only logged */
	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	/* Clear cur_msg and kick the pump to pick up the next queued message */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	/* Run the caller's completion last; mesg may be freed by it */
	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1445
1446static int spi_start_queue(struct spi_controller *ctlr)
1447{
1448 unsigned long flags;
1449
1450 spin_lock_irqsave(&ctlr->queue_lock, flags);
1451
1452 if (ctlr->running || ctlr->busy) {
1453 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1454 return -EBUSY;
1455 }
1456
1457 ctlr->running = true;
1458 ctlr->cur_msg = NULL;
1459 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1460
1461 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1462
1463 return 0;
1464}
1465
1466static int spi_stop_queue(struct spi_controller *ctlr)
1467{
1468 unsigned long flags;
1469 unsigned limit = 500;
1470 int ret = 0;
1471
1472 spin_lock_irqsave(&ctlr->queue_lock, flags);
1473
1474
1475
1476
1477
1478
1479
1480 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1481 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1482 usleep_range(10000, 11000);
1483 spin_lock_irqsave(&ctlr->queue_lock, flags);
1484 }
1485
1486 if (!list_empty(&ctlr->queue) || ctlr->busy)
1487 ret = -EBUSY;
1488 else
1489 ctlr->running = false;
1490
1491 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1492
1493 if (ret) {
1494 dev_warn(&ctlr->dev, "could not stop message queue\n");
1495 return ret;
1496 }
1497 return ret;
1498}
1499
1500static int spi_destroy_queue(struct spi_controller *ctlr)
1501{
1502 int ret;
1503
1504 ret = spi_stop_queue(ctlr);
1505
1506
1507
1508
1509
1510
1511
1512 if (ret) {
1513 dev_err(&ctlr->dev, "problem destroying queue\n");
1514 return ret;
1515 }
1516
1517 kthread_flush_worker(&ctlr->kworker);
1518 kthread_stop(ctlr->kworker_task);
1519
1520 return 0;
1521}
1522
/*
 * __spi_queued_transfer - queue an SPI message on the controller's queue
 * @spi: SPI device submitting the message
 * @msg: message to enqueue
 * @need_pump: when true, kick the message pump if the controller is idle
 *
 * Return: 0 on success, -ESHUTDOWN if the queue is not running.
 */
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	/* Only wake the pump when idle; a busy pump re-checks the queue */
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
1546
1547
1548
1549
1550
1551
1552
1553
1554static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1555{
1556 return __spi_queued_transfer(spi, msg, true);
1557}
1558
1559static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1560{
1561 int ret;
1562
1563 ctlr->transfer = spi_queued_transfer;
1564 if (!ctlr->transfer_one_message)
1565 ctlr->transfer_one_message = spi_transfer_one_message;
1566
1567
1568 ret = spi_init_queue(ctlr);
1569 if (ret) {
1570 dev_err(&ctlr->dev, "problem initializing queue\n");
1571 goto err_init_queue;
1572 }
1573 ctlr->queued = true;
1574 ret = spi_start_queue(ctlr);
1575 if (ret) {
1576 dev_err(&ctlr->dev, "problem starting queue\n");
1577 goto err_start_queue;
1578 }
1579
1580 return 0;
1581
1582err_start_queue:
1583 spi_destroy_queue(ctlr);
1584err_init_queue:
1585 return ret;
1586}
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
/*
 * spi_flush_queue - pump pending messages from the caller's context
 * @ctlr: controller to process queue for
 *
 * Only meaningful for queued controllers (ctlr->transfer ==
 * spi_queued_transfer); runs the pump synchronously without taking
 * runtime-PM/unprepare shortcuts (in_kthread = false).
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}
1603
1604
1605
1606#if defined(CONFIG_OF)
1607static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1608 struct device_node *nc)
1609{
1610 u32 value;
1611 int rc;
1612
1613
1614 if (of_property_read_bool(nc, "spi-cpha"))
1615 spi->mode |= SPI_CPHA;
1616 if (of_property_read_bool(nc, "spi-cpol"))
1617 spi->mode |= SPI_CPOL;
1618 if (of_property_read_bool(nc, "spi-cs-high"))
1619 spi->mode |= SPI_CS_HIGH;
1620 if (of_property_read_bool(nc, "spi-3wire"))
1621 spi->mode |= SPI_3WIRE;
1622 if (of_property_read_bool(nc, "spi-lsb-first"))
1623 spi->mode |= SPI_LSB_FIRST;
1624
1625
1626 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1627 switch (value) {
1628 case 1:
1629 break;
1630 case 2:
1631 spi->mode |= SPI_TX_DUAL;
1632 break;
1633 case 4:
1634 spi->mode |= SPI_TX_QUAD;
1635 break;
1636 case 8:
1637 spi->mode |= SPI_TX_OCTAL;
1638 break;
1639 default:
1640 dev_warn(&ctlr->dev,
1641 "spi-tx-bus-width %d not supported\n",
1642 value);
1643 break;
1644 }
1645 }
1646
1647 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1648 switch (value) {
1649 case 1:
1650 break;
1651 case 2:
1652 spi->mode |= SPI_RX_DUAL;
1653 break;
1654 case 4:
1655 spi->mode |= SPI_RX_QUAD;
1656 break;
1657 case 8:
1658 spi->mode |= SPI_RX_OCTAL;
1659 break;
1660 default:
1661 dev_warn(&ctlr->dev,
1662 "spi-rx-bus-width %d not supported\n",
1663 value);
1664 break;
1665 }
1666 }
1667
1668 if (spi_controller_is_slave(ctlr)) {
1669 if (!of_node_name_eq(nc, "slave")) {
1670 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1671 nc);
1672 return -EINVAL;
1673 }
1674 return 0;
1675 }
1676
1677
1678 rc = of_property_read_u32(nc, "reg", &value);
1679 if (rc) {
1680 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1681 nc, rc);
1682 return rc;
1683 }
1684 spi->chip_select = value;
1685
1686
1687 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1688 if (rc) {
1689 dev_err(&ctlr->dev,
1690 "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1691 return rc;
1692 }
1693 spi->max_speed_hz = value;
1694
1695 return 0;
1696}
1697
/*
 * of_register_spi_device - allocate and register one SPI device from DT
 * @ctlr: controller the device is attached to
 * @nc:   devicetree node describing the device
 *
 * Return: the registered spi_device, or an ERR_PTR on failure.
 */
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure;
	 * the reference is dropped on the error path below. */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
1743
1744
1745
1746
1747
1748
1749
1750
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		/* Skip nodes already populated (e.g. by earlier enumeration) */
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			/* Clear the flag so a later rescan may retry */
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
1770#else
1771static void of_register_spi_devices(struct spi_controller *ctlr) { }
1772#endif
1773
1774#ifdef CONFIG_ACPI
/*
 * acpi_spi_parse_apple_properties - read Apple-specific ACPI device
 * properties into @spi
 *
 * Apple x86 machines describe SPI slaves with vendor properties instead
 * of (or in addition to) a SpiSerialBus resource. Each property is an
 * ACPI buffer whose raw bytes are reinterpreted by the casts below
 * (assumes host-endian layout of the firmware buffers — matches x86,
 * the only place x86_apple_machine is set).
 */
static void acpi_spi_parse_apple_properties(struct spi_device *spi)
{
	struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	/* Clock period in ns converted to a max speed in Hz */
	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		spi->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		spi->bits_per_word = *(u64 *)obj->buffer.pointer;

	/* A zero value means LSB-first bit order */
	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPHA;
}
1803
/*
 * acpi_spi_add_resource - acpi_dev_get_resources() callback that fills
 * @data (an spi_device) from one ACPI resource
 *
 * Picks up chip select, speed and mode bits from a SpiSerialBus
 * resource, and the IRQ from the first interrupt resource seen.
 *
 * Return: 1 to keep walking resources, or a negative error code from
 * the controller's ->fw_translate_cs() hook.
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
1850
/*
 * acpi_register_spi_device - create and register an spi_device for an
 * ACPI node on @ctlr
 *
 * Devices that are absent, already enumerated, or that yield no usable
 * SpiSerialBus description (no max_speed_hz) are silently skipped.
 *
 * Return: AE_OK (walk continues) or AE_NO_MEMORY on allocation failure.
 */
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	/* Walk the node's resources to fill chip select/speed/mode/IRQ */
	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	acpi_spi_parse_apple_properties(spi);

	/* No usable description -> not an SPI slave, drop it quietly */
	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	/* Fall back to a GpioInt if no interrupt resource was found */
	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
1902
/*
 * acpi_spi_add_device - acpi_walk_namespace() callback; @data is the
 * spi_controller being enumerated. Nodes without a struct acpi_device
 * are skipped.
 */
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}
1914
1915static void acpi_register_spi_devices(struct spi_controller *ctlr)
1916{
1917 acpi_status status;
1918 acpi_handle handle;
1919
1920 handle = ACPI_HANDLE(ctlr->dev.parent);
1921 if (!handle)
1922 return;
1923
1924 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1925 acpi_spi_add_device, NULL, ctlr, NULL);
1926 if (ACPI_FAILURE(status))
1927 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1928}
1929#else
1930static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1931#endif
1932
1933static void spi_controller_release(struct device *dev)
1934{
1935 struct spi_controller *ctlr;
1936
1937 ctlr = container_of(dev, struct spi_controller, dev);
1938 kfree(ctlr);
1939}
1940
/* Device class for SPI master controllers (/sys/class/spi_master) */
static struct class spi_master_class = {
	.name = "spi_master",
	.owner = THIS_MODULE,
	.dev_release = spi_controller_release,
	.dev_groups = spi_master_groups,
};
1947
1948#ifdef CONFIG_SPI_SLAVE
1949
1950
1951
1952
1953
1954int spi_slave_abort(struct spi_device *spi)
1955{
1956 struct spi_controller *ctlr = spi->controller;
1957
1958 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1959 return ctlr->slave_abort(ctlr);
1960
1961 return -ENOTSUPP;
1962}
1963EXPORT_SYMBOL_GPL(spi_slave_abort);
1964
/* device_find_child() helper that matches any child device */
static int match_true(struct device *dev, void *data)
{
	return 1;
}
1969
1970static ssize_t spi_slave_show(struct device *dev,
1971 struct device_attribute *attr, char *buf)
1972{
1973 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1974 dev);
1975 struct device *child;
1976
1977 child = device_find_child(&ctlr->dev, NULL, match_true);
1978 return sprintf(buf, "%s\n",
1979 child ? to_spi_device(child)->modalias : NULL);
1980}
1981
/*
 * spi_slave_store - sysfs "slave" write: unregister the current slave
 * device (if any) and, unless the written name is "(null)", register a
 * new one with the given modalias.
 *
 * Return: @count on success, else a negative error code.
 */
static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave (drop find_child's reference too) */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}
2021
/* sysfs attribute exposing/controlling the registered slave device */
static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

/* Slave controllers get the shared statistics plus the "slave" attr */
static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

/* Device class for SPI slave controllers (/sys/class/spi_slave) */
static struct class spi_slave_class = {
	.name = "spi_slave",
	.owner = THIS_MODULE,
	.dev_release = spi_controller_release,
	.dev_groups = spi_slave_groups,
};
2045#else
2046extern struct class spi_slave_class;
2047#endif
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_controller_get_devdata()
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(), and then
 * calling spi_controller_put() to prevent a memory leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller *ctlr;

	if (!dev)
		return NULL;

	/* driver-private data lives directly after the controller struct */
	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;		/* dynamic allocation by default */
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2099
2100#ifdef CONFIG_OF
/*
 * of_spi_register_master - parse the "cs-gpios" property of the
 * controller's DT node into ctlr->cs_gpios
 *
 * num_chipselect is grown to cover all listed GPIOs; slots without a
 * GPIO stay -ENOENT.
 *
 * Return: 0 on success (including absent property), else a negative
 * error code.
 */
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	/* devm allocation: freed automatically with the controller device */
	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
2133#else
2134static int of_spi_register_master(struct spi_controller *ctlr)
2135{
2136 return 0;
2137}
2138#endif
2139
2140static int spi_controller_check_ops(struct spi_controller *ctlr)
2141{
2142
2143
2144
2145
2146
2147
2148
2149 if (ctlr->mem_ops) {
2150 if (!ctlr->mem_ops->exec_op)
2151 return -EINVAL;
2152 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2153 !ctlr->transfer_one_message) {
2154 return -EINVAL;
2155 }
2156
2157 return 0;
2158}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Picks a bus number (fixed, DT alias, or dynamic), registers the device,
 * starts the message queue for queued drivers, and enumerates children
 * from board info, devicetree and ACPI.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (!spi_controller_is_slave(ctlr)) {
		status = of_spi_register_master(ctlr);
		if (status)
			return status;
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (ctlr->num_chipselect == 0)
		return -EINVAL;
	if (ctlr->bus_num >= 0) {
		/* Devices with a fixed bus num must check-in with the num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* Allocate the bus num from the DT "spi" alias, if any */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		/* Dynamic numbers start above the highest DT alias */
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
	status = device_add(&ctlr->dev);
	if (status < 0) {
		/* free bus id */
		mutex_lock(&board_lock);
		idr_remove(&spi_master_idr, ctlr->bus_num);
		mutex_unlock(&board_lock);
		goto done;
	}
	dev_dbg(dev, "registered %s %s\n",
		spi_controller_is_slave(ctlr) ? "slave" : "master",
		dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue. Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			/* free bus id */
			mutex_lock(&board_lock);
			idr_remove(&spi_master_idr, ctlr->bus_num);
			mutex_unlock(&board_lock);
			goto done;
		}
	}
	/* Add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
2309
/* devres release callback: unregister the managed controller */
static void devm_spi_unregister(struct device *dev, void *res)
{
	struct spi_controller **ctlr = res;

	spi_unregister_controller(*ctlr);
}
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328int devm_spi_register_controller(struct device *dev,
2329 struct spi_controller *ctlr)
2330{
2331 struct spi_controller **ptr;
2332 int ret;
2333
2334 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2335 if (!ptr)
2336 return -ENOMEM;
2337
2338 ret = spi_register_controller(ctlr);
2339 if (!ret) {
2340 *ptr = ctlr;
2341 devres_add(dev, ptr);
2342 } else {
2343 devres_free(ptr);
2344 }
2345
2346 return ret;
2347}
2348EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2349
/* device_for_each_child() helper: unregister one child SPI device */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;
	int dummy;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	/* Unregister all children before the controller itself */
	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
	/* Free bus id, but only if it is still ours (see idr_find above) */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
2395
/*
 * spi_controller_suspend - stop the message queue for system suspend
 * @ctlr: controller being suspended
 *
 * Return: 0 (including for non-queued controllers, where this is a
 * no-op), else the error from spi_stop_queue().
 */
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);
2411
/*
 * spi_controller_resume - restart the message queue after system suspend
 * @ctlr: controller being resumed
 *
 * Return: 0 (no-op for non-queued controllers), else the error from
 * spi_start_queue().
 */
int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
2426
2427static int __spi_controller_match(struct device *dev, const void *data)
2428{
2429 struct spi_controller *ctlr;
2430 const u16 *bus_num = data;
2431
2432 ctlr = container_of(dev, struct spi_controller, dev);
2433 return ctlr->bus_num == *bus_num;
2434}
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448struct spi_controller *spi_busnum_to_master(u16 bus_num)
2449{
2450 struct device *dev;
2451 struct spi_controller *ctlr = NULL;
2452
2453 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2454 __spi_controller_match);
2455 if (dev)
2456 ctlr = container_of(dev, struct spi_controller, dev);
2457
2458 return ctlr;
2459}
2460EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: pointer to the @size bytes of resource data, or NULL on
 * allocation failure. The spi_res header is recovered from this
 * pointer via container_of (see spi_res_free/spi_res_add).
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	/* hand the caller the data area, not the header */
	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
2496
2497
2498
2499
2500
2501
2502void spi_res_free(void *res)
2503{
2504 struct spi_res *sres = container_of(res, struct spi_res, data);
2505
2506 if (!res)
2507 return;
2508
2509 WARN_ON(!list_empty(&sres->entry));
2510 kfree(sres);
2511}
2512EXPORT_SYMBOL_GPL(spi_res_free);
2513
2514
2515
2516
2517
2518
/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource (data pointer from spi_res_alloc())
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	/* a resource may only belong to one message at a time */
	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
2527
2528
2529
2530
2531
2532
2533void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2534{
2535 struct spi_res *res;
2536
2537 while (!list_empty(&message->resources)) {
2538 res = list_last_entry(&message->resources,
2539 struct spi_res, entry);
2540
2541 if (res->release)
2542 res->release(ctlr, message, res->data);
2543
2544 list_del(&res->entry);
2545
2546 kfree(res);
2547 }
2548}
2549EXPORT_SYMBOL_GPL(spi_res_release);
2550
2551
2552
2553
2554
/*
 * __spi_replace_transfers_release - spi_res release handler that undoes
 * a spi_replace_transfers() call
 * @ctlr: the spi_controller
 * @msg:  the message the transfers belong to
 * @res:  the embedded struct spi_replaced_transfers
 */
static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert the replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * The replacement is undone automatically when the message's resources
 * are released (see __spi_replace_transfers_release).
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(-insert_or_errno) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata (lives right after the inserted transfers) */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after it is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copy of the given xfer with identical settings
	 * based on the first transfer to get removed.
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
2683
/*
 * __spi_split_transfer_maxsize - replace one over-long transfer in @msg
 * with ceil(len / maxsize) transfers of at most @maxsize bytes each
 * @xferp: in: the transfer to split; out: the last inserted transfer,
 *         so the caller's list walk continues after the split set
 *
 * Return: 0 on success, else a negative error code.
 */
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement; all inserted copies start as clones of xfer */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacements spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * This also includes support for the depreciated
	 * spi_message.is_dma_mapped interface.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:   the @spi_message to transform
 * @maxsize:  the maximum when to apply this
 * @gfp: GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2791
2792
2793
2794
2795
2796
2797
2798static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2799 u8 bits_per_word)
2800{
2801 if (ctlr->bits_per_word_mask) {
2802
2803 if (bits_per_word > 32)
2804 return -EINVAL;
2805 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2806 return -EINVAL;
2807 }
2808
2809 return 0;
2810}
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need to
 * update clock rates or word sizes from initial values.  This function
 * validates the requested mode bits against the controller's
 * capabilities, applies defaults for bits_per_word and max_speed_hz,
 * invokes the controller's optional setup() hook, and deselects the
 * chip.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* Dual and quad cannot be requested simultaneously per direction. */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}

	/* 3-wire mode shares one data line, so multi-bit I/O is forbidden. */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;

	/*
	 * Help drivers fail *cleanly* when they need options that aren't
	 * supported with their current controller.  SPI_CS_WORD is excluded
	 * because __spi_validate() can emulate it by splitting transfers.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		/* Multi-bit I/O requests are merely dropped with a warning... */
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		/* ...while any other unsupported mode bit is a hard error. */
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	/*
	 * NOTE(review): the chip is deselected even when setup() above
	 * returned an error — presumably intentional so CS is left in a
	 * known inactive state; confirm before relying on it.
	 */
	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
2901
/*
 * Validate a message before submission and fill in per-transfer defaults
 * (bits_per_word, speed_hz, tx/rx_nbits).  Shared by spi_async(),
 * spi_async_locked() and __spi_sync().  Returns 0 or -EINVAL; may also
 * propagate an allocation error from spi_split_transfers_maxsize().
 */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/*
	 * If the controller can't toggle CS per word (no SPI_CS_WORD in
	 * mode_bits) or a GPIO drives the CS line, emulate SPI_CS_WORD by
	 * splitting the message into one-word transfers and setting
	 * cs_change on each of them.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  gpio_is_valid(spi->cs_gpio))) {
		size_t maxsize;
		int ret;

		/* One word, rounded up to whole bytes. */
		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() needs message->spi set. */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list. */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/*
	 * Half-duplex links and 3-wire mode share a single data line, so a
	 * transfer may not carry both rx_buf and tx_buf, and a controller
	 * flagged NO_TX/NO_RX cannot do that direction at all.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Fill in per-transfer defaults from the device (then the
	 * controller), clamp the speed to the controller's maximum, and
	 * validate word size and transfer-length alignment.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * Transfer length must be a multiple of the in-memory word
		 * size (1, 2 or 4 bytes depending on bits_per_word).
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted. */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/*
		 * Check transfer tx_nbits:
		 * 1. the value must be single, dual or quad;
		 * 2. dual/quad must be enabled in the device's mode bits.
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* Same checks for the receive direction. */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
3040
3041static int __spi_async(struct spi_device *spi, struct spi_message *message)
3042{
3043 struct spi_controller *ctlr = spi->controller;
3044
3045
3046
3047
3048
3049 if (!ctlr->transfer)
3050 return -ENOTSUPP;
3051
3052 message->spi = spi;
3053
3054 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3055 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3056
3057 trace_spi_message_submit(message);
3058
3059 return ctlr->transfer(spi, message);
3060}
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093int spi_async(struct spi_device *spi, struct spi_message *message)
3094{
3095 struct spi_controller *ctlr = spi->controller;
3096 int ret;
3097 unsigned long flags;
3098
3099 ret = __spi_validate(spi, message);
3100 if (ret != 0)
3101 return ret;
3102
3103 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3104
3105 if (ctlr->bus_lock_flag)
3106 ret = -EBUSY;
3107 else
3108 ret = __spi_async(spi, message);
3109
3110 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3111
3112 return ret;
3113}
3114EXPORT_SYMBOL_GPL(spi_async);
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3148{
3149 struct spi_controller *ctlr = spi->controller;
3150 int ret;
3151 unsigned long flags;
3152
3153 ret = __spi_validate(spi, message);
3154 if (ret != 0)
3155 return ret;
3156
3157 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3158
3159 ret = __spi_async(spi, message);
3160
3161 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3162
3163 return ret;
3164
3165}
3166EXPORT_SYMBOL_GPL(spi_async_locked);
3167
3168
3169
3170
3171
3172
3173
3174
/* Completion callback installed by __spi_sync(): wake the waiting caller. */
static void spi_complete(void *arg)
{
	struct completion *done = arg;

	complete(done);
}
3179
/*
 * Core of spi_sync()/spi_sync_locked(): submit the message with a
 * completion callback and block until it finishes.  Callers provide the
 * required bus locking (spi_sync() takes bus_lock_mutex itself).
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * Controllers using the core message queue (spi_queued_transfer)
	 * are queued directly here so the transfer can then be pushed in
	 * the calling context; anything else goes through the legacy
	 * spi_async_locked() path.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we can. */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		/* Block until spi_complete() fires, then report the result. */
		wait_for_completion(&done);
		status = message->status;
	}
	/* The on-stack completion must not outlive this frame. */
	message->context = NULL;
	return status;
}
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255int spi_sync(struct spi_device *spi, struct spi_message *message)
3256{
3257 int ret;
3258
3259 mutex_lock(&spi->controller->bus_lock_mutex);
3260 ret = __spi_sync(spi, message);
3261 mutex_unlock(&spi->controller->bus_lock_mutex);
3262
3263 return ret;
3264}
3265EXPORT_SYMBOL_GPL(spi_sync);
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * Identical to spi_sync() except that it does not take bus_lock_mutex;
 * the caller must already hold the bus via spi_bus_lock() and release it
 * with spi_bus_unlock() afterwards.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * Takes bus_lock_mutex and leaves it held until spi_bus_unlock().  Also
 * sets bus_lock_flag under bus_lock_spinlock, which makes spi_async()
 * fail with -EBUSY for the duration of the lock.
 *
 * Return: always 0.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	/* Flag update under the spinlock so spi_async() sees it atomically. */
	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* The mutex remains locked until spi_bus_unlock() is called. */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * Clears bus_lock_flag (re-enabling spi_async() submissions) and drops
 * the bus_lock_mutex taken by spi_bus_lock().
 *
 * NOTE(review): unlike spi_bus_lock(), the flag is cleared without
 * bus_lock_spinlock held — presumably safe since the caller still owns
 * the mutex; confirm before changing the locking here.
 *
 * Return: always 0.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
3342
3343
3344#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
3345
3346static u8 *buf;
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * Performs a half-duplex transaction: a transfer sending @txbuf
 * followed by a transfer filling @rxbuf, copied through a DMA-safe
 * bounce buffer so the caller's buffers need not be dma-safe.  Because
 * every byte is copied, this is only meant for small transfers (the
 * shared scratch buffer is SPI_BUFSIZ bytes); larger requests fall back
 * to a kmalloc per call.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	/* Serializes use of the shared pre-allocated 'buf' scratch buffer. */
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, but we can keep heap costs out of the hot path
	 * unless someone else is using the buffer or the transfer is too
	 * large for it.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	/* Build a two-transfer message; zero-length halves are skipped. */
	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* tx data at the front of the bounce buffer, rx data right after. */
	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the i/o. */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	/* Shared buffer was used iff tx_buf points at it; else free ours. */
	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
3422
3423
3424
3425#if IS_ENABLED(CONFIG_OF)
3426static int __spi_of_device_match(struct device *dev, void *data)
3427{
3428 return dev->of_node == data;
3429}
3430
3431
3432struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3433{
3434 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3435 __spi_of_device_match);
3436 return dev ? to_spi_device(dev) : NULL;
3437}
3438EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3439#endif
3440
3441#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3442static int __spi_of_controller_match(struct device *dev, const void *data)
3443{
3444 return dev->of_node == data;
3445}
3446
3447
3448static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3449{
3450 struct device *dev;
3451
3452 dev = class_find_device(&spi_master_class, NULL, node,
3453 __spi_of_controller_match);
3454 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3455 dev = class_find_device(&spi_slave_class, NULL, node,
3456 __spi_of_controller_match);
3457 if (!dev)
3458 return NULL;
3459
3460
3461 return container_of(dev, struct spi_controller, dev);
3462}
3463
/*
 * OF reconfiguration notifier: create or destroy spi_devices as their
 * device-tree nodes appear and disappear at runtime.
 */
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		/* The new node's parent must be a registered controller. */
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			/* Node already populated; just drop the controller ref. */
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);	/* balance of_find_spi_controller_by_node() */

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			/* Undo the flag so a later add can retry. */
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node. */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* Unregister takes one ref away... */
		spi_unregister_device(spi);

		/* ...and drop the reference taken by the find above. */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}
3513
/* Registered in spi_init() when CONFIG_OF_DYNAMIC is enabled. */
static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
3517#else
3518extern struct notifier_block spi_of_notifier;
3519#endif
3520
3521#if IS_ENABLED(CONFIG_ACPI)
3522static int spi_acpi_controller_match(struct device *dev, const void *data)
3523{
3524 return ACPI_COMPANION(dev->parent) == data;
3525}
3526
/* bus_find_device() callback: match an SPI device by its ACPI companion. */
static int spi_acpi_device_match(struct device *dev, void *data)
{
	struct acpi_device *adev = data;

	return ACPI_COMPANION(dev) == adev;
}
3531
3532static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3533{
3534 struct device *dev;
3535
3536 dev = class_find_device(&spi_master_class, NULL, adev,
3537 spi_acpi_controller_match);
3538 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3539 dev = class_find_device(&spi_slave_class, NULL, adev,
3540 spi_acpi_controller_match);
3541 if (!dev)
3542 return NULL;
3543
3544 return container_of(dev, struct spi_controller, dev);
3545}
3546
3547static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3548{
3549 struct device *dev;
3550
3551 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3552
3553 return dev ? to_spi_device(dev) : NULL;
3554}
3555
/*
 * ACPI reconfiguration notifier: create or destroy spi_devices as ACPI
 * device objects are added and removed at runtime.
 */
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		/* Only handle children of a registered SPI controller. */
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);	/* balance acpi_spi_find_controller_by_adev() */
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);	/* balance acpi_spi_find_device_by_adev() */
		break;
	}

	return NOTIFY_OK;
}
3587
/* Registered in spi_init() when CONFIG_ACPI is enabled. */
static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
3591#else
3592extern struct notifier_block spi_acpi_notifier;
3593#endif
3594
3595static int __init spi_init(void)
3596{
3597 int status;
3598
3599 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3600 if (!buf) {
3601 status = -ENOMEM;
3602 goto err0;
3603 }
3604
3605 status = bus_register(&spi_bus_type);
3606 if (status < 0)
3607 goto err1;
3608
3609 status = class_register(&spi_master_class);
3610 if (status < 0)
3611 goto err2;
3612
3613 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3614 status = class_register(&spi_slave_class);
3615 if (status < 0)
3616 goto err3;
3617 }
3618
3619 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3620 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3621 if (IS_ENABLED(CONFIG_ACPI))
3622 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3623
3624 return 0;
3625
3626err3:
3627 class_unregister(&spi_master_class);
3628err2:
3629 bus_unregister(&spi_bus_type);
3630err1:
3631 kfree(buf);
3632 buf = NULL;
3633err0:
3634 return status;
3635}
3636
3637
3638
3639
3640
3641
3642
3643
3644postcore_initcall(spi_init);
3645
3646