/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
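
/*
 * Example (illustrative sketch, not part of this file): a protocol driver
 * whose id_table carries per-chip driver_data can recover that data in
 * probe() through spi_get_device_id().  The "foo" names are hypothetical.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo",      0 },
 *		{ "foo-rev2", 1 },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		unsigned long variant = spi_get_device_id(spi)->driver_data;
 *
 *		// configure the chip according to "variant" ...
 *		return 0;
 *	}
 */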

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
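
/*
 * Example (illustrative sketch, not part of this file): a minimal protocol
 * driver built on spi_register_driver(), which wraps __spi_register_driver()
 * with THIS_MODULE as owner.  module_spi_driver() expands to the matching
 * module_init()/module_exit() pair.  The "foo" identifiers are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */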

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process.  It is also used
 * to protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure before it is registered, but this is not adding
 * device, so any jump to error handling will result in a call to
 * spi_dev_put() and not kfree() directly.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of any
 * embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
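
/*
 * Example (illustrative sketch, not part of this file): board init code
 * typically declares its hard-wired SPI devices in a table and registers
 * the table during an arch_initcall.  Chip names and numbers here are
 * hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init board_init_spi(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					ARRAY_SIZE(board_spi_devices));
 *	}
 */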

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
			      struct sg_table *sgt, void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_controller *ctlr,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message()
 * for drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	/* In the prepare_message() callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks; release those split
	 * transfers here, since spi_map_msg() was done on the split set.
	 */
	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
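
/*
 * Example (illustrative sketch, not part of this file): a controller driver
 * using the core transfer_one() path starts the transfer in hardware and
 * returns a positive value to mean "still in progress"; the core then waits
 * on xfer_completion until the driver's completion interrupt calls
 * spi_finalize_current_transfer().  The "foo" helpers are hypothetical.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(ctlr, xfer);
 *		return 1;	// busy; completion reported from the IRQ
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */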

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself as well as from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt) {
		dev_info(&ctlr->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (strcmp(nc->name, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

#ifdef CONFIG_ACPI
static void acpi_spi_parse_apple_properties(struct spi_device *spi)
{
	struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		spi->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	acpi_spi_parse_apple_properties(spi);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t spi_slave_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_controller_get_devdata().
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_controller *ctlr)
{
	return 0;
}
#endif

/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			id, first_dynamic;

	if (!dev)
		return -ENODEV;

	if (!spi_controller_is_slave(ctlr)) {
		status = of_spi_register_master(ctlr);
		if (status)
			return status;
	}

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (ctlr->num_chipselect == 0)
		return -EINVAL;
	/* allocate dynamic bus number using Linux idr */
	if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
	status = device_add(&ctlr->dev);
	if (status < 0) {
		/* free bus id */
		mutex_lock(&board_lock);
		idr_remove(&spi_master_idr, ctlr->bus_num);
		mutex_unlock(&board_lock);
		goto done;
	}
	dev_dbg(dev, "registered %s %s\n",
			spi_controller_is_slave(ctlr) ? "slave" : "master",
			dev_name(&ctlr->dev));

	/* If we're using a queued driver, initialize the queue */
	if (ctlr->transfer)
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	else {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			/* free bus id */
			mutex_lock(&board_lock);
			idr_remove(&spi_master_idr, ctlr->bus_num);
			mutex_unlock(&board_lock);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
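
/*
 * Example (illustrative sketch, not part of this file): the tail of a
 * typical controller driver's platform probe(), allocating a master,
 * filling in the mandatory fields and methods, then registering it via
 * the managed variant below.  The "foo" names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_priv *priv;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */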

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev:    device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;
	int dummy;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (found != ctlr) {
		dev_dbg(&ctlr->dev,
			"attempting to delete unregistered controller [%s]\n",
			dev_name(&ctlr->dev));
		return;
	}
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
	/* free bus id */
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);

static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_controller	*ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
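
/*
 * Example (illustrative sketch, not part of this file): since the lookup
 * above takes a reference via class_find_device(), the caller must drop
 * it when done, e.g. with spi_controller_put().
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		// ... use ctlr ...
 *		spi_controller_put(ctlr);
 *	}
 */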

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message on which to register the resource
 * @res:     the resource to add
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
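
/*
 * Example (illustrative sketch, not part of this file): code that rewrites
 * a message can tie extra memory to the message's lifetime, so it is freed
 * automatically by spi_res_release() when the message completes.  A NULL
 * release callback is allowed; spi_res_release() skips it.  The "foo"
 * type is hypothetical.
 *
 *	struct foo_fixup *fix;
 *
 *	fix = spi_res_alloc(msg->spi, NULL, sizeof(*fix), GFP_KERNEL);
 *	if (!fix)
 *		return -ENOMEM;
 *	// ... record what was changed in *fix ...
 *	spi_res_add(msg, fix);
 */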

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copies of the given xfer, inserted in reverse order so
	 * that list_add() after replaced_after keeps them in sequence
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers
	 * note that the replacement spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length any individual transfer may have
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
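
/*
 * Example (illustrative sketch, not part of this file): a controller
 * driver with a hardware limit on transfer length would typically call
 * this helper from its prepare_message hook; MYDRV_MAX_XFER_LEN and
 * mydrv_prepare_message are hypothetical.
 *
 *	static int mydrv_prepare_message(struct spi_controller *ctlr,
 *					 struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg,
 *						   MYDRV_MAX_XFER_LEN,
 *						   GFP_KERNEL | GFP_DMA);
 *	}
 */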

/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* check mode to prevent that DUAL and QUAD are set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}

	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
	    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;

	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller
	 */
	bad_bits = spi->mode & ~spi->controller->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
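
/*
 * Example (illustrative sketch, not part of this file): a protocol
 * driver overriding the defaults from its probe(); myspi_probe and the
 * chosen mode/speed values are hypothetical.
 *
 *	static int myspi_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 */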

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/* Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/* SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
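
/*
 * Example (illustrative sketch, not part of this file): submitting a
 * message asynchronously and getting notified on completion; struct
 * my_chip and my_msg_complete are hypothetical.  The completion callback
 * runs in a context that must not sleep.
 *
 *	static void my_msg_complete(void *context)
 *	{
 *		struct my_chip *chip = context;
 *
 *		complete(&chip->done);
 *	}
 *
 *	chip->msg.complete = my_msg_complete;
 *	chip->msg.context = chip;
 *	status = spi_async(spi, &chip->msg);
 */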

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Like spi_async(), but intended for callers that have already obtained
 * exclusive use of the bus via spi_bus_lock(); the message is queued even
 * while the bus lock is held.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/**
 * spi_flash_read - read data from SPI flash
 * @spi: device from which data will be read
 * @msg: spi flash read message to be handled
 *
 * Synchronously executes the accelerated flash-read path provided by the
 * controller's spi_flash_read() hook.  May only be called from a context
 * that can sleep.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_controller *master = spi->controller;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
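
/*
 * Example (illustrative sketch, not part of this file): a flash driver
 * issuing an accelerated read; opcode 0x03 is the common "normal read"
 * command, but all field values here are device-specific assumptions.
 *
 *	struct spi_flash_read_message msg = {
 *		.buf = buf,
 *		.from = from,
 *		.len = len,
 *		.read_opcode = 0x03,
 *		.addr_width = 3,
 *		.dummy_bytes = 0,
 *	};
 *
 *	if (spi_flash_read_supported(spi))
 *		ret = spi_flash_read(spi, &msg);
 */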

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * queue and the legacy path.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
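
/*
 * Example (illustrative sketch, not part of this file): a synchronous
 * command/response exchange; cmd and rx are assumed to be caller-provided
 * DMA-safe buffers.
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = cmd, .len = sizeof(cmd), },
 *		{ .rx_buf = rx, .len = sizeof(rx), },
 *	};
 *	struct spi_message msg;
 *	int status;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	status = spi_sync(spi, &msg);
 */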

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an
 * spi_bus_lock call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
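
/*
 * Example (illustrative sketch, not part of this file): keeping two
 * messages back to back on the bus without other devices interleaving;
 * setup_msg and data_msg are hypothetical, previously built messages.
 *
 *	spi_bus_lock(spi->controller);
 *	status = spi_sync_locked(spi, &setup_msg);
 *	if (!status)
 *		status = spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->controller);
 */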

/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8 *buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
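
/*
 * Example (illustrative sketch, not part of this file): reading one
 * register of a device whose protocol sets bit 7 of the command byte for
 * reads; that 0x80 convention is a hypothetical, device-specific detail.
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *	int status;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */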

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
					     __spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_controller_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, node,
					__spi_of_controller_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);