/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/*-------------------------------------------------------------------------*/

/*
 * Modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
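/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */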
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process; also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);
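/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure before it is registered, but if the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */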
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
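/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */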
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
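/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On a typical system, this can be used to declare SPI devices that aren't
 * statically declared in the board-specific tables or the device tree.
 *
 * Return: the new device, or NULL.
 */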
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
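/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */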
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
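/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Return: zero on success, else a negative error code.
 */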
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
			      struct sg_table *sgt, void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_controller *ctlr,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
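/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message()
 * for drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */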
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
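/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */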
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
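/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */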
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt) {
		dev_info(&ctlr->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
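/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */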
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message pending in the queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
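/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */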
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (strcmp(nc->name, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
static void acpi_spi_parse_apple_properties(struct spi_device *spi)
{
	struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		spi->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	acpi_spi_parse_apple_properties(spi);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
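/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */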
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t spi_slave_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif
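/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata().
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */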
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller	*ctlr;

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, &ctlr[1]);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_controller *ctlr)
{
	return 0;
}
#endif
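/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */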
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			id, first_dynamic;

	if (!dev)
		return -ENODEV;

	if (!spi_controller_is_slave(ctlr)) {
		status = of_spi_register_master(ctlr);
		if (status)
			return status;
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (ctlr->num_chipselect == 0)
		return -EINVAL;

	if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
	status = device_add(&ctlr->dev);
	if (status < 0) {
		/* free bus id */
		mutex_lock(&board_lock);
		idr_remove(&spi_master_idr, ctlr->bus_num);
		mutex_unlock(&board_lock);
		goto done;
	}
	dev_dbg(dev, "registered %s %s\n",
		spi_controller_is_slave(ctlr) ? "slave" : "master",
		dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, initialize the queue.  This code
	 * assumes that ctlr->transfer = NULL.
	 */
	if (ctlr->transfer)
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	else {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			/* free bus id */
			mutex_lock(&board_lock);
			idr_remove(&spi_master_idr, ctlr->bus_num);
			mutex_unlock(&board_lock);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}
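/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev:    device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */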
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
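/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */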
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int dummy;

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	if (found != ctlr) {
		dev_dbg(&ctlr->dev,
			"attempting to delete unregistered controller [%s]\n",
			dev_name(&ctlr->dev));
		return;
	}
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
	device_unregister(&ctlr->dev);
	/* free bus id */
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);

static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}
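/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */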
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_controller	*ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
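/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */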
2361void *spi_res_alloc(struct spi_device *spi,
2362 spi_res_release_t release,
2363 size_t size, gfp_t gfp)
2364{
2365 struct spi_res *sres;
2366
2367 sres = kzalloc(sizeof(*sres) + size, gfp);
2368 if (!sres)
2369 return NULL;
2370
2371 INIT_LIST_HEAD(&sres->entry);
2372 sres->release = release;
2373
2374 return sres->data;
2375}
2376EXPORT_SYMBOL_GPL(spi_res_alloc);
2377
2378
2379
2380
2381
2382
2383void spi_res_free(void *res)
2384{
2385 struct spi_res *sres = container_of(res, struct spi_res, data);
2386
2387 if (!res)
2388 return;
2389
2390 WARN_ON(!list_empty(&sres->entry));
2391 kfree(sres);
2392}
2393EXPORT_SYMBOL_GPL(spi_res_free);
2394
2395
2396
2397
2398
2399
2400void spi_res_add(struct spi_message *message, void *res)
2401{
2402 struct spi_res *sres = container_of(res, struct spi_res, data);
2403
2404 WARN_ON(!list_empty(&sres->entry));
2405 list_add_tail(&sres->entry, &message->resources);
2406}
2407EXPORT_SYMBOL_GPL(spi_res_add);
2408
2409
2410
2411
2412
2413
2414void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2415{
2416 struct spi_res *res;
2417
2418 while (!list_empty(&message->resources)) {
2419 res = list_last_entry(&message->resources,
2420 struct spi_res, entry);
2421
2422 if (res->release)
2423 res->release(ctlr, message, res->data);
2424
2425 list_del(&res->entry);
2426
2427 kfree(res);
2428 }
2429}
2430EXPORT_SYMBOL_GPL(spi_res_release);

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers,
		 * then we have been asked to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replaced_transfers structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* now add the requested new transfers */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about the fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers.
	 * note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length any single transfer may have
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily.  It also keeps
	 * the iteration safe while new entries are inserted into the
	 * list by __spi_split_transfer_maxsize().
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
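
/*
 * Example (illustrative sketch): a controller with a hardware FIFO limit
 * might split oversized transfers from its prepare_message() callback.
 * foo_prepare_message() and FOO_MAX_XFER_LEN are hypothetical.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg,
 *						   FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 */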

/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */
static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its current settings.  This function saves
 * the requested mode, word size and clock rate, validates them against
 * the controller's capabilities, and invokes the controller's setup()
 * method if one is provided.
 *
 * The changes take effect the next time the device is selected and
 * data is transferred to or from it.  When this function returns,
 * the spi device is deselected.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* check mode to prevent that both DUAL and QUAD are set at the
	 * same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller
	 */
	bad_bits = spi->mode & ~spi->controller->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->controller,
					      spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->controller->max_speed_hz;

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
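
/*
 * Example (illustrative sketch): a protocol driver reconfiguring its
 * device from probe().  The numbers are arbitrary.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */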

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/* Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/* SPI transfer length should be a multiple of the SPI word
		 * size, where the SPI word size is a power-of-two number
		 * of bytes
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
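
/*
 * Example (illustrative sketch): submitting a message asynchronously
 * with a completion callback.  foo_complete() and the foo_priv type are
 * hypothetical; the message and its buffers must stay allocated until
 * the callback has run.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *foo = context;
 *
 *		... runs in a context that cannot sleep ...
 *	}
 *
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	status = spi_async(spi, &foo->msg);
 */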

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Like spi_async(), but used when the caller already holds the bus lock
 * taken by spi_bus_lock(); the message is queued without checking
 * bus_lock_flag.  All other semantics of spi_async() apply, including
 * the completion callback rules.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/**
 * spi_flash_read - read data from SPI flash
 * @spi: device from which data will be read
 * @msg: spi flash read message
 *
 * This call can be used when the controller provides an accelerated
 * spi_flash_read() path; it validates the requested transfer widths
 * against the device mode, optionally maps the receive buffer for DMA,
 * and invokes the controller's spi_flash_read() method.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_controller *master = spi->controller;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
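
/*
 * Example (illustrative sketch): a simple synchronous full-duplex
 * exchange using on-stack message/transfer structures; tx, rx and len
 * are assumed to come from the caller, and the buffers must be DMA-safe.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	status = spi_sync(spi, &msg);
 */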

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an
 * spi_bus_lock call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
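
/*
 * Example (illustrative sketch): performing an atomic sequence of
 * messages while keeping other clients off the bus.  msg1 and msg2 are
 * hypothetical, already-initialized spi_messages.
 *
 *	spi_bus_lock(spi->controller);
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */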

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
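
/*
 * Example (illustrative sketch): reading one register of a hypothetical
 * chip whose command byte selects the register.  FOO_REG_STATUS and
 * FOO_READ are made-up names.
 *
 *	u8 cmd = FOO_REG_STATUS | FOO_READ;
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */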

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
					     __spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_controller_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, node,
					__spi_of_controller_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);