1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <linux/init.h>
21#include <linux/cache.h>
22#include <linux/dma-mapping.h>
23#include <linux/dmaengine.h>
24#include <linux/mutex.h>
25#include <linux/of_device.h>
26#include <linux/of_irq.h>
27#include <linux/clk/clk-conf.h>
28#include <linux/slab.h>
29#include <linux/mod_devicetable.h>
30#include <linux/spi/spi.h>
31#include <linux/of_gpio.h>
32#include <linux/pm_runtime.h>
33#include <linux/pm_domain.h>
34#include <linux/property.h>
35#include <linux/export.h>
36#include <linux/sched/rt.h>
37#include <uapi/linux/sched/types.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/ioport.h>
41#include <linux/acpi.h>
42#include <linux/highmem.h>
43
44#define CREATE_TRACE_POINTS
45#include <trace/events/spi.h>
46
/*
 * spidev_release - release callback for an spi_device's struct device
 * @dev: the device embedded in the spi_device being freed
 *
 * Called by the driver core when the last reference is dropped.  Lets
 * the controller free per-device state via its cleanup() hook, drops
 * the master reference taken in spi_alloc_device(), then frees the
 * spi_device itself.
 */
static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may clean up for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}
58
59static ssize_t
60modalias_show(struct device *dev, struct device_attribute *a, char *buf)
61{
62 const struct spi_device *spi = to_spi_device(dev);
63 int len;
64
65 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
66 if (len != -ENODEV)
67 return len;
68
69 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
70}
71static DEVICE_ATTR_RO(modalias);
72
/*
 * SPI_STATISTICS_ATTRS - generate sysfs plumbing for one statistics field
 * @field: token naming the statistics member
 * @file:  filename shown in sysfs
 *
 * Emits two show() wrappers plus read-only device_attribute definitions:
 * one pair for the controller (dev_attr_spi_master_<field>) and one for
 * the device (dev_attr_spi_device_<field>).  Both delegate to a common
 * spi_statistics_<field>_show() helper.
 */
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

/*
 * SPI_STATISTICS_SHOW_NAME - define the formatted show helper for one
 * field and instantiate its attributes.  The value is read under
 * stat->lock so the snapshot printed is internally consistent.
 */
#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

/* shorthand for fields whose sysfs name equals the struct member name */
#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
114
/* message/transfer counters */
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

/* how each message entered the core */
SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

/* byte counters (64-bit: these can grow large) */
SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

/*
 * Power-of-two histogram of transfer lengths; @number is the
 * human-readable byte range shown as the sysfs filename suffix.
 */
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
151
/* default (non-statistics) sysfs attributes for every SPI device */
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};
160
/* per-device statistics, exposed under <device>/statistics/ */
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

/* all attribute groups attached to spi_device sysfs nodes */
static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
203
/* per-controller statistics, exposed under <master>/statistics/ */
static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};
245
/*
 * spi_statistics_add_transfer_stats - account one transfer in @stats
 * @stats:  statistics block to update (per-device or per-master)
 * @xfer:   the transfer being accounted
 * @master: controller, used to recognize its dummy rx/tx buffers
 *
 * Updates the transfer count, the power-of-two length histogram and the
 * byte counters under stats->lock.  Bytes clocked through the
 * controller's dummy buffers are excluded from bytes_tx/bytes_rx since
 * they carry no caller data.
 */
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	/* fls() gives the log2 bucket; clamp to the histogram size */
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
272
273
274
275
276
277static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
278 const struct spi_device *sdev)
279{
280 while (id->name[0]) {
281 if (!strcmp(sdev->modalias, id->name))
282 return id;
283 id++;
284 }
285 return NULL;
286}
287
/*
 * spi_get_device_id - fetch the id-table entry matching @sdev's modalias
 * @sdev: device whose bound driver's id table is consulted
 *
 * NOTE(review): sdrv->id_table is forwarded without a NULL check; a
 * driver bound purely via OF/ACPI matching with no id table would hand
 * spi_match_id() a NULL pointer — confirm all callers are id-table
 * drivers, or that spi_match_id() tolerates NULL.
 */
const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
295
/*
 * spi_match_device - bus match() callback
 *
 * Match precedence: device tree compatible strings first, then ACPI,
 * then the driver's id table, and finally a plain modalias-vs-driver
 * name comparison for legacy board-info devices.
 */
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
314
315static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
316{
317 const struct spi_device *spi = to_spi_device(dev);
318 int rc;
319
320 rc = acpi_device_uevent_modalias(dev, env);
321 if (rc != -ENODEV)
322 return rc;
323
324 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
325 return 0;
326}
327
/* the SPI bus type: device/driver matching, uevents, default attributes */
struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
335
336
/*
 * spi_drv_probe - bus probe wrapper around an spi_driver's probe()
 * @dev: device being bound
 *
 * Applies clock defaults from the device tree, resolves the IRQ from
 * the OF node (deferring the probe if the interrupt controller is not
 * ready), attaches the PM domain, then calls the driver's probe().
 * The PM domain is detached again if probe() fails.
 */
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;	/* any other failure: no IRQ */
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}
364
365static int spi_drv_remove(struct device *dev)
366{
367 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
368 int ret;
369
370 ret = sdrv->remove(to_spi_device(dev));
371 dev_pm_domain_detach(dev, true);
372
373 return ret;
374}
375
376static void spi_drv_shutdown(struct device *dev)
377{
378 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
379
380 sdrv->shutdown(to_spi_device(dev));
381}
382
383
384
385
386
387
388
389
390
391int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
392{
393 sdrv->driver.owner = owner;
394 sdrv->driver.bus = &spi_bus_type;
395 if (sdrv->probe)
396 sdrv->driver.probe = spi_drv_probe;
397 if (sdrv->remove)
398 sdrv->driver.remove = spi_drv_remove;
399 if (sdrv->shutdown)
400 sdrv->driver.shutdown = spi_drv_shutdown;
401 return driver_register(&sdrv->driver);
402}
403EXPORT_SYMBOL_GPL(__spi_register_driver);
404
405
406
407
408
409
410
411
412
/* internal wrapper linking a copied spi_board_info onto board_list */
struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

/*
 * Board info registered before its controller shows up is parked on
 * board_list; registered controllers live on spi_master_list.
 */
static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize an spi_device without
 * registering it immediately.  This interface is intended for use by
 * device enumeration code (OF/ACPI/board-info) which may need to set
 * modalias, chip select, and other fields before registration.
 *
 * A reference on @master is held for the device's lifetime and dropped
 * in spidev_release().  The caller finishes configuration and then
 * registers with spi_add_device(), or discards via spi_dev_put().
 *
 * Return: the new device, or NULL on allocation failure.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;		/* no chip-select GPIO yet */

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
469
470static void spi_dev_set_name(struct spi_device *spi)
471{
472 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
473
474 if (adev) {
475 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
476 return;
477 }
478
479 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
480 spi->chip_select);
481}
482
483static int spi_dev_check(struct device *dev, void *data)
484{
485 struct spi_device *spi = to_spi_device(dev);
486 struct spi_device *new_spi = data;
487
488 if (spi->master == new_spi->master &&
489 spi->chip_select == new_spi->chip_select)
490 return -EBUSY;
491 return 0;
492}
493
494
495
496
497
498
499
500
501
502
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * Allocates an spi_device, copies the board-info fields (chip select,
 * speed, mode, IRQ, modalias, platform/controller data and optional
 * device properties) into it, and registers it on the bus.
 *
 * Return: the new device, or NULL on any failure.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */
	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&master->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
628
629
630
631
632
633
634
635
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister (NULL is tolerated as a no-op)
 *
 * Start making the passed SPI device vanish.  Clears the OF "populated"
 * flag (so the node can be re-populated later) and drops the OF node
 * reference, marks any ACPI companion as no longer enumerated, then
 * unregisters the underlying struct device.
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
650
651static void spi_match_master_to_boardinfo(struct spi_master *master,
652 struct spi_board_info *bi)
653{
654 struct spi_device *dev;
655
656 if (master->bus_num != bi->bus_num)
657 return;
658
659 dev = spi_new_device(master, bi);
660 if (!dev)
661 dev_err(master->dev.parent, "can't create new device for %s\n",
662 bi->modalias);
663}
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * The descriptors are copied (so the caller's table may be discarded),
 * including a deep copy of any device properties.  Each descriptor is
 * matched against every already-registered master, and kept on
 * board_list so masters registered later can pick it up too.
 *
 * Return: 0 on success, else a negative errno.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			/*
			 * NOTE(review): this early return leaks the
			 * kcalloc'ed array while entries already linked
			 * onto board_list still point into it — confirm
			 * whether this error path needs real unwinding.
			 */
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
719
720
721
/*
 * spi_set_cs - activate or deactivate a device's chip select
 * @spi:    the device
 * @enable: true to select the device for a transfer
 *
 * SPI_CS_HIGH inverts the requested level.  When a valid CS GPIO exists
 * the pin is driven directly, and the controller's set_cs() hook is
 * additionally called only if the controller set SPI_MASTER_GPIO_SS.
 * Without a GPIO, set_cs() alone is used when provided.
 */
static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
		    spi->master->set_cs)
			spi->master->set_cs(spi, !enable);
	} else if (spi->master->set_cs) {
		spi->master->set_cs(spi, !enable);
	}
}
737
738#ifdef CONFIG_HAS_DMA
/*
 * spi_map_buf - build and DMA-map a scatterlist covering @buf
 * @master: controller (bounds plain-buffer segments via max_dma_len)
 * @dev:    device the DMA mapping is made for
 * @sgt:    scatter table to fill; freed again on any failure
 * @buf:    buffer to map (lowmem, vmalloc, or kmap'ed highmem)
 * @len:    number of bytes to map
 * @dir:    DMA direction
 *
 * vmalloc'ed and kmap'ed buffers are not physically contiguous, so they
 * are mapped page by page via their struct page; lowmem buffers can use
 * larger segments bounded by the device's max segment size and the
 * controller's max_dma_len.  Return: 0 or a negative errno.
 */
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	/* does the address fall inside the persistent-kmap window? */
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/* at most one page (or its tail) per segment */
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;	/* dma_map_sg() reports failure as 0 */
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	/* the IOMMU may have coalesced entries during mapping */
	sgt->nents = ret;

	return 0;
}
813
814static void spi_unmap_buf(struct spi_master *master, struct device *dev,
815 struct sg_table *sgt, enum dma_data_direction dir)
816{
817 if (sgt->orig_nents) {
818 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
819 sg_free_table(sgt);
820 }
821}
822
/*
 * __spi_map_msg - DMA-map every transfer in @msg the controller wants
 * to handle via DMA (per its can_dma() callback)
 *
 * Mapping is done against the DMA channel's device when one exists,
 * falling back to the controller's parent device.  If mapping the RX
 * side of a transfer fails, its already-mapped TX side is unmapped
 * before returning the error.
 */
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}
870
/*
 * __spi_unmap_msg - undo __spi_map_msg() for every DMA'd transfer
 *
 * No-op unless the current message was actually mapped.  Uses the same
 * device selection (DMA channel device or controller parent) as the
 * mapping side so unmap is performed against the right device.
 */
static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
899#else
/* !CONFIG_HAS_DMA stubs: mapping always fails, unmapping is a no-op */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
925#endif
926
static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they
		 * have been replaced with the controller's dummy buffers
		 * by spi_map_msg().
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}
945
/*
 * spi_map_msg - prepare a message for the controller
 *
 * For controllers flagged SPI_MASTER_MUST_TX/MUST_RX, missing tx/rx
 * buffers are substituted with (re)allocated dummy buffers sized for
 * the largest such transfer in the message; only the TX dummy is
 * zeroed.  Then the transfers are DMA-mapped via __spi_map_msg().
 */
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		/* find the largest transfer missing each buffer kind */
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
995
996
997
998
999
1000
1001
1002
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			/*
			 * A positive return means the transfer completes
			 * asynchronously: wait for completion with a
			 * timeout of twice the time on the wire (8 bits
			 * per byte at speed_hz) plus 200 ms tolerance.
			 */
			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			/* busy-wait for short delays, sleep otherwise */
			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			/* a cs_change on the last transfer keeps CS active */
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
1111
1112
1113
1114
1115
1116
1117
1118
1119
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt-driven
 * transfer has finished and the next one may be scheduled (wakes the
 * waiter in spi_transfer_one_message()).
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * other contexts; hardware teardown on an emptied queue is only done
 * from the kthread, other callers requeue the pump work instead.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		/* free the dummy buffers and quiesce the hardware */
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mutex_lock(&master->io_mutex);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&master->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
1274
1275
1276
1277
1278
1279static void spi_pump_messages(struct kthread_work *work)
1280{
1281 struct spi_master *master =
1282 container_of(work, struct spi_master, pump_messages);
1283
1284 __spi_pump_messages(master, true);
1285}
1286
/*
 * spi_init_queue - set up the message queue machinery for @master
 *
 * Creates the kworker thread that runs the message pump and initializes
 * the pump work item.  (Note: the original extraction garbled the
 * sched_setscheduler() argument; it is the address of @param.)
 */
static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	kthread_init_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without
	 * this setting the message pump thread will remain at default
	 * priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* Peek at the queue head under the lock; do not dequeue it. */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1344
1345
1346
1347
1348
1349
1350
1351
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	/* Snapshot the in-flight message under the queue lock. */
	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	/* Undo ->prepare_message only if it actually ran for this message. */
	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	/* Clear cur_msg and restart the pump so the next message can run. */
	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	kthread_queue_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	/* Completion callback runs last; mesg may be freed by the caller. */
	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1385
/*
 * spi_start_queue - mark the message queue as running and kick the pump
 * @master: controller whose queue should start
 *
 * Return: 0 on success, -EBUSY if the queue is already running or the
 * pump is currently busy.
 */
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	/* Kick the pump outside the lock; it takes queue_lock itself. */
	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}
1405
/*
 * spi_stop_queue - quiesce the message queue
 * @master: controller whose queue should stop
 *
 * Polls (up to 500 * ~10ms, about 5 seconds) for the queue to drain and
 * the pump to go idle, then clears ->running so new submissions fail
 * with -ESHUTDOWN.
 *
 * Return: 0 on success, -EBUSY if messages were still pending or
 * in-flight when the timeout expired.
 */
static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		/* Drop the lock while sleeping so the pump can drain. */
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}
1440
1441static int spi_destroy_queue(struct spi_master *master)
1442{
1443 int ret;
1444
1445 ret = spi_stop_queue(master);
1446
1447
1448
1449
1450
1451
1452
1453 if (ret) {
1454 dev_err(&master->dev, "problem destroying queue\n");
1455 return ret;
1456 }
1457
1458 kthread_flush_worker(&master->kworker);
1459 kthread_stop(master->kworker_task);
1460
1461 return 0;
1462}
1463
/*
 * __spi_queued_transfer - enqueue a message on the controller's queue
 * @spi: device the message is addressed to
 * @msg: message to queue
 * @need_pump: when true, kick the pump thread if it is idle; callers
 *             that will run the pump themselves pass false
 *
 * Return: 0 on success, -ESHUTDOWN if the queue is not running.
 */
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	/* No kick needed while the pump is busy; it re-checks the queue. */
	if (!master->busy && need_pump)
		kthread_queue_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}
1487
1488
1489
1490
1491
1492
1493
1494
/* Default ->transfer hook for queued controllers: enqueue and kick the pump. */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
1499
/*
 * spi_master_initialize_queue - wire up the generic queued transfer path
 * @master: controller that did not provide its own ->transfer hook
 *
 * Installs spi_queued_transfer() as ->transfer, falls back to the generic
 * spi_transfer_one_message() when the driver supplied no
 * ->transfer_one_message, then creates and starts the message queue.
 *
 * Return: 0 on success, negative errno on queue init/start failure.
 */
static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}
1528
1529
1530
1531#if defined(CONFIG_OF)
1532static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
1533 struct device_node *nc)
1534{
1535 u32 value;
1536 int rc;
1537
1538
1539 rc = of_property_read_u32(nc, "reg", &value);
1540 if (rc) {
1541 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1542 nc->full_name, rc);
1543 return rc;
1544 }
1545 spi->chip_select = value;
1546
1547
1548 if (of_find_property(nc, "spi-cpha", NULL))
1549 spi->mode |= SPI_CPHA;
1550 if (of_find_property(nc, "spi-cpol", NULL))
1551 spi->mode |= SPI_CPOL;
1552 if (of_find_property(nc, "spi-cs-high", NULL))
1553 spi->mode |= SPI_CS_HIGH;
1554 if (of_find_property(nc, "spi-3wire", NULL))
1555 spi->mode |= SPI_3WIRE;
1556 if (of_find_property(nc, "spi-lsb-first", NULL))
1557 spi->mode |= SPI_LSB_FIRST;
1558
1559
1560 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1561 switch (value) {
1562 case 1:
1563 break;
1564 case 2:
1565 spi->mode |= SPI_TX_DUAL;
1566 break;
1567 case 4:
1568 spi->mode |= SPI_TX_QUAD;
1569 break;
1570 default:
1571 dev_warn(&master->dev,
1572 "spi-tx-bus-width %d not supported\n",
1573 value);
1574 break;
1575 }
1576 }
1577
1578 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1579 switch (value) {
1580 case 1:
1581 break;
1582 case 2:
1583 spi->mode |= SPI_RX_DUAL;
1584 break;
1585 case 4:
1586 spi->mode |= SPI_RX_QUAD;
1587 break;
1588 default:
1589 dev_warn(&master->dev,
1590 "spi-rx-bus-width %d not supported\n",
1591 value);
1592 break;
1593 }
1594 }
1595
1596
1597 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1598 if (rc) {
1599 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1600 nc->full_name, rc);
1601 return rc;
1602 }
1603 spi->max_speed_hz = value;
1604
1605 return 0;
1606}
1607
/*
 * of_register_spi_device - create and register one SPI slave from a DT node
 * @master: controller to register the new device on
 * @nc:     device-tree node describing the slave
 *
 * Return: the registered spi_device, or ERR_PTR() on failure.
 */
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	rc = of_spi_parse_dt(master, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		/* Drop the reference taken by of_node_get() above. */
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
1656
1657
1658
1659
1660
1661
1662
1663
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each available child node of the master's
 * device-tree node.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Skip nodes that were already populated. */
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi)) {
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
			/* Clear the flag so a later rescan can retry. */
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
1683#else
1684static void of_register_spi_devices(struct spi_master *master) { }
1685#endif
1686
1687#ifdef CONFIG_ACPI
/*
 * acpi_spi_add_resource - parse one ACPI resource for a SPI slave
 * @ares: resource to examine
 * @data: the struct spi_device being filled in
 *
 * acpi_dev_get_resources() callback: fills in chip select, speed and
 * mode bits from a SPISerialBus descriptor; otherwise picks up the first
 * interrupt resource.  Always returns 1 so the resource is also kept on
 * the caller's list.
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is controller-
			 * specific; drivers that number chip selects
			 * differently from 0..max-1 provide
			 * ->fw_translate_cs to map between the schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
1734
/*
 * acpi_register_spi_device - create a spi_device from an ACPI node
 * @master: controller the device hangs off
 * @adev:   ACPI device describing the slave
 *
 * Skips absent or already-enumerated devices.  Devices without a valid
 * connection speed are silently dropped.  Always returns AE_OK (or
 * AE_NO_MEMORY) so namespace walking continues.
 */
static acpi_status acpi_register_spi_device(struct spi_master *master,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	/* -1 means "no IRQ found yet" for acpi_spi_add_resource(). */
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	/* No SPISerialBus resource found: not really a SPI slave. */
	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	/* Fall back to a GpioInt resource if no Interrupt resource matched. */
	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
1784
1785static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1786 void *data, void **return_value)
1787{
1788 struct spi_master *master = data;
1789 struct acpi_device *adev;
1790
1791 if (acpi_bus_get_device(handle, &adev))
1792 return AE_OK;
1793
1794 return acpi_register_spi_device(master, adev);
1795}
1796
1797static void acpi_register_spi_devices(struct spi_master *master)
1798{
1799 acpi_status status;
1800 acpi_handle handle;
1801
1802 handle = ACPI_HANDLE(master->dev.parent);
1803 if (!handle)
1804 return;
1805
1806 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1807 acpi_spi_add_device, NULL,
1808 master, NULL);
1809 if (ACPI_FAILURE(status))
1810 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1811}
1812#else
1813static inline void acpi_register_spi_devices(struct spi_master *master) {}
1814#endif
1815
1816static void spi_master_release(struct device *dev)
1817{
1818 struct spi_master *master;
1819
1820 master = container_of(dev, struct spi_master, dev);
1821 kfree(master);
1822}
1823
/* sysfs class for controllers (/sys/class/spi_master). */
static struct class spi_master_class = {
	.name = "spi_master",
	.owner = THIS_MODULE,
	.dev_release = spi_master_release,
	.dev_groups = spi_master_groups,
};
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * The caller is responsible for registering the result with
 * spi_register_master() and must call spi_master_put() to undo this
 * allocation on failure paths.
 *
 * Return: the new master, or NULL on allocation failure or when @dev is NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	/* Driver-private data lives directly behind the spi_master. */
	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	/* bus_num < 0 requests dynamic bus number assignment at register. */
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	pm_suspend_ignore_children(&master->dev, true);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
1874
1875#ifdef CONFIG_OF
1876static int of_spi_register_master(struct spi_master *master)
1877{
1878 int nb, i, *cs;
1879 struct device_node *np = master->dev.of_node;
1880
1881 if (!np)
1882 return 0;
1883
1884 nb = of_gpio_named_count(np, "cs-gpios");
1885 master->num_chipselect = max_t(int, nb, master->num_chipselect);
1886
1887
1888 if (nb == 0 || nb == -ENOENT)
1889 return 0;
1890 else if (nb < 0)
1891 return nb;
1892
1893 cs = devm_kzalloc(&master->dev,
1894 sizeof(int) * master->num_chipselect,
1895 GFP_KERNEL);
1896 master->cs_gpios = cs;
1897
1898 if (!master->cs_gpios)
1899 return -ENOMEM;
1900
1901 for (i = 0; i < master->num_chipselect; i++)
1902 cs[i] = -ENOENT;
1903
1904 for (i = 0; i < nb; i++)
1905 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1906
1907 return 0;
1908}
1909#else
/* No device-tree support: nothing to parse, always succeed. */
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
1914#endif
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Makes the controller visible in sysfs, assigns (or dynamically
 * allocates) its bus number, sets up the generic message queue unless
 * the driver provided its own ->transfer, and then enumerates slaves
 * from board info, device tree and ACPI.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device *dev = master->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* Convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
2017
/* devres release callback: unregister the master recorded in *res. */
static void devm_spi_unregister(struct device *dev, void *res)
{
	struct spi_master **master = res;

	spi_unregister_master(*master);
}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034int devm_spi_register_master(struct device *dev, struct spi_master *master)
2035{
2036 struct spi_master **ptr;
2037 int ret;
2038
2039 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2040 if (!ptr)
2041 return -ENOMEM;
2042
2043 ret = spi_register_master(master);
2044 if (!ret) {
2045 *ptr = master;
2046 devres_add(dev, ptr);
2047 } else {
2048 devres_free(ptr);
2049 }
2050
2051 return ret;
2052}
2053EXPORT_SYMBOL_GPL(devm_spi_register_master);
2054
/* device_for_each_child() callback: unregister one child spi_device. */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * Reverses spi_register_master(): tears down the message queue (if any),
 * removes the master from the global list, unregisters every child
 * spi_device and finally the master's own device.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
2088
2089int spi_master_suspend(struct spi_master *master)
2090{
2091 int ret;
2092
2093
2094 if (!master->queued)
2095 return 0;
2096
2097 ret = spi_stop_queue(master);
2098 if (ret)
2099 dev_err(&master->dev, "queue stop failed\n");
2100
2101 return ret;
2102}
2103EXPORT_SYMBOL_GPL(spi_master_suspend);
2104
2105int spi_master_resume(struct spi_master *master)
2106{
2107 int ret;
2108
2109 if (!master->queued)
2110 return 0;
2111
2112 ret = spi_start_queue(master);
2113 if (ret)
2114 dev_err(&master->dev, "queue restart failed\n");
2115
2116 return ret;
2117}
2118EXPORT_SYMBOL_GPL(spi_master_resume);
2119
2120static int __spi_master_match(struct device *dev, const void *data)
2121{
2122 struct spi_master *m;
2123 const u16 *bus_num = data;
2124
2125 m = container_of(dev, struct spi_master, dev);
2126 return m->bus_num == *bus_num;
2127}
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141struct spi_master *spi_busnum_to_master(u16 bus_num)
2142{
2143 struct device *dev;
2144 struct spi_master *master = NULL;
2145
2146 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2147 __spi_master_match);
2148 if (dev)
2149 master = container_of(dev, struct spi_master, dev);
2150
2151 return master;
2152}
2153EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173void *spi_res_alloc(struct spi_device *spi,
2174 spi_res_release_t release,
2175 size_t size, gfp_t gfp)
2176{
2177 struct spi_res *sres;
2178
2179 sres = kzalloc(sizeof(*sres) + size, gfp);
2180 if (!sres)
2181 return NULL;
2182
2183 INIT_LIST_HEAD(&sres->entry);
2184 sres->release = release;
2185
2186 return sres->data;
2187}
2188EXPORT_SYMBOL_GPL(spi_res_alloc);
2189
2190
2191
2192
2193
2194
2195void spi_res_free(void *res)
2196{
2197 struct spi_res *sres = container_of(res, struct spi_res, data);
2198
2199 if (!res)
2200 return;
2201
2202 WARN_ON(!list_empty(&sres->entry));
2203 kfree(sres);
2204}
2205EXPORT_SYMBOL_GPL(spi_res_free);
2206
2207
2208
2209
2210
2211
/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message to which the resource belongs
 * @res:     payload pointer obtained from spi_res_alloc
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	/* A resource may be attached to at most one message. */
	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
2220
2221
2222
2223
2224
2225
/**
 * spi_res_release - release all spi resources of a message
 * @master:  the spi_master
 * @message: the spi message whose resources are released
 *
 * Resources are released in reverse order of registration, running each
 * resource's ->release hook before freeing it.
 */
void spi_res_release(struct spi_master *master,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(master, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
2244
2245
2246
2247
2248
/*
 * __spi_replace_transfers_release - spi_res release hook that undoes
 * spi_replace_transfers(): splices the original transfers back into the
 * message and unlinks the inserted replacements.
 */
static void __spi_replace_transfers_release(struct spi_master *master,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(master, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * The replacement is registered as a spi_res so that it is undone
 * automatically when the message's resources are released.
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* extra data starts right after the inserted transfer array */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Remember the list entry after which the replaced transfers must
	 * be re-inserted on release - it may be the message list head.
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the next entry is the list head we were asked to
		 * remove more transfers than the message contains.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replaced_transfers structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Move the entry after replaced_after from the message to
		 * the list of replaced transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copies of the first removed transfer; inserting after
	 * replaced_after in reverse order keeps them in ascending order.
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
2377
/*
 * __spi_split_transfer_maxsize - split one oversized transfer into
 * several transfers of at most @maxsize bytes each
 * @master:  the controller (for statistics accounting)
 * @msg:     message containing the transfer
 * @xferp:   in: the transfer to split; out: the last inserted transfer,
 *           so the caller's list walk skips the replacements
 * @maxsize: maximum length per resulting transfer
 * @gfp:     GFP allocation flags
 */
static int __spi_split_transfer_maxsize(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about this fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * All inserted transfers start as byte-for-byte copies of *xferp,
	 * so only len and the buffer/dma pointers need fixing up.  The
	 * first transfer keeps the original pointers and just gets its
	 * length clamped.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need their buffers advanced as well */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* xfers[i].len is still the original total length here,
		 * so "len - offset" is the remaining byte count. */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * Point the caller's cursor at the last inserted entry so the
	 * enclosing list walk does not revisit the replacements.
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @master:  the spi master on which the transfer is to run
 * @msg:     the message to split
 * @maxsize: the maximum length any single transfer may have
 * @gfp:     GFP allocation flags
 *
 * Return: 0 on success, negative errno on allocation or replacement
 * failure.
 */
int spi_split_transfers_maxsize(struct spi_master *master,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterating over the transfer list is safe even though the split
	 * inserts entries: __spi_split_transfer_maxsize advances @xfer to
	 * the last inserted transfer, so list_for_each_entry continues
	 * after the replacements.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(
				master, msg, &xfer, maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2485
2486
2487
2488
2489
2490
2491
2492static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2493{
2494 if (master->bits_per_word_mask) {
2495
2496 if (bits_per_word > 32)
2497 return -EINVAL;
2498 if (!(master->bits_per_word_mask &
2499 SPI_BPW_MASK(bits_per_word)))
2500 return -EINVAL;
2501 }
2502
2503 return 0;
2504}
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * Validates the device's mode bits against the controller's
 * capabilities, applies defaults for bits_per_word and max_speed_hz,
 * calls the controller's ->setup hook, and deasserts chip select.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* check mode to prevent that DUAL and QUAD set at the same time */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}

	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;

	/*
	 * Help drivers fail *cleanly* when they need options that aren't
	 * supported with their current master.
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		/* Unsupported dual/quad bits are dropped with a warning
		 * rather than failing the whole setup. */
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	/* Leave chip select deasserted in the mode just configured. */
	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
2590
/*
 * __spi_validate - sanity-check and canonicalize a message before transfer
 * @spi:     device the message is addressed to
 * @message: message to validate; per-transfer defaults are filled in
 *
 * Rejects empty messages, enforces half-duplex/3-wire restrictions,
 * defaults each transfer's bits_per_word, speed_hz and tx/rx_nbits,
 * clamps speeds to the controller range, and checks word-size alignment
 * of every transfer length.  On success the message is marked
 * -EINPROGRESS.
 */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either whole txrx buffer is nil (no rx or no tx).
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;

		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
2701
/*
 * __spi_async - submit a validated message via the controller's
 * ->transfer hook.  Callers must already hold bus_lock_spinlock.
 */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Validates and submits the message without waiting for completion; the
 * message's completion callback reports the result.  Fails with -EBUSY
 * while the bus is locked by spi_bus_lock().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	/* Respect an exclusive bus lock taken via spi_bus_lock(). */
	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Like spi_async() but ignores the bus_lock_flag; for callers that
 * already hold the bus mutex via spi_bus_lock().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;

}
EXPORT_SYMBOL_GPL(spi_async_locked);
2821
2822
/**
 * spi_flash_read - read flash data through the controller's accelerated path
 * @spi: device from which data will be read
 * @msg: flash read request descriptor
 *
 * Validates the requested opcode/address/data widths against the device
 * mode, powers up the controller if it uses runtime PM, optionally DMA-maps
 * the receive buffer, and invokes the driver's ->spi_flash_read hook under
 * both the bus lock and the io mutex.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)

{
	struct spi_master *master = spi->master;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode &  SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	/* bus lock first, then io mutex - same order as the message pump. */
	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
2878
2879
2880
2881
2882
2883
2884
2885
/* Completion callback installed by __spi_sync(); wakes the sleeping caller. */
static void spi_complete(void *arg)
{
	complete(arg);
}
2890
/*
 * Common implementation behind spi_sync() and spi_sync_locked(): submit
 * @message with an on-stack completion as its callback, then sleep until
 * the transfer finishes.  Callers provide whatever bus-mutex protection
 * their variant requires.
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		/* need_pump=false: we pump the queue ourselves below */
		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/*
		 * Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966int spi_sync(struct spi_device *spi, struct spi_message *message)
2967{
2968 int ret;
2969
2970 mutex_lock(&spi->master->bus_lock_mutex);
2971 ret = __spi_sync(spi, message);
2972 mutex_unlock(&spi->master->bus_lock_mutex);
2973
2974 return ret;
2975}
2976EXPORT_SYMBOL_GPL(spi_sync);
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * Like spi_sync(), but the caller must already hold the bus lock taken by
 * spi_bus_lock(); the bus mutex is deliberately not taken here.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible and has no timeout.
 *
 * While the bus is locked, ordinary spi_async() submissions fail with
 * -EBUSY; transfers must use spi_sync_locked()/spi_async_locked().  The
 * lock is released by spi_bus_unlock().
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;	/* makes spi_async() return -EBUSY */
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * Clears the flag set by spi_bus_lock() (re-enabling spi_async()) and
 * drops the bus mutex taken there.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
3053
3054
/* Size of the preallocated bounce buffer used by spi_write_then_read() */
#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)

/*
 * DMA-safe scratch buffer allocated in spi_init(); shared by
 * spi_write_then_read() under its local mutex.
 */
static u8 *buf;
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * Performs a half-duplex transaction: writes @n_tx bytes from @txbuf, then
 * reads @n_rx bytes into @rxbuf.  Data is always staged through a bounce
 * buffer, so portable code should keep transfers small (at most
 * SPI_BUFSIZ bytes); bulk transfers should use spi_sync()/spi_async()
 * with dma-safe buffers instead.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	/* serializes use of the preallocated module-level bounce buffer */
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (it's a convenience API), but we can keep heap costs
	 * out of the hot path unless someone else is already using the
	 * buffer or the transfer is too large for it.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* tx data at the start of the buffer, rx data right after it */
	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);	/* we used the shared buffer */
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
3133
3134
3135
3136#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* bus_find_device() callback: match an SPI device by its device-tree node */
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}
3141
3142
3143static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3144{
3145 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3146 __spi_of_device_match);
3147 return dev ? to_spi_device(dev) : NULL;
3148}
3149
/* class_find_device() callback: match an SPI master by its device-tree node */
static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}
3154
3155
3156static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3157{
3158 struct device *dev;
3159
3160 dev = class_find_device(&spi_master_class, NULL, node,
3161 __spi_of_master_match);
3162 if (!dev)
3163 return NULL;
3164
3165
3166 return container_of(dev, struct spi_master, dev);
3167}
3168
/*
 * OF reconfiguration notifier: create/destroy SPI devices as device-tree
 * nodes (e.g. from overlays) are added under, or removed from, a
 * registered SPI master.
 */
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		/* already populated? */
		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);	/* drop ref from the find */

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}
3218
/* Registered in spi_init() when CONFIG_OF_DYNAMIC is enabled */
static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
3222#else
3223extern struct notifier_block spi_of_notifier;
3224#endif
3225
3226#if IS_ENABLED(CONFIG_ACPI)
/* class_find_device() callback: match a master whose parent has this ACPI companion */
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}
3231
/* bus_find_device() callback: match an SPI device by its ACPI companion */
static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}
3236
3237static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
3238{
3239 struct device *dev;
3240
3241 dev = class_find_device(&spi_master_class, NULL, adev,
3242 spi_acpi_master_match);
3243 if (!dev)
3244 return NULL;
3245
3246 return container_of(dev, struct spi_master, dev);
3247}
3248
3249static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3250{
3251 struct device *dev;
3252
3253 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3254
3255 return dev ? to_spi_device(dev) : NULL;
3256}
3257
/*
 * ACPI reconfiguration notifier: enumerate or remove SPI devices as their
 * ACPI companion devices are hot-added or removed.
 */
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;	/* parent is not a registered SPI master */

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);	/* drop ref from the find */
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);	/* drop ref from the find */
		break;
	}

	return NOTIFY_OK;
}
3289
/* Registered in spi_init() when CONFIG_ACPI is enabled */
static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
3293#else
3294extern struct notifier_block spi_acpi_notifier;
3295#endif
3296
/*
 * Core initialization: allocate the DMA-safe bounce buffer used by
 * spi_write_then_read(), register the SPI bus type and the master class,
 * then hook the dynamic OF/ACPI reconfiguration notifiers.  Unwinds in
 * reverse order on failure.
 */
static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	/* notifier registration failure is non-fatal; just warn */
	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}
3330
3331
3332
3333
3334
3335
3336
3337
/*
 * Initialized at postcore level so the SPI bus type exists before the
 * device and driver registrations performed by later initcalls.
 */
postcore_initcall(spi_init);
3339
3340