/*
 * Greybus SPI library
 *
 * Copyright 2014-2016 Google Inc.
 * Copyright 2014-2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "greybus.h"
#include "spilib.h"

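/*
 * Per-connection state for a Greybus SPI master. The tx/rx offsets and
 * last_xfer_size track progress through a transfer that is too large for
 * a single Greybus operation and must be fragmented.
 */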
struct gb_spilib {
	struct gb_connection	*connection;
	struct device		*parent;
	struct spi_transfer	*first_xfer;
	struct spi_transfer	*last_xfer;
	struct spilib_ops	*ops;
	u32			rx_xfer_offset;
	u32			tx_xfer_offset;
	u32			last_xfer_size;
	unsigned int		op_timeout;
	u16			mode;
	u16			flags;
	u32			bits_per_word_mask;
	u8			num_chipselect;
	u32			min_speed_hz;
	u32			max_speed_hz;
};

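/* Message-processing states, stored as an opaque cookie in msg->state */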
#define GB_SPI_STATE_MSG_DONE		((void *)0)
#define GB_SPI_STATE_MSG_IDLE		((void *)1)
#define GB_SPI_STATE_MSG_RUNNING	((void *)2)
#define GB_SPI_STATE_OP_READY		((void *)3)
#define GB_SPI_STATE_OP_DONE		((void *)4)
#define GB_SPI_STATE_MSG_ERROR		((void *)-1)

#define XFER_TIMEOUT_TOLERANCE		200

static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
{
	return gb_connection_get_data(spi->connection);
}

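/* Check that the tx data and all transfer headers fit in one operation */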
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	return tx_size + headers_size > data_max ? 0 : 1;
}

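/*
 * Compute how much rx data from the current transfer fits in the
 * operation; for full-duplex transfers the rx and tx fragment sizes are
 * forced to match.
 */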
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
				size_t data_max)
{
	size_t rx_xfer_size;

	data_max -= sizeof(struct gb_spi_transfer_response);

	if (rx_size + len > data_max)
		rx_xfer_size = data_max - rx_size;
	else
		rx_xfer_size = len;

	/* For full-duplex transfers, keep rx and tx fragment sizes equal */
	if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
		rx_xfer_size = *tx_xfer_size;
	if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
		*tx_xfer_size = rx_xfer_size;

	return rx_xfer_size;
}

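/*
 * Compute how much tx data from the current transfer fits in the
 * operation, leaving room for the request header and the per-transfer
 * descriptors.
 */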
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
				size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	if (tx_size + headers_size + len > data_max)
		return data_max - (tx_size + sizeof(struct gb_spi_transfer));

	return len;
}

static void clean_xfer_state(struct gb_spilib *spi)
{
	spi->first_xfer = NULL;
	spi->last_xfer = NULL;
	spi->rx_xfer_offset = 0;
	spi->tx_xfer_offset = 0;
	spi->last_xfer_size = 0;
	spi->op_timeout = 0;
}

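/* Return true once a fragmented transfer has been completely handled */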
static bool is_last_xfer_done(struct gb_spilib *spi)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
	    (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
		return true;

	return false;
}

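/*
 * After an operation completes, either advance to the next transfer in
 * the message or bump the offsets so the rest of a fragmented transfer
 * goes out with the next operation.
 */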
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * If we transferred all content of the last transfer, reset the
	 * offsets and check whether it was the final transfer of the
	 * message.
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}

static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
					  struct spi_message *msg)
{
	if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
				    transfer_list))
		return NULL;

	return list_next_entry(xfer, transfer_list);
}

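/*
 * Build one Greybus SPI transfer operation covering as many transfers
 * (or fragments of a transfer) from the message as fit in the operation
 * payload.
 */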
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */
	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* tx data follows the descriptors */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}

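/* Copy the data received in the operation response into the rx buffers */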
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}

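/*
 * Core message handler: keep building and sending operations until the
 * whole spi_message has been transferred or an error occurs.
 */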
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}

static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	return spi->ops->prepare_transfer_hardware(spi->parent);
}

static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	spi->ops->unprepare_transfer_hardware(spi->parent);

	return 0;
}

static int gb_spi_setup(struct spi_device *spi)
{
	/* Nothing to do for now */
	return 0;
}

static void gb_spi_cleanup(struct spi_device *spi)
{
	/* Nothing to do for now */
}

/* Routines to get controller information */

/*
 * Map Greybus SPI mode and flags to Linux ones. All bits are the same
 * for now, so these macros simply return their argument.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags

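/* Query the remote SPI controller for its capabilities */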
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
	struct gb_spi_master_config_response response;
	u16 mode, flags;
	int ret;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	mode = le16_to_cpu(response.mode);
	spi->mode = gb_spi_mode_map(mode);

	flags = le16_to_cpu(response.flags);
	spi->flags = gb_spi_flags_map(flags);

	spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
	spi->num_chipselect = response.num_chipselect;

	spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
	spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

	return 0;
}

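/*
 * Fetch the device config for one chip select and register a matching
 * spi_device on the master.
 */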
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
	struct spi_master *master = get_master_from_spi(spi);
	struct gb_spi_device_config_request request;
	struct gb_spi_device_config_response response;
	struct spi_board_info spi_board = { {0} };
	struct spi_device *spidev;
	int ret;
	u8 dev_type;

	request.chip_select = cs;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	dev_type = response.device_type;

	if (dev_type == GB_SPI_SPI_DEV)
		strlcpy(spi_board.modalias, "spidev",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_NOR)
		strlcpy(spi_board.modalias, "spi-nor",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_MODALIAS)
		memcpy(spi_board.modalias, response.name,
		       sizeof(spi_board.modalias));
	else
		return -EINVAL;

	spi_board.mode = le16_to_cpu(response.mode);
	spi_board.bus_num = master->bus_num;
	spi_board.chip_select = cs;
	spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);

	spidev = spi_new_device(master, &spi_board);
	if (!spidev)
		return -EINVAL;

	return 0;
}

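/* Create and register an SPI master for the given Greybus connection */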
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* Allocate master with space for our private data */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* Get the remote controller's configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1;	/* let the spi core assign one dynamically */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* Now set up all the spi devices */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	spi_master_put(master);

	return ret;

exit_spi_unregister:
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);

void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_master *master = gb_connection_get_data(connection);

	spi_unregister_master(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);

MODULE_LICENSE("GPL v2");