#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"
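
/**
 * ishtp_cl_get_tx_free_buffer_size() - Report free TX buffer space
 * @cl: client device instance
 *
 * Return: total size in bytes of the client's free TX ring buffers
 */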
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
	unsigned long tx_free_flags;
	int size;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

	return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
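
/**
 * ishtp_cl_get_tx_free_rings() - Report number of free TX ring entries
 * @cl: client device instance
 *
 * Return: number of free TX ring buffers
 */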
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
	return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
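
/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Remove all read buffers belonging to @cl from the device read list
 */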
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
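
/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Drop any pending read buffers for the client
 *
 * Return: 0 on success, -EINVAL if the client or device is NULL
 */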
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);
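
/**
 * ishtp_cl_init() - Initialize a client structure
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initialize spinlocks, wait queue, lists and default ring sizes of a
 * newly created client
 */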
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* DMA/IPC TX path bookkeeping */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}
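
/**
 * ishtp_cl_allocate() - Allocate and initialize a client structure
 * @dev: ishtp device
 *
 * Return: the new client instance, or NULL on allocation failure
 */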
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
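
/**
 * ishtp_cl_free() - Free a client structure
 * @cl: client device instance
 *
 * Release the client's RX and TX rings and free the client itself
 */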
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);
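
/**
 * ishtp_cl_link() - Reserve a host client id and link the client
 * @cl: client device instance
 * @id: host client id to use, or ISHTP_HOST_CLIENT_ID_ANY to pick a free one
 *
 * Allocate a bit in the host client map, bounded by the maximum number of
 * open handles, and add the client to the device's client list
 *
 * Return: 0 on success or a negative error code
 */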
int ishtp_cl_link(struct ishtp_cl *cl, int id)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	/* If an id is not assigned, get a free one */
	if (id == ISHTP_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					 ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);
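
/**
 * ishtp_cl_unlink() - Remove a client from the device's client list
 * @cl: client device instance
 *
 * Release the host client id and drop the client from the list
 */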
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * Look the client up in the device list before deleting it,
	 * in case it was never linked
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);
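
/**
 * ishtp_cl_disconnect() - Send a disconnect request to the firmware
 * @cl: client device instance
 *
 * Send an HBM disconnect request for the client and wait for the firmware
 * response or a timeout
 *
 * Return: 0 on success or a negative error code
 */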
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect is not allowed\n",
			       __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If a firmware reset arrived, the wait may have ended because the
	 * device is no longer enabled rather than because of a response
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
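
/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Check whether any other client is currently connecting to the same
 * fw client id
 *
 * Return: true if another client is mid-connect, false otherwise
 */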
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}
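
/**
 * ishtp_cl_connect() - Send a connect request to the firmware
 * @cl: client device instance
 *
 * Send an HBM connect request, wait for the response, then bind the client
 * device and allocate its RX and TX rings
 *
 * Return: 0 on success or a negative error code
 */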
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If a firmware reset arrived, the device is no longer enabled and
	 * the connect response will never come
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if allocation failed, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if allocation failed, release the RX ring and disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, start flow control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
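
/**
 * ishtp_cl_read_start() - Prepare a buffer to receive a client message
 * @cl: client device instance
 *
 * Move a buffer from the client's free RX pool to the device read list and
 * send a flow control request so the firmware may send the next message
 *
 * Return: 0 on success or a negative error code
 */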
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* Take the first buffer from the free RX list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be done BEFORE sending flow control -
	 * the response in ISR context may arrive too fast otherwise
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}
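
/**
 * ishtp_cl_send() - Queue a message for transmission to the firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: message length in bytes
 *
 * Copy the message into a free TX ring buffer, add it to the client's TX
 * list and kick off transmission if no other message is already queued
 *
 * Return: 0 on success or a negative error code
 */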
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check that the fw client exists and the message fits its limit */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* Take a free TX ring buffer */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		return -EIO;
		/* Should not happen, as free list entries are pre-allocated */
	}

	/*
	 * Copy the message into the ring buffer and move it from the free
	 * list to the TX list
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
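
/**
 * ishtp_cl_read_complete() - Queue a completed read buffer for processing
 * @rb: read buffer holding a complete message
 *
 * Add the buffer to the client's in-process list and schedule the bus RX
 * event if the list was empty
 */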
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * If the in-process list is empty, the processing work needs to be
	 * scheduled
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}
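
/**
 * ipc_tx_callback() - IPC TX completion callback
 * @prm: pointer to the client instance
 *
 * Send the next message, or the next fragment of the current message, over
 * IPC. Called both to start transmission and on completion of the previous
 * fragment
 */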
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Bail out if a critical error occurred (device reset or client
	 * disconnect) before this callback was invoked
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
	} else {
		/* Send an intermediate IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}
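
/**
 * ishtp_cl_send_msg_ipc() - Send a queued message over the IPC path
 * @dev: ISHTP device instance
 * @cl: client device instance
 *
 * Start IPC transmission unless the last DMA message is still unacked
 */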
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If the last DMA message wasn't acked yet, leave this one queued */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}
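
/**
 * ishtp_cl_send_msg_dma() - Send a queued message over the DMA path
 * @dev: ISHTP device instance
 * @cl: client device instance
 *
 * Copy the message into the host DMA TX buffer and send a DMA_XFER HBM
 * message describing it. Falls back to the IPC path when no DMA buffer is
 * available and the default transfer path is in use
 */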
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If the last IPC message wasn't acked yet, leave this one queued */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* Copy the message into the DMA TX buffer */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* Send a DMA_XFER HBM message describing the buffer */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}
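
/**
 * ishtp_cl_send_msg() - Send a queued message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: client device instance
 *
 * Choose the transfer path based on the device's configured transfer_path
 */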
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}
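
/**
 * recv_ishtp_cl_msg() - Receive and dispatch an ISHTP client message (IPC)
 * @dev: ISHTP device instance
 * @ishtp_hdr: received message header
 *
 * Find the matching read buffer on the device read list, copy the fragment
 * into it and, when the message is complete, hand it off for processing.
 * Runs in ISR or work queue context
 */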
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message would overflow the buffer, drop it and
		 * recycle the rb
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment - the message is complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * The whole message arrived; send a new flow control
			 * credit and queue a new rb for the next message
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					      &dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}

		/* One more fragment in message (even if this was the last) */
		++cl->recv_msg_num_frags;

		/*
		 * It is safe to break here: a single input message can only
		 * go to a single request
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);

	/* If it is nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
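
/**
 * recv_ishtp_cl_msg_dma() - Receive and dispatch an ISHTP client message (DMA)
 * @dev: ISHTP device instance
 * @msg: pointer to the message data in the DMA RX buffer
 * @hbm: DMA_XFER HBM message describing the transfer
 *
 * Find the matching read buffer, copy the whole message into it and hand it
 * off for processing. Runs in ISR or work queue context
 */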
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message would overflow the buffer, drop it and
		 * recycle the rb
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* The DMA transfer delivers the whole message - it is complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * The whole message arrived; send a new flow control credit
		 * and queue a new rb for the next message
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				      &dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message */
		++cl->recv_msg_num_frags;

		/*
		 * It is safe to break here: a single input message can only
		 * go to a single request
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);

	/* If it is nobody's message, drop it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}