#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"
#include "client.h"
#include "hbm.h"

#include "mei-trace.h"

static inline u32 mei_txe_reg_read(void __iomem *base_addr,
				   unsigned long offset)
{
	return ioread32(base_addr + offset);
}

static inline void mei_txe_reg_write(void __iomem *base_addr,
				     unsigned long offset, u32 value)
{
	iowrite32(value, base_addr + offset);
}

static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
					      unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
}

static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
				       unsigned long offset)
{
	WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
	return mei_txe_sec_reg_read_silent(hw, offset);
}

static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
						unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
}

static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
					 unsigned long offset, u32 value)
{
	WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
	mei_txe_sec_reg_write_silent(hw, offset, value);
}

static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
				      unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
}

static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
					unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
}
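
/**
 * mei_txe_aliveness_set - request aliveness change
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * Requests an aliveness change from the firmware if the current value
 * differs from the requested one.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: true if a request was actually sent
 */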
static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool do_req = hw->aliveness != req;

	dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
		hw->aliveness, req);
	if (do_req) {
		dev->pg_event = MEI_PG_EVENT_WAIT;
		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
	}
	return do_req;
}

static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;

	reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
	return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
}

static u32 mei_txe_aliveness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;

	reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
	return reg & HICR_HOST_ALIVENESS_RESP_ACK;
}
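
/**
 * mei_txe_aliveness_poll - wait for aliveness to settle by polling
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Polls the aliveness response register until it reaches the expected
 * value or the timeout elapses; used where interrupts may be disabled.
 *
 * Return: 0 if the expected value was reached, -ETIME otherwise
 */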
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	ktime_t stop, start;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
				ktime_to_us(ktime_sub(ktime_get(), start)));
			return 0;
		}
		usleep_range(20, 50);
	} while (ktime_before(ktime_get(), stop));

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(dev->dev, "aliveness timed out\n");
	return -ETIME;
}
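
/**
 * mei_txe_aliveness_wait - wait for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Waits for the aliveness interrupt to deliver the expected value;
 * the device lock is dropped for the duration of the wait.
 *
 * Return: 0 on success, -ETIME on timeout
 */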
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	const unsigned long timeout =
			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
	long err;
	int ret;

	hw->aliveness = mei_txe_aliveness_get(dev);
	if (hw->aliveness == expected)
		return 0;

	mutex_unlock(&dev->device_lock);
	err = wait_event_timeout(hw->wait_aliveness_resp,
			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	hw->aliveness = mei_txe_aliveness_get(dev);
	ret = hw->aliveness == expected ? 0 : -ETIME;

	if (ret)
		dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
			 err, hw->aliveness, dev->pg_event);
	else
		dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
			jiffies_to_msecs(timeout - err),
			hw->aliveness, dev->pg_event);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	return ret;
}
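
/**
 * mei_txe_aliveness_set_sync - request aliveness change and wait for result
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * Return: 0 on success, < 0 otherwise
 */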
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
{
	if (mei_txe_aliveness_set(dev, req))
		return mei_txe_aliveness_wait(dev, req);
	return 0;
}

static bool mei_txe_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event == MEI_PG_EVENT_WAIT;
}

static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
	return true;
}

static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
}

static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hintmsk;

	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}

static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}

static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
	mei_txe_br_reg_write(hw,
			SICR_SEC_IPC_OUTPUT_STATUS_REG,
			SEC_IPC_OUTPUT_STATUS_RDY);
}

static bool mei_txe_is_input_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 status;

	status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
	return !!(SEC_IPC_INPUT_STATUS_RDY & status);
}

static inline void mei_txe_intr_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
			SEC_IPC_HOST_INT_STATUS_PENDING);
	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}

static void mei_txe_intr_disable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, 0);
	mei_txe_br_reg_write(hw, HIER_REG, 0);
}

static void mei_txe_intr_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}

static void mei_txe_synchronize_irq(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	synchronize_irq(pdev->irq);
}
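
/**
 * mei_txe_pending_interrupts - check for pending interrupt causes
 *
 * @dev: the device structure
 *
 * Only the readiness, aliveness, input ready and output doorbell causes
 * are of interest to the interrupt thread.
 *
 * Return: true if an interrupt cause is still pending
 */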
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
				      TXE_INTR_ALIVENESS |
				      TXE_INTR_IN_READY |
				      TXE_INTR_OUT_DB));

	if (ret) {
		dev_dbg(dev->dev,
			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
			!!(hw->intr_cause & TXE_INTR_IN_READY),
			!!(hw->intr_cause & TXE_INTR_READINESS),
			!!(hw->intr_cause & TXE_INTR_ALIVENESS),
			!!(hw->intr_cause & TXE_INTR_OUT_DB));
	}
	return ret;
}

static void mei_txe_input_payload_write(struct mei_device *dev,
					unsigned long idx, u32 value)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
			(idx * sizeof(u32)), value);
}

static u32 mei_txe_out_data_read(const struct mei_device *dev,
				 unsigned long idx)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return mei_txe_br_reg_read(hw,
			BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
}

static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw,
			SICR_HOST_IPC_READINESS_REQ_REG,
			SICR_HOST_IPC_READINESS_HOST_RDY);
}

static void mei_txe_readiness_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
			SICR_HOST_IPC_READINESS_RDY_CLR);
}

static u32 mei_txe_readiness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
}

static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
{
	return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
}

static bool mei_txe_hw_is_ready(struct mei_device *dev)
{
	u32 readiness = mei_txe_readiness_get(dev);

	return mei_txe_readiness_is_sec_rdy(readiness);
}

static inline bool mei_txe_host_is_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);

	return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
}
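
/**
 * mei_txe_readiness_wait - wait for SeC readiness after reset
 *
 * @dev: the device structure
 *
 * Return: 0 if the hardware signaled readiness, -ETIME on timeout
 */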
static int mei_txe_readiness_wait(struct mei_device *dev)
{
	if (mei_txe_hw_is_ready(dev))
		return 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait for readiness failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

static const struct mei_fw_status mei_txe_fw_sts = {
	.count = 2,
	.status[0] = PCI_CFG_TXE_FW_STS0,
	.status[1] = PCI_CFG_TXE_FW_STS1
};
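
/**
 * mei_txe_fw_status - read firmware status registers from PCI config space
 *
 * @dev: the device structure
 * @fw_status: out parameter for the firmware status
 *
 * Return: 0 on success, an error code otherwise
 */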
static int mei_txe_fw_status(struct mei_device *dev,
			     struct mei_fw_status *fw_status)
{
	const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
				&fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
				fw_src->status[i],
				fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
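
/**
 * mei_txe_hw_config - configure hardware at the start of the device
 *
 * @dev: the device structure
 *
 * Caches the host buffer depth and the initial aliveness and readiness
 * states of the hardware.
 */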
static void mei_txe_hw_config(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	/* the host buffer depth is fixed and does not change at runtime */
	dev->hbuf_depth = PAYLOAD_SIZE / 4;

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
		hw->aliveness, hw->readiness);
}
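
/**
 * mei_txe_write - write a message to the device
 *
 * @dev: the device structure
 * @header: header of the message
 * @buf: message payload buffer
 *
 * Return: 0 on success, an error code otherwise
 */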
static int mei_txe_write(struct mei_device *dev,
			 struct mei_msg_hdr *header,
			 const unsigned char *buf)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	unsigned long rem;
	unsigned long length;
	int slots = dev->hbuf_depth;
	u32 *reg_buf = (u32 *)buf;
	u32 dw_cnt;
	int i;

	if (WARN_ON(!header || !buf))
		return -EINVAL;

	length = header->length;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

	dw_cnt = mei_data2slots(length);
	if (dw_cnt > slots)
		return -EMSGSIZE;

	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
		return -EAGAIN;

	/* enable the input-ready interrupt */
	mei_txe_input_ready_interrupt_enable(dev);

	if (!mei_txe_is_input_ready(dev)) {
		char fw_sts_str[MEI_FW_STATUS_STR_SZ];

		mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
		dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
		return -EAGAIN;
	}

	mei_txe_input_payload_write(dev, 0, *((u32 *)header));

	for (i = 0; i < length / 4; i++)
		mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);

	rem = length & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, &buf[length - rem], rem);
		mei_txe_input_payload_write(dev, i + 1, reg);
	}

	/* the entire input buffer is consumed by a single write */
	hw->slots = 0;

	/* ring the input doorbell */
	mei_txe_input_doorbell_set(hw);

	return 0;
}

static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
{
	return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
}

static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return hw->slots;
}

static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
	return PAYLOAD_SIZE / 4;
}

static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
	return mei_txe_out_data_read(dev, 0);
}
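
/**
 * mei_txe_read - read a message from the device output buffer
 *
 * @dev: the device structure
 * @buf: message buffer to fill
 * @len: length of the message in bytes
 *
 * Return: 0 on success, -EINVAL on bad input
 */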
static int mei_txe_read(struct mei_device *dev,
			unsigned char *buf, unsigned long len)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 *reg_buf, reg;
	u32 rem;
	u32 i;

	if (WARN_ON(!buf || !len))
		return -EINVAL;

	reg_buf = (u32 *)buf;
	rem = len & 0x3;

	dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
		len, mei_txe_out_data_read(dev, 0));

	for (i = 0; i < len / 4; i++) {
		/* index 0 holds the message header, payload starts at 1 */
		reg = mei_txe_out_data_read(dev, i + 1);
		dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
		*reg_buf++ = reg;
	}

	if (rem) {
		reg = mei_txe_out_data_read(dev, i + 1);
		memcpy(reg_buf, &reg, rem);
	}

	mei_txe_output_ready_set(hw);
	return 0;
}
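
/**
 * mei_txe_hw_reset - reset the hardware
 *
 * @dev: the device structure
 * @intr_enable: if interrupts should be enabled after reset
 *
 * Brings the aliveness state down and clears readiness so that the
 * device can be restarted from a known state.
 *
 * Return: 0 on success, -EIO otherwise
 */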
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 aliveness_req;

	/*
	 * Read the input doorbell to keep the Bridge and SeC views
	 * consistent; the returned value itself is not used.
	 */
	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);

	aliveness_req = mei_txe_aliveness_req_get(dev);
	hw->aliveness = mei_txe_aliveness_get(dev);

	/* disable interrupts at this stage, we will poll */
	mei_txe_intr_disable(dev);

	/*
	 * If the aliveness request and response disagree, wait for them
	 * to settle; interrupts may be disabled, so poll.
	 */
	if (aliveness_req != hw->aliveness)
		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
			dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
			return -EIO;
		}

	/* if aliveness is currently requested, drop it */
	if (aliveness_req) {
		mei_txe_aliveness_set(dev, 0);
		if (mei_txe_aliveness_poll(dev, 0) < 0) {
			dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
			return -EIO;
		}
	}

	/* set the readiness RDY_CLR bit */
	mei_txe_readiness_clear(dev);

	return 0;
}
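
/**
 * mei_txe_hw_start - start the hardware after reset
 *
 * @dev: the device structure
 *
 * Re-enables interrupts, waits for readiness, raises aliveness and
 * advertises host readiness to the firmware.
 *
 * Return: 0 on success, an error code otherwise
 */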
static int mei_txe_hw_start(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int ret;
	u32 hisr;

	/* bring back interrupts */
	mei_txe_intr_enable(dev);

	ret = mei_txe_readiness_wait(dev);
	if (ret < 0) {
		dev_err(dev->dev, "waiting for readiness failed\n");
		return ret;
	}

	/* if the HISR.INT2_STS interrupt status bit is set then clear it */
	hisr = mei_txe_br_reg_read(hw, HISR_REG);
	if (hisr & HISR_INT_2_STS)
		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);

	/* clear the pending output doorbell interrupt cause */
	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
		return ret;
	}

	pm_runtime_set_active(dev->dev);

	/* enable the input-ready interrupt */
	mei_txe_input_ready_interrupt_enable(dev);

	/* set the SICR_SEC_IPC_OUTPUT_STATUS ready bit */
	mei_txe_output_ready_set(hw);

	/* advertise host readiness to the firmware */
	mei_txe_readiness_set_host_rdy(dev);

	return 0;
}
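
/**
 * mei_txe_check_and_ack_intrs - check and acknowledge pending interrupts
 *
 * @dev: the device structure
 * @do_ack: acknowledge the interrupts
 *
 * Checks whether the device generated an interrupt and, if requested,
 * records the causes and acknowledges them in the hardware.
 *
 * Return: true if an interrupt was generated by the device
 */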
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hisr;
	u32 hhisr;
	u32 ipc_isr;
	u32 aliveness;
	bool generated;

	/* read interrupt registers */
	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
	generated = (hhisr & IPC_HHIER_MSK);
	if (!generated)
		goto out;

	hisr = mei_txe_br_reg_read(hw, HISR_REG);

	/* only touch the SeC interrupt status when aliveness is asserted */
	aliveness = mei_txe_aliveness_get(dev);
	if (hhisr & IPC_HHIER_SEC && aliveness) {
		ipc_isr = mei_txe_sec_reg_read_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG);
	} else {
		ipc_isr = 0;
		hhisr &= ~IPC_HHIER_SEC;
	}

	generated = generated ||
		(hisr & HISR_INT_STS_MSK) ||
		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);

	if (generated && do_ack) {
		/* save the interrupt causes for the interrupt thread */
		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
			hw->intr_cause |= TXE_INTR_IN_READY;

		/* disable interrupts; the thread handler re-enables them */
		mei_txe_intr_disable(dev);

		/* clear in hierarchy: IPC and Bridge first, then the top level */
		mei_txe_sec_reg_write_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
		mei_txe_br_reg_write(hw, HISR_REG, hisr);
		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
	}

out:
	return generated;
}
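
/**
 * mei_txe_irq_quick_handler - the ISR of the MEI device
 *
 * @irq: the irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_WAKE_THREAD if the interrupt is ours, IRQ_NONE otherwise
 */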
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = dev_id;

	if (mei_txe_check_and_ack_intrs(dev, true))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}
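
/**
 * mei_txe_irq_thread_handler - txe interrupt thread
 *
 * @irq: the irq number
 * @dev_id: pointer to the device structure
 *
 * Processes the readiness, aliveness, output doorbell and input ready
 * causes recorded by the quick handler.
 *
 * Return: IRQ_HANDLED
 */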
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct list_head cmpl_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));

	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	INIT_LIST_HEAD(&cmpl_list);

	/* in MSI mode, check and acknowledge the interrupt causes here */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_txe_check_and_ack_intrs(dev, true);

	/* log the pending interrupt causes */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/*
	 * Readiness change: needed for initialization and for detecting
	 * that the firmware went through a reset.
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(dev->dev, "Readiness Interrupt was received...\n");

		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {
				dev_warn(dev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;
			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/*
	 * Aliveness change: wake up anyone waiting for an aliveness
	 * response (power management).
	 */
	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		dev_dbg(dev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}

	/* output doorbell: the firmware has posted a message to read */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	/* input ready: the input buffer can accept a new message */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = dev->hbuf_depth;
	}

	if (hw->aliveness && dev->hbuf_is_ready) {
		/* get the real register value */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &cmpl_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);

	mutex_unlock(&dev->device_lock);

	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_txe_hw_ops = {

	.host_is_ready = mei_txe_host_is_ready,

	.fw_status = mei_txe_fw_status,
	.pg_state = mei_txe_pg_state,

	.hw_is_ready = mei_txe_hw_is_ready,
	.hw_reset = mei_txe_hw_reset,
	.hw_config = mei_txe_hw_config,
	.hw_start = mei_txe_hw_start,

	.pg_in_transition = mei_txe_pg_in_transition,
	.pg_is_enabled = mei_txe_pg_is_enabled,

	.intr_clear = mei_txe_intr_clear,
	.intr_enable = mei_txe_intr_enable,
	.intr_disable = mei_txe_intr_disable,
	.synchronize_irq = mei_txe_synchronize_irq,

	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
	.hbuf_is_ready = mei_txe_is_input_ready,
	.hbuf_max_len = mei_txe_hbuf_max_len,

	.write = mei_txe_write,

	.rdbuf_full_slots = mei_txe_count_full_read_slots,
	.read_hdr = mei_txe_read_hdr,

	.read = mei_txe_read,
};
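
/**
 * mei_txe_dev_init - allocate and initialize the txe hardware specific structure
 *
 * @pdev: pci device
 *
 * Return: struct mei_device * on success or NULL
 */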
struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
			sizeof(struct mei_txe_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);

	hw = to_txe_hw(dev);

	init_waitqueue_head(&hw->wait_aliveness_resp);

	return dev;
}
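
/**
 * mei_txe_setup_satt2 - program the SATT2 memory window
 *
 * @dev: the device structure
 * @addr: physical start address of the window
 * @range: size of the window
 *
 * Return: 0 on success, -EINVAL if the address or range is not acceptable
 */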
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 lo32 = lower_32_bits(addr);
	u32 hi32 = upper_32_bits(addr);
	u32 ctrl;

	/* the address is limited to 36 bits */
	if (hi32 & ~0xF)
		return -EINVAL;

	/* the base address has to be 16 byte aligned */
	if (lo32 & 0xF)
		return -EINVAL;

	if (range & 0x4)
		return -EINVAL;

	if (range > SATT_RANGE_MAX)
		return -EINVAL;

	ctrl = SATT2_CTRL_VALID_MSK;
	ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;

	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
	dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
		range, lo32, ctrl);

	return 0;
}