1
2
3
4
5
6
7#include <linux/pci.h>
8#include <linux/jiffies.h>
9#include <linux/ktime.h>
10#include <linux/delay.h>
11#include <linux/kthread.h>
12#include <linux/interrupt.h>
13#include <linux/pm_runtime.h>
14
15#include <linux/mei.h>
16
17#include "mei_dev.h"
18#include "hw-txe.h"
19#include "client.h"
20#include "hbm.h"
21
22#include "mei-trace.h"
23
24#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
25
26
27
28
29
30
31
32
33
34static inline u32 mei_txe_reg_read(void __iomem *base_addr,
35 unsigned long offset)
36{
37 return ioread32(base_addr + offset);
38}
39
40
41
42
43
44
45
46
47static inline void mei_txe_reg_write(void __iomem *base_addr,
48 unsigned long offset, u32 value)
49{
50 iowrite32(value, base_addr + offset);
51}
52
53
54
55
56
57
58
59
60
61
62
63static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
64 unsigned long offset)
65{
66 return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
67}
68
69
70
71
72
73
74
75
76
77
78
79static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
80 unsigned long offset)
81{
82 WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
83 return mei_txe_sec_reg_read_silent(hw, offset);
84}
85
86
87
88
89
90
91
92
93
94
95static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
96 unsigned long offset, u32 value)
97{
98 mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
99}
100
101
102
103
104
105
106
107
108
109
110static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
111 unsigned long offset, u32 value)
112{
113 WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
114 mei_txe_sec_reg_write_silent(hw, offset, value);
115}
116
117
118
119
120
121
122
123
124static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
125 unsigned long offset)
126{
127 return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
128}
129
130
131
132
133
134
135
136
137static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
138 unsigned long offset, u32 value)
139{
140 mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
141}
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
158{
159
160 struct mei_txe_hw *hw = to_txe_hw(dev);
161 bool do_req = hw->aliveness != req;
162
163 dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
164 hw->aliveness, req);
165 if (do_req) {
166 dev->pg_event = MEI_PG_EVENT_WAIT;
167 mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
168 }
169 return do_req;
170}
171
172
173
174
175
176
177
178
179
180
181
182
183static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
184{
185 struct mei_txe_hw *hw = to_txe_hw(dev);
186 u32 reg;
187
188 reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
189 return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
190}
191
192
193
194
195
196
197
198
199
200static u32 mei_txe_aliveness_get(struct mei_device *dev)
201{
202 struct mei_txe_hw *hw = to_txe_hw(dev);
203 u32 reg;
204
205 reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
206 return reg & HICR_HOST_ALIVENESS_RESP_ACK;
207}
208
209
210
211
212
213
214
215
216
217
218
/**
 * mei_txe_aliveness_poll - busy-poll until the aliveness response matches
 *	@expected or the timeout elapses
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Polls the aliveness response register in 20-50us steps for up to
 * SEC_ALIVENESS_WAIT_TIMEOUT ms, refreshing the cached hw->aliveness
 * on each iteration. On either outcome pg_event is reset to IDLE.
 *
 * Return: 0 when @expected was observed, -ETIME on timeout
 */
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	ktime_t stop, start;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
				ktime_to_us(ktime_sub(ktime_get(), start)));
			return 0;
		}
		usleep_range(20, 50);
	} while (ktime_before(ktime_get(), stop));

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(dev->dev, "aliveness timed out\n");
	return -ETIME;
}
241
242
243
244
245
246
247
248
249
250
251
/**
 * mei_txe_aliveness_wait - sleep until the aliveness response matches
 *	@expected or the timeout elapses
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Waits on hw->wait_aliveness_resp for the aliveness interrupt
 * (pg_event == MEI_PG_EVENT_RECEIVED), dropping device_lock for the
 * duration of the wait. The cached hw->aliveness is refreshed after the
 * wait and success is judged by the register value, not the wait result.
 * pg_event is reset to IDLE before returning.
 *
 * Return: 0 when @expected was reached, -ETIME otherwise
 */
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	const unsigned long timeout =
		msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
	long err;
	int ret;

	hw->aliveness = mei_txe_aliveness_get(dev);
	if (hw->aliveness == expected)
		return 0;

	/* Drop the lock while sleeping so the IRQ thread can deliver
	 * the aliveness event and wake us.
	 */
	mutex_unlock(&dev->device_lock);
	err = wait_event_timeout(hw->wait_aliveness_resp,
			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	hw->aliveness = mei_txe_aliveness_get(dev);
	ret = hw->aliveness == expected ? 0 : -ETIME;

	if (ret)
		dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
			err, hw->aliveness, dev->pg_event);
	else
		dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
			jiffies_to_msecs(timeout - err),
			hw->aliveness, dev->pg_event);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	return ret;
}
283
284
285
286
287
288
289
290
291
292int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
293{
294 if (mei_txe_aliveness_set(dev, req))
295 return mei_txe_aliveness_wait(dev, req);
296 return 0;
297}
298
299
300
301
302
303
304
305
306static bool mei_txe_pg_in_transition(struct mei_device *dev)
307{
308 return dev->pg_event == MEI_PG_EVENT_WAIT;
309}
310
311
312
313
314
315
316
317
/**
 * mei_txe_pg_is_enabled - detect if power gating is supported by the hw
 *
 * @dev: the device structure
 *
 * Return: always true — power gating is unconditionally available on TXE
 */
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
	return true;
}
322
323
324
325
326
327
328
329
330
331static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
332{
333 struct mei_txe_hw *hw = to_txe_hw(dev);
334
335 return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
336}
337
338
339
340
341
342
343static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
344{
345 struct mei_txe_hw *hw = to_txe_hw(dev);
346 u32 hintmsk;
347
348 hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
349 hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
350 mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
351}
352
353
354
355
356
357
358
/**
 * mei_txe_input_doorbell_set - ring the input doorbell to tell the device
 *	that a new message is available in the input payload registers
 *
 * @hw: the txe hardware structure
 */
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
	/* Clear any stale input-ready cause before ringing the doorbell,
	 * so a subsequent input-ready interrupt reflects this message.
	 */
	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}
365
366
367
368
369
370
371static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
372{
373 mei_txe_br_reg_write(hw,
374 SICR_SEC_IPC_OUTPUT_STATUS_REG,
375 SEC_IPC_OUTPUT_STATUS_RDY);
376}
377
378
379
380
381
382
383
384
385static bool mei_txe_is_input_ready(struct mei_device *dev)
386{
387 struct mei_txe_hw *hw = to_txe_hw(dev);
388 u32 status;
389
390 status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
391 return !!(SEC_IPC_INPUT_STATUS_RDY & status);
392}
393
394
395
396
397
398
/**
 * mei_txe_intr_clear - clear all pending interrupt status bits
 *
 * @dev: the device structure
 *
 * Clears SeC, HISR and the top-level HHISR interrupt status registers.
 * The SeC register is written via the silent variant, so this works
 * regardless of the aliveness state.
 */
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
				     SEC_IPC_HOST_INT_STATUS_PENDING);
	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}
408
409
410
411
412
413
/**
 * mei_txe_intr_disable - disable all bridge interrupts by clearing both
 *	interrupt enable registers
 *
 * @dev: the device structure
 */
static void mei_txe_intr_disable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, 0);
	mei_txe_br_reg_write(hw, HIER_REG, 0);
}
421
422
423
424
425
/**
 * mei_txe_intr_enable - enable bridge interrupts by programming both
 *	interrupt enable registers with their full masks
 *
 * @dev: the device structure
 */
static void mei_txe_intr_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}
433
434
435
436
437
438
439static void mei_txe_synchronize_irq(struct mei_device *dev)
440{
441 struct pci_dev *pdev = to_pci_dev(dev->dev);
442
443 synchronize_irq(pdev->irq);
444}
445
446
447
448
449
450
451
452
453
454
455
456
/**
 * mei_txe_pending_interrupts - check whether any interrupt causes the
 *	IRQ thread cares about are pending, and log them
 *
 * @dev: the device structure
 *
 * Return: true if readiness, aliveness, input-ready or output-doorbell
 * causes are set in hw->intr_cause
 */
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
				      TXE_INTR_ALIVENESS |
				      TXE_INTR_IN_READY  |
				      TXE_INTR_OUT_DB));

	if (ret) {
		dev_dbg(dev->dev,
			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
			!!(hw->intr_cause & TXE_INTR_IN_READY),
			!!(hw->intr_cause & TXE_INTR_READINESS),
			!!(hw->intr_cause & TXE_INTR_ALIVENESS),
			!!(hw->intr_cause & TXE_INTR_OUT_DB));
	}
	return ret;
}
476
477
478
479
480
481
482
483
484
485static void mei_txe_input_payload_write(struct mei_device *dev,
486 unsigned long idx, u32 value)
487{
488 struct mei_txe_hw *hw = to_txe_hw(dev);
489
490 mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
491 (idx * sizeof(u32)), value);
492}
493
494
495
496
497
498
499
500
501
502
503static u32 mei_txe_out_data_read(const struct mei_device *dev,
504 unsigned long idx)
505{
506 struct mei_txe_hw *hw = to_txe_hw(dev);
507
508 return mei_txe_br_reg_read(hw,
509 BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
510}
511
512
513
514
515
516
517
518
519static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
520{
521 struct mei_txe_hw *hw = to_txe_hw(dev);
522
523 mei_txe_br_reg_write(hw,
524 SICR_HOST_IPC_READINESS_REQ_REG,
525 SICR_HOST_IPC_READINESS_HOST_RDY);
526}
527
528
529
530
531
532
533static void mei_txe_readiness_clear(struct mei_device *dev)
534{
535 struct mei_txe_hw *hw = to_txe_hw(dev);
536
537 mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
538 SICR_HOST_IPC_READINESS_RDY_CLR);
539}
540
541
542
543
544
545
546
547
548static u32 mei_txe_readiness_get(struct mei_device *dev)
549{
550 struct mei_txe_hw *hw = to_txe_hw(dev);
551
552 return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
553}
554
555
556
557
558
559
560
561
562
563
564static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
565{
566 return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
567}
568
569
570
571
572
573
574
575
576static bool mei_txe_hw_is_ready(struct mei_device *dev)
577{
578 u32 readiness = mei_txe_readiness_get(dev);
579
580 return mei_txe_readiness_is_sec_rdy(readiness);
581}
582
583
584
585
586
587
588
589
590static inline bool mei_txe_host_is_ready(struct mei_device *dev)
591{
592 struct mei_txe_hw *hw = to_txe_hw(dev);
593 u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
594
595 return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
596}
597
598
599
600
601
602
603
604
/**
 * mei_txe_readiness_wait - wait until the device reports readiness
 *
 * @dev: the device structure
 *
 * Returns immediately when the hardware is already ready; otherwise
 * sleeps (with device_lock dropped) until the readiness interrupt sets
 * dev->recvd_hw_ready, or SEC_RESET_WAIT_TIMEOUT ms pass. On success the
 * recvd_hw_ready flag is consumed (reset to false).
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_txe_readiness_wait(struct mei_device *dev)
{
	if (mei_txe_hw_is_ready(dev))
		return 0;

	/* Drop the lock so the IRQ thread can set recvd_hw_ready and
	 * wake us up.
	 */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait for readiness failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}
622
/* TXE firmware status registers located in PCI config space */
static const struct mei_fw_status mei_txe_fw_sts = {
	.count = 2,
	.status[0] = PCI_CFG_TXE_FW_STS0,
	.status[1] = PCI_CFG_TXE_FW_STS1
};
628
629
630
631
632
633
634
635
636
/**
 * mei_txe_fw_status - read firmware status registers from PCI config space
 *
 * @dev: the device structure
 * @fw_status: caller-provided status structure to fill in
 *
 * Copies up to MEI_FW_STATUS_MAX status dwords from the offsets listed in
 * mei_txe_fw_sts, tracing each read.
 *
 * Return: 0 on success, -EINVAL if @fw_status is NULL, or the error from
 * pci_read_config_dword()
 */
static int mei_txe_fw_status(struct mei_device *dev,
			     struct mei_fw_status *fw_status)
{
	const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
					    &fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
661
662
663
664
665
666
667
668
669
670
671
/**
 * mei_txe_hw_config - snapshot the hardware configuration
 *
 * @dev: the device structure
 *
 * Caches the current aliveness response and readiness register values
 * in the hw structure.
 *
 * Return: always 0
 */
static int mei_txe_hw_config(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
		hw->aliveness, hw->readiness);

	return 0;
}
685
686
687
688
689
690
691
692
693
694
695
696
697static int mei_txe_write(struct mei_device *dev,
698 const void *hdr, size_t hdr_len,
699 const void *data, size_t data_len)
700{
701 struct mei_txe_hw *hw = to_txe_hw(dev);
702 unsigned long rem;
703 const u32 *reg_buf;
704 u32 slots = TXE_HBUF_DEPTH;
705 u32 dw_cnt;
706 unsigned long i, j;
707
708 if (WARN_ON(!hdr || !data || hdr_len & 0x3))
709 return -EINVAL;
710
711 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
712
713 dw_cnt = mei_data2slots(hdr_len + data_len);
714 if (dw_cnt > slots)
715 return -EMSGSIZE;
716
717 if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
718 return -EAGAIN;
719
720
721 mei_txe_input_ready_interrupt_enable(dev);
722
723 if (!mei_txe_is_input_ready(dev)) {
724 char fw_sts_str[MEI_FW_STATUS_STR_SZ];
725
726 mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
727 dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
728 return -EAGAIN;
729 }
730
731 reg_buf = hdr;
732 for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
733 mei_txe_input_payload_write(dev, i, reg_buf[i]);
734
735 reg_buf = data;
736 for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
737 mei_txe_input_payload_write(dev, i + j, reg_buf[j]);
738
739 rem = data_len & 0x3;
740 if (rem > 0) {
741 u32 reg = 0;
742
743 memcpy(®, (const u8 *)data + data_len - rem, rem);
744 mei_txe_input_payload_write(dev, i + j, reg);
745 }
746
747
748 hw->slots = 0;
749
750
751 mei_txe_input_doorbell_set(hw);
752
753 return 0;
754}
755
756
757
758
759
760
761
762
/**
 * mei_txe_hbuf_depth - return the depth of the host write buffer in slots
 *
 * @dev: the device structure
 *
 * Return: TXE_HBUF_DEPTH — the buffer depth is fixed on TXE
 */
static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
	return TXE_HBUF_DEPTH;
}
767
768
769
770
771
772
773
774
/**
 * mei_txe_hbuf_empty_slots - return the number of free slots in the host
 *	write buffer
 *
 * @dev: the device structure
 *
 * Return: the cached hw->slots value (0 after a write, TXE_HBUF_DEPTH
 * once the input-ready interrupt arrives — see the IRQ thread handler)
 */
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return hw->slots;
}
781
782
783
784
785
786
787
788
/**
 * mei_txe_count_full_read_slots - count filled slots in the read buffer
 *
 * @dev: the device structure
 *
 * Return: TXE_HBUF_DEPTH — the whole buffer is treated as readable
 */
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
	return TXE_HBUF_DEPTH;
}
794
795
796
797
798
799
800
801
802
/**
 * mei_txe_read_hdr - read the message header, which occupies the first
 *	dword of the output payload
 *
 * @dev: the device structure
 *
 * Return: the header dword
 */
static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
	return mei_txe_out_data_read(dev, 0);
}
807
808
809
810
811
812
813
814
815
816static int mei_txe_read(struct mei_device *dev,
817 unsigned char *buf, unsigned long len)
818{
819
820 struct mei_txe_hw *hw = to_txe_hw(dev);
821 u32 *reg_buf, reg;
822 u32 rem;
823 u32 i;
824
825 if (WARN_ON(!buf || !len))
826 return -EINVAL;
827
828 reg_buf = (u32 *)buf;
829 rem = len & 0x3;
830
831 dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
832 len, mei_txe_out_data_read(dev, 0));
833
834 for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
835
836 reg = mei_txe_out_data_read(dev, i + 1);
837 dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
838 *reg_buf++ = reg;
839 }
840
841 if (rem) {
842 reg = mei_txe_out_data_read(dev, i + 1);
843 memcpy(reg_buf, ®, rem);
844 }
845
846 mei_txe_output_ready_set(hw);
847 return 0;
848}
849
850
851
852
853
854
855
856
857
/**
 * mei_txe_hw_reset - reset the txe hardware to a known quiescent state
 *
 * @dev: the device structure
 * @intr_enable: if interrupts should be enabled after reset
 *	(note: not used in this implementation — interrupts are
 *	re-enabled later in mei_txe_hw_start)
 *
 * Lets any in-flight aliveness request settle, then drops aliveness to 0
 * and clears the host readiness indication.
 *
 * Return: 0 on success, -EIO if aliveness fails to settle
 */
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 aliveness_req;

	/* Read the doorbell register (silently — aliveness may be down);
	 * the value is intentionally discarded. Presumably this read has a
	 * hardware side effect — TODO confirm against the TXE datasheet.
	 */
	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);

	aliveness_req = mei_txe_aliveness_req_get(dev);
	hw->aliveness = mei_txe_aliveness_get(dev);

	/* Disable interrupts in this stage we will poll */
	mei_txe_intr_disable(dev);

	/* If there is a pending request that has not yet been acknowledged,
	 * poll until the response matches the request before changing it.
	 */
	if (aliveness_req != hw->aliveness)
		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
			dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
			return -EIO;
		}

	/* If aliveness was requested, drop it to 0 and wait for the
	 * device to acknowledge the de-assertion.
	 */
	if (aliveness_req) {
		mei_txe_aliveness_set(dev, 0);
		if (mei_txe_aliveness_poll(dev, 0) < 0) {
			dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
			return -EIO;
		}
	}

	/* Remove the host readiness indication so the device knows the
	 * host is going through reset.
	 */
	mei_txe_readiness_clear(dev);

	return 0;
}
904
905
906
907
908
909
910
911
/**
 * mei_txe_hw_start - start the txe hardware after reset
 *
 * @dev: the device structure
 *
 * Re-enables interrupts, waits for device readiness, asserts aliveness
 * synchronously, and finally advertises output-ready and host readiness.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_txe_hw_start(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int ret;

	u32 hisr;

	/* bring back interrupts */
	mei_txe_intr_enable(dev);

	ret = mei_txe_readiness_wait(dev);
	if (ret < 0) {
		dev_err(dev->dev, "waiting for readiness failed\n");
		return ret;
	}

	/* If HISR.INT2_STS is still set, acknowledge it directly so a
	 * stale status does not linger past the restart.
	 */
	hisr = mei_txe_br_reg_read(hw, HISR_REG);
	if (hisr & HISR_INT_2_STS)
		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);

	/* Clear any leftover output-doorbell cause from before the reset. */
	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
		return ret;
	}

	pm_runtime_set_active(dev->dev);

	/* enable input ready interrupts:
	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
	 */
	mei_txe_input_ready_interrupt_enable(dev);

	/*  Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
	mei_txe_output_ready_set(hw);

	/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY */
	mei_txe_readiness_set_host_rdy(dev);

	return 0;
}
961
962
963
964
965
966
967
968
969
970
/**
 * mei_txe_check_and_ack_intrs - check whether the device generated an
 *	interrupt and optionally acknowledge it
 *
 * @dev: the device structure
 * @do_ack: acknowledge (clear) the interrupt status registers and record
 *	the causes in hw->intr_cause
 *
 * The SeC status register is only read when aliveness is asserted;
 * otherwise the SeC contribution is masked out.
 *
 * Return: true if an interrupt was generated by this device
 */
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hisr;
	u32 hhisr;
	u32 ipc_isr;
	u32 aliveness;
	bool generated;

	/* read interrupt registers */
	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
	generated = (hhisr & IPC_HHIER_MSK);
	if (!generated)
		goto out;

	hisr = mei_txe_br_reg_read(hw, HISR_REG);

	aliveness = mei_txe_aliveness_get(dev);
	if (hhisr & IPC_HHIER_SEC && aliveness) {
		ipc_isr = mei_txe_sec_reg_read_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG);
	} else {
		/* SeC is not alive: its status cannot be read — drop the
		 * SeC bit so it is not acknowledged below.
		 */
		ipc_isr = 0;
		hhisr &= ~IPC_HHIER_SEC;
	}

	generated = generated ||
		(hisr & HISR_INT_STS_MSK) ||
		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);

	if (generated && do_ack) {
		/* Save the causes for the thread handler, ... */
		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
			hw->intr_cause |= TXE_INTR_IN_READY;

		/* ... disable interrupts until the thread runs, ... */
		mei_txe_intr_disable(dev);

		/* ... and write back the status registers to ack. */
		mei_txe_sec_reg_write_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
		mei_txe_br_reg_write(hw, HISR_REG, hisr);
		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
	}

out:
	return generated;
}
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
1031{
1032 struct mei_device *dev = dev_id;
1033
1034 if (mei_txe_check_and_ack_intrs(dev, true))
1035 return IRQ_WAKE_THREAD;
1036 return IRQ_NONE;
1037}
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/**
 * mei_txe_irq_thread_handler - threaded interrupt handler
 *
 * @irq: irq number
 * @dev_id: mei device (passed as the cookie at request_irq time)
 *
 * Handles, in order: readiness changes, aliveness acknowledgements,
 * the output doorbell (reads from the device), input-ready (write buffer
 * free), and finally pending writes and completions. Interrupts were
 * disabled by the quick handler/ack path and are re-enabled at the end.
 *
 * Return: always IRQ_HANDLED
 */
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct list_head cmpl_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));

	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	INIT_LIST_HEAD(&cmpl_list);

	/* With MSI, the quick handler may not have run — ack here. */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_txe_check_and_ack_intrs(dev, true);

	/* show irq events (debug log only) */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/* Readiness: if SeC became ready, kick off start-up; if it lost
	 * readiness outside of a reset, schedule a reset.
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(dev->dev, "Readiness Interrupt was received...\n");

		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {

				dev_warn(dev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;

			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/* Aliveness: record the event and wake anyone waiting on the
	 * aliveness response.
	 */
	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		/* Clear the interrupt cause */
		dev_dbg(dev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}

	/* Output doorbell: the device delivered a message — read it.
	 * A read failure outside of reset/power-down schedules a reset.
	 */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {

		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);

			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	/* Input ready: the device consumed the last write — the whole
	 * host buffer is free again.
	 */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = TXE_HBUF_DEPTH;
	}

	/* Process pending writes only when the device is alive and the
	 * write buffer is available.
	 */
	if (hw->aliveness && dev->hbuf_is_ready) {

		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &cmpl_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);

	mutex_unlock(&dev->device_lock);

	/* Re-enable interrupts (disabled on the ack path). */
	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}
1158
/* TXE implementation of the mei hardware operations vtable */
static const struct mei_hw_ops mei_txe_hw_ops = {

	.host_is_ready = mei_txe_host_is_ready,

	.fw_status = mei_txe_fw_status,
	.pg_state = mei_txe_pg_state,

	.hw_is_ready = mei_txe_hw_is_ready,
	.hw_reset = mei_txe_hw_reset,
	.hw_config = mei_txe_hw_config,
	.hw_start = mei_txe_hw_start,

	.pg_in_transition = mei_txe_pg_in_transition,
	.pg_is_enabled = mei_txe_pg_is_enabled,

	.intr_clear = mei_txe_intr_clear,
	.intr_enable = mei_txe_intr_enable,
	.intr_disable = mei_txe_intr_disable,
	.synchronize_irq = mei_txe_synchronize_irq,

	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
	.hbuf_is_ready = mei_txe_is_input_ready,
	.hbuf_depth = mei_txe_hbuf_depth,

	.write = mei_txe_write,

	.rdbuf_full_slots = mei_txe_count_full_read_slots,
	.read_hdr = mei_txe_read_hdr,

	.read = mei_txe_read,

};
1191
1192
1193
1194
1195
1196
1197
1198
1199struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
1200{
1201 struct mei_device *dev;
1202 struct mei_txe_hw *hw;
1203
1204 dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
1205 if (!dev)
1206 return NULL;
1207
1208 mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
1209
1210 hw = to_txe_hw(dev);
1211
1212 init_waitqueue_head(&hw->wait_aliveness_resp);
1213
1214 return dev;
1215}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
/**
 * mei_txe_setup_satt2 - program the SATT2 address translation window
 *
 * @dev: the device structure
 * @addr: physical base address of the window
 * @range: window size
 *
 * Validates alignment and range constraints, then writes the size, the
 * low 32 bits of the base, and the control register (valid bit plus the
 * high address bits).
 *
 * Return: 0 on success, -EINVAL on an invalid address or range
 */
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 lo32 = lower_32_bits(addr);
	u32 hi32 = upper_32_bits(addr);
	u32 ctrl;

	/* Only the lowest 4 bits above 32 bits of address are supported. */
	if (hi32 & ~0xF)
		return -EINVAL;

	/* Base address must be 16-byte aligned. */
	if (lo32 & 0xF)
		return -EINVAL;

	/* Range must not have bit 2 set — hardware constraint,
	 * presumably an 8-byte granularity requirement; TODO confirm
	 * against the TXE datasheet.
	 */
	if (range & 0x4)
		return -EINVAL;

	/* Range must not exceed the hardware maximum. */
	if (range > SATT_RANGE_MAX)
		return -EINVAL;

	ctrl = SATT2_CTRL_VALID_MSK;
	ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;

	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
	dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
		range, lo32, ctrl);

	return 0;
}
1261