/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 *
 * ME hardware layer: register access, host/ME handshake, circular
 * buffer read/write and power gating (legacy PG and D0i3) handling.
 */
#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

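/**
 * mei_me_reg_read - reads 32bit data from the mei device registers
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */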
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
                                  unsigned long offset)
{
        return ioread32(hw->mem_addr + offset);
}

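/**
 * mei_me_reg_write - writes 32bit data to the mei device registers
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */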
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
                                    unsigned long offset, u32 value)
{
        iowrite32(value, hw->mem_addr + offset);
}

static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
        return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
        mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
        trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

        return reg;
}

static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
        trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

        return reg;
}

static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
        mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

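/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 *	masking out the interrupt status bits (H_CSR_IS_MASK) so that
 *	pending interrupts are not inadvertently acknowledged
 *
 * @dev: the device structure
 * @reg: new register value
 */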
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
        reg &= ~H_CSR_IS_MASK;
        mei_hcsr_write(dev, reg);
}

static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
        u32 hcsr;

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
}

static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
        trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

        return reg;
}

static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
        mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

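/**
 * mei_me_fw_status - read fw status registers from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values to fill in
 *
 * Return: 0 on success, error otherwise
 */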
static int mei_me_fw_status(struct mei_device *dev,
                            struct mei_fw_status *fw_status)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        struct mei_me_hw *hw = to_me_hw(dev);
        const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
        int ret;
        int i;

        if (!fw_status)
                return -EINVAL;

        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
                ret = pci_read_config_dword(pdev, fw_src->status[i],
                                            &fw_status->status[i]);
                trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
                                       fw_src->status[i],
                                       fw_status->status[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

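/**
 * mei_me_hw_config - configure hw dependent settings:
 *	host buffer depth, D0i3 support and the initial power gating state
 *
 * @dev: mei device
 */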
static void mei_me_hw_config(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr, reg;

        hcsr = mei_hcsr_read(dev);
        dev->hbuf_depth = (hcsr & H_CBD) >> 24;

        reg = 0;
        pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        hw->d0i3_supported =
                ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

        hw->pg_state = MEI_PG_OFF;
        if (hw->d0i3_supported) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3)
                        hw->pg_state = MEI_PG_ON;
        }
}

static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        return hw->pg_state;
}

static inline u32 me_intr_src(u32 hcsr)
{
        return hcsr & H_CSR_IS_MASK;
}

static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
        hcsr &= ~H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
        if (me_intr_src(hcsr))
                mei_hcsr_write(dev, hcsr);
}

static void mei_me_intr_clear(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        me_intr_clear(dev, hcsr);
}

static void mei_me_intr_enable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

static void mei_me_intr_disable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        me_intr_disable(dev, hcsr);
}

static void mei_me_synchronize_irq(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        synchronize_irq(pdev->irq);
}

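/**
 * mei_me_hw_reset_release - release the device from reset
 *
 * @dev: the device structure
 */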
static void mei_me_hw_reset_release(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IG;
        hcsr &= ~H_RST;
        mei_hcsr_set(dev, hcsr);

        /* complete this write before we set host ready on another CPU */
        mmiowb();
}

static void mei_me_host_set_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
}

static bool mei_me_host_is_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        return (hcsr & H_RDY) == H_RDY;
}

static bool mei_me_hw_is_ready(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}

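/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *	or the timeout is reached
 *
 * @dev: mei device
 *
 * Return: 0 on success, -ETIME on timeout
 */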
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                           dev->recvd_hw_ready,
                           mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }

        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
}

static int mei_me_hw_start(struct mei_device *dev)
{
        int ret = mei_me_hw_ready_wait(dev);

        if (ret)
                return ret;
        dev_dbg(dev->dev, "hw is ready\n");

        mei_me_host_set_ready(dev);
        return ret;
}

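/**
 * mei_hbuf_filled_slots - gets the number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */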
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
        u32 hcsr;
        char read_ptr, write_ptr;

        hcsr = mei_hcsr_read(dev);

        read_ptr = (char) ((hcsr & H_CBRP) >> 8);
        write_ptr = (char) ((hcsr & H_CBWP) >> 16);

        return (unsigned char) (write_ptr - read_ptr);
}

static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
        return mei_hbuf_filled_slots(dev) == 0;
}

static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
        unsigned char filled_slots, empty_slots;

        filled_slots = mei_hbuf_filled_slots(dev);
        empty_slots = dev->hbuf_depth - filled_slots;

        /* check for overflow */
        if (filled_slots > dev->hbuf_depth)
                return -EOVERFLOW;

        return empty_slots;
}

static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
        return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}

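/**
 * mei_me_hbuf_write - writes a message to the host hw buffer
 *
 * @dev: the device structure
 * @header: mei HECI header of the message
 * @buf: message payload to be written
 *
 * Return: 0 on success, < 0 otherwise
 */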
static int mei_me_hbuf_write(struct mei_device *dev,
                             struct mei_msg_hdr *header,
                             const unsigned char *buf)
{
        unsigned long rem;
        unsigned long length = header->length;
        u32 *reg_buf = (u32 *)buf;
        u32 dw_cnt;
        int i;
        int empty_slots;

        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

        empty_slots = mei_hbuf_empty_slots(dev);
        dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);

        dw_cnt = mei_data2slots(length);
        if (empty_slots < 0 || dw_cnt > empty_slots)
                return -EMSGSIZE;

        mei_me_hcbww_write(dev, *((u32 *) header));

        for (i = 0; i < length / 4; i++)
                mei_me_hcbww_write(dev, reg_buf[i]);

        rem = length & 0x3;
        if (rem > 0) {
                u32 reg = 0;

                memcpy(&reg, &buf[length - rem], rem);
                mei_me_hcbww_write(dev, reg);
        }

        mei_hcsr_set_hig(dev);
        if (!mei_me_hw_is_ready(dev))
                return -EIO;

        return 0;
}

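/**
 * mei_me_count_full_read_slots - counts the filled slots
 *	available in the ME read buffer
 *
 * @dev: the device structure
 *
 * Return: filled slots count, -EOVERFLOW on overflow
 */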
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
        u32 me_csr;
        char read_ptr, write_ptr;
        unsigned char buffer_depth, filled_slots;

        me_csr = mei_me_mecsr_read(dev);
        buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
        read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
        write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
        filled_slots = (unsigned char) (write_ptr - read_ptr);

        /* check for overflow */
        if (filled_slots > buffer_depth)
                return -EOVERFLOW;

        dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
        return (int)filled_slots;
}

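/**
 * mei_me_read_slots - reads a message from the mei device
 *
 * @dev: the device structure
 * @buffer: message buffer to be filled
 * @buffer_length: length of the message to read in bytes
 *
 * Return: always 0
 */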
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                             unsigned long buffer_length)
{
        u32 *reg_buf = (u32 *)buffer;

        for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
                *reg_buf++ = mei_me_mecbrw_read(dev);

        if (buffer_length > 0) {
                u32 reg = mei_me_mecbrw_read(dev);

                memcpy(reg_buf, &reg, buffer_length);
        }

        mei_hcsr_set_hig(dev);
        return 0;
}

static void mei_me_pg_set(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        reg |= H_HPG_CSR_PGI;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

static void mei_me_pg_unset(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

        reg |= H_HPG_CSR_PGIHEXR;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

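/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */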
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
                mei_me_pg_set(dev);
                ret = 0;
        } else {
                ret = -ETIME;
        }

        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_ON;

        return ret;
}

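/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */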
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
                goto reply;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        mei_me_pg_unset(dev);

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

reply:
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
                ret = 0;
        else
                ret = -ETIME;

out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_OFF;

        return ret;
}

static bool mei_me_pg_in_transition(struct mei_device *dev)
{
        return dev->pg_event >= MEI_PG_EVENT_WAIT &&
               dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

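/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */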
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg = mei_me_mecsr_read(dev);

        if (hw->d0i3_supported)
                return true;

        if ((reg & ME_PGIC_HRA) == 0)
                goto notsupported;

        if (!dev->hbm_f_pg_supported)
                goto notsupported;

        return true;

notsupported:
        dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
                hw->d0i3_supported,
                !!(reg & ME_PGIC_HRA),
                dev->version.major_version,
                dev->version.minor_version,
                HBM_MAJOR_VERSION_PGI,
                HBM_MINOR_VERSION_PGI);

        return false;
}

static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg |= H_D0I3C_I3;
        if (intr)
                reg |= H_D0I3C_IR;
        else
                reg &= ~H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        reg = mei_me_d0i3c_read(dev);
        return reg;
}

static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg &= ~H_D0I3C_I3;
        reg |= H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        reg = mei_me_d0i3c_read(dev);
        return reg;
}

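/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure,
 *	with the HBM handshake and waiting for the d0i3 interrupt
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */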
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                dev_dbg(dev->dev, "d0i3 set not needed\n");
                ret = 0;
                goto on;
        }

        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                goto out;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3_set(dev, true);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
                ret = 0;
                goto on;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (!(reg & H_D0I3C_I3)) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
on:
        hw->pg_state = MEI_PG_ON;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
        return ret;
}

static int mei_me_d0i3_enter(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                dev_dbg(dev->dev, "already d0i3 : set not needed\n");
                goto on;
        }

        mei_me_d0i3_set(dev, false);
on:
        hw->pg_state = MEI_PG_ON;
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter\n");
        return 0;
}

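/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */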
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3c_read(dev);
        if (!(reg & H_D0I3C_I3)) {
                dev_dbg(dev->dev, "d0i3 exit not needed\n");
                ret = 0;
                goto off;
        }

        reg = mei_me_d0i3_unset(dev);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
                ret = 0;
                goto off;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
off:
        hw->pg_state = MEI_PG_OFF;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;

        dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
        return ret;
}

static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
                return;

        dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
        hw->pg_state = MEI_PG_OFF;
        if (waitqueue_active(&dev->wait_pg))
                wake_up(&dev->wait_pg);
}

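/**
 * mei_me_d0i3_intr - perform d0i3 processing in the interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source bits
 */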
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
            (intr_source & H_D0I3C_IS)) {
                dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
                if (hw->pg_state == MEI_PG_ON) {
                        hw->pg_state = MEI_PG_OFF;
                        if (dev->hbm_state != MEI_HBM_IDLE) {
                                /*
                                 * force H_RDY because it could have been
                                 * wiped off during PG
                                 */
                                dev_dbg(dev->dev, "d0i3 set host ready\n");
                                mei_me_host_set_ready(dev);
                        }
                } else {
                        hw->pg_state = MEI_PG_ON;
                }

                wake_up(&dev->wait_pg);
        }

        if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
                /*
                 * HW sent some data while we are in D0i3, so
                 * the HW initiated an exit from D0i3: start the
                 * resume sequence to leave the low power state.
                 */
                dev_dbg(dev->dev, "d0i3 want resume\n");
                mei_hbm_pg_resume(dev);
        }
}

/* dispatch pg interrupt processing to the d0i3 or legacy pg flow */
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                mei_me_d0i3_intr(dev, intr_source);
        else
                mei_me_pg_legacy_intr(dev);
}

/* perform the power gating entry procedure (d0i3 or legacy pg) */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_enter_sync(dev);
        else
                return mei_me_pg_legacy_enter_sync(dev);
}

/* perform the power gating exit procedure (d0i3 or legacy pg) */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_exit_sync(dev);
        else
                return mei_me_pg_legacy_exit_sync(dev);
}

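/**
 * mei_me_hw_reset - resets the fw via the mei csr register
 *
 * @dev: the device structure
 * @intr_enable: whether interrupts should be enabled after reset
 *
 * Return: 0 on success, an error code otherwise
 */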
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        int ret;
        u32 hcsr;

        if (intr_enable) {
                mei_me_intr_enable(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_exit_sync(dev);
                        if (ret)
                                return ret;
                }
        }

        pm_runtime_set_active(dev->dev);

        hcsr = mei_hcsr_read(dev);
        /*
         * H_RST may already be set, for example if a preceding reset
         * flow hasn't completed; asserting H_RST again would be
         * ignored, so clear the bit first to start a clean reset.
         */
        if ((hcsr & H_RST) == H_RST) {
                dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
                hcsr &= ~H_RST;
                mei_hcsr_set(dev, hcsr);
                hcsr = mei_hcsr_read(dev);
        }

        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

        if (!intr_enable)
                hcsr &= ~H_CSR_IE_MASK;

        dev->recvd_hw_ready = false;
        mei_hcsr_write(dev, hcsr);

        /* read H_CSR once to ensure that the posted write completed */
        hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_RST) == 0)
                dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

        if ((hcsr & H_RDY) == H_RDY)
                dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

        if (!intr_enable) {
                mei_me_hw_reset_release(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_enter(dev);
                        if (ret)
                                return ret;
                }
        }
        return 0;
}

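/**
 * mei_me_irq_quick_handler - the ISR of the MEI device
 *
 * @irq: the irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_WAKE_THREAD if the interrupt belongs to this device,
 *	IRQ_NONE otherwise
 */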
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *)dev_id;
        u32 hcsr;

        hcsr = mei_hcsr_read(dev);
        if (!me_intr_src(hcsr))
                return IRQ_NONE;

        dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

        /* disable interrupts on device until the thread handler runs */
        me_intr_disable(dev, hcsr);
        return IRQ_WAKE_THREAD;
}

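/**
 * mei_me_irq_thread_handler - function called after the ISR to handle
 *	the interrupt processing
 *
 * @irq: the irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_HANDLED
 */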
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *) dev_id;
        struct list_head cmpl_list;
        s32 slots;
        u32 hcsr;
        int rets = 0;

        dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");

        mutex_lock(&dev->device_lock);

        hcsr = mei_hcsr_read(dev);
        me_intr_clear(dev, hcsr);

        INIT_LIST_HEAD(&cmpl_list);

        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
                dev_warn(dev->dev, "FW not ready: resetting.\n");
                schedule_work(&dev->reset_work);
                goto end;
        }

        if (mei_me_hw_is_resetting(dev))
                mei_hcsr_set_hig(dev);

        mei_me_pg_intr(dev, me_intr_src(hcsr));

        /* check if we need to start the dev */
        if (!mei_host_is_ready(dev)) {
                if (mei_hw_is_ready(dev)) {
                        dev_dbg(dev->dev, "we need to start the dev.\n");
                        dev->recvd_hw_ready = true;
                        wake_up(&dev->wait_hw_ready);
                } else {
                        dev_dbg(dev->dev, "Spurious Interrupt\n");
                }
                goto end;
        }

        /* check slots available for reading */
        slots = mei_count_full_read_slots(dev);
        while (slots > 0) {
                dev_dbg(dev->dev, "slots to read = %08x\n", slots);
                rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
                /*
                 * There is a race between ME write and interrupt delivery:
                 * not all data is always available immediately after the
                 * interrupt, so try to read again on the next interrupt.
                 */
                if (rets == -ENODATA)
                        break;

                if (rets && dev->dev_state != MEI_DEV_RESETTING) {
                        dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
                                rets);
                        schedule_work(&dev->reset_work);
                        goto end;
                }
        }

        dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

        /*
         * During the PG handshake the only allowed write is the reply to
         * the PG exit message, so block calling the write handler while
         * the pg event is in the handshake states.
         */
        if (dev->pg_event != MEI_PG_EVENT_WAIT &&
            dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                rets = mei_irq_write_handler(dev, &cmpl_list);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }

        mei_irq_compl_handler(dev, &cmpl_list);

end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
        mei_me_intr_enable(dev);
        mutex_unlock(&dev->device_lock);
        return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

        .fw_status = mei_me_fw_status,
        .pg_state = mei_me_pg_state,

        .host_is_ready = mei_me_host_is_ready,

        .hw_is_ready = mei_me_hw_is_ready,
        .hw_reset = mei_me_hw_reset,
        .hw_config = mei_me_hw_config,
        .hw_start = mei_me_hw_start,

        .pg_in_transition = mei_me_pg_in_transition,
        .pg_is_enabled = mei_me_pg_is_enabled,

        .intr_clear = mei_me_intr_clear,
        .intr_enable = mei_me_intr_enable,
        .intr_disable = mei_me_intr_disable,
        .synchronize_irq = mei_me_synchronize_irq,

        .hbuf_free_slots = mei_me_hbuf_empty_slots,
        .hbuf_is_ready = mei_me_hbuf_is_empty,
        .hbuf_max_len = mei_me_hbuf_max_len,

        .write = mei_me_hbuf_write,

        .rdbuf_full_slots = mei_me_count_full_read_slots,
        .read_hdr = mei_me_mecbrw_read,
        .read = mei_me_read_slots
};

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
        u32 reg;

        pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
        /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
        return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM \
        .quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
        u32 reg;
        unsigned int devfn;

        /*
         * Read the ME FW status register to check for SPS firmware;
         * it is only signaled in PCI function 0.
         */
        devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
        pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);

        return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS \
        .quirk_probe = mei_me_fw_type_sps


#define MEI_CFG_LEGACY_HFS \
        .fw_status.count = 0

#define MEI_CFG_ICH_HFS \
        .fw_status.count = 1, \
        .fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS \
        .fw_status.count = 2, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS \
        .fw_status.count = 6, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2, \
        .fw_status.status[2] = PCI_CFG_HFS_3, \
        .fw_status.status[3] = PCI_CFG_HFS_4, \
        .fw_status.status[4] = PCI_CFG_HFS_5, \
        .fw_status.status[5] = PCI_CFG_HFS_6


/* ICH legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
        MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
        MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
        MEI_CFG_PCH_HFS,
};

/* PCH devices with a quirk to exclude Node Manager (NM) firmware */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
        MEI_CFG_FW_NM,
};

/* PCH8 and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
};

/* PCH8 and newer devices with a quirk to exclude SPS firmware */
const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_SPS,
};

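/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: the pci device structure
 * @cfg: per device generation config
 *
 * Return: the mei_device pointer on success, NULL on failure
 */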
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
                                   const struct mei_cfg *cfg)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;

        dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
                           sizeof(struct mei_me_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
        hw = to_me_hw(dev);

        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
        return dev;
}