/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
                                  unsigned long offset)
{
        return ioread32(hw->mem_addr + offset);
}

/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
                                    unsigned long offset, u32 value)
{
        iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
        return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
        mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
        trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

        return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
        trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

        return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
        mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bits for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
        reg &= ~H_CSR_IS_MASK;
        mei_hcsr_write(dev, reg);
}

/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
        trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

        return reg;
}

/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
        mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

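/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */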
static int mei_me_fw_status(struct mei_device *dev,
                            struct mei_fw_status *fw_status)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        struct mei_me_hw *hw = to_me_hw(dev);
        const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
        int ret;
        int i;

        if (!fw_status)
                return -EINVAL;

        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
                ret = pci_read_config_dword(pdev, fw_src->status[i],
                                            &fw_status->status[i]);
                trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
                                       fw_src->status[i],
                                       fw_status->status[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

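/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */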
static void mei_me_hw_config(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr, reg;

        /* Doesn't change in runtime */
        hcsr = mei_hcsr_read(dev);
        dev->hbuf_depth = (hcsr & H_CBD) >> 24;

        reg = 0;
        pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        hw->d0i3_supported =
                ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

        hw->pg_state = MEI_PG_OFF;
        if (hw->d0i3_supported) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3)
                        hw->pg_state = MEI_PG_ON;
        }
}

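/**
 * mei_me_pg_state - translate internal pg state
 *	to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: the device's current power gating state
 */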
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        return hw->pg_state;
}

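/**
 * mei_me_intr_clear - clear pending interrupt causes
 *
 * @dev: the device structure
 */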
static void mei_me_intr_clear(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        if (hcsr & H_CSR_IS_MASK)
                mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr &= ~H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

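/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */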
static void mei_me_hw_reset_release(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IG;
        hcsr &= ~H_RST;
        mei_hcsr_set(dev, hcsr);

        /* complete this write before we set host ready on another CPU */
        mmiowb();
}

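/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */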
static void mei_me_host_set_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

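/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */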
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                           dev->recvd_hw_ready,
                           mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }

        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
}

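/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */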
static int mei_me_hw_start(struct mei_device *dev)
{
        int ret = mei_me_hw_ready_wait(dev);

        if (ret)
                return ret;
        dev_dbg(dev->dev, "hw is ready\n");

        mei_me_host_set_ready(dev);
        return ret;
}

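/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */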
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
        u32 hcsr;
        char read_ptr, write_ptr;

        hcsr = mei_hcsr_read(dev);

        read_ptr = (char) ((hcsr & H_CBRP) >> 8);
        write_ptr = (char) ((hcsr & H_CBWP) >> 16);

        return (unsigned char) (write_ptr - read_ptr);
}

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
        return mei_hbuf_filled_slots(dev) == 0;
}

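/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */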
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
        unsigned char filled_slots, empty_slots;

        filled_slots = mei_hbuf_filled_slots(dev);
        empty_slots = dev->hbuf_depth - filled_slots;

        /* check for overflow */
        if (filled_slots > dev->hbuf_depth)
                return -EOVERFLOW;

        return empty_slots;
}

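/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in bytes
 */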
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
        return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}

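/**
 * mei_me_write_message - writes a message to mei device.
 *
 * @dev: the device structure
 * @header: mei HECI header of message
 * @buf: message payload will be written
 *
 * Return: -EIO if write has failed
 */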
static int mei_me_write_message(struct mei_device *dev,
                                struct mei_msg_hdr *header,
                                unsigned char *buf)
{
        unsigned long rem;
        unsigned long length = header->length;
        u32 *reg_buf = (u32 *)buf;
        u32 hcsr;
        u32 dw_cnt;
        int i;
        int empty_slots;

        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

        empty_slots = mei_hbuf_empty_slots(dev);
        dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

        dw_cnt = mei_data2slots(length);
        if (empty_slots < 0 || dw_cnt > empty_slots)
                return -EMSGSIZE;

        mei_me_hcbww_write(dev, *((u32 *) header));

        for (i = 0; i < length / 4; i++)
                mei_me_hcbww_write(dev, reg_buf[i]);

        rem = length & 0x3;
        if (rem > 0) {
                u32 reg = 0;

                memcpy(&reg, &buf[length - rem], rem);
                mei_me_hcbww_write(dev, reg);
        }

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
        if (!mei_me_hw_is_ready(dev))
                return -EIO;

        return 0;
}

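/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */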
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
        u32 me_csr;
        char read_ptr, write_ptr;
        unsigned char buffer_depth, filled_slots;

        me_csr = mei_me_mecsr_read(dev);
        buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
        read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
        write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
        filled_slots = (unsigned char) (write_ptr - read_ptr);

        /* check for overflow */
        if (filled_slots > buffer_depth)
                return -EOVERFLOW;

        dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
        return (int)filled_slots;
}

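/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */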
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                             unsigned long buffer_length)
{
        u32 *reg_buf = (u32 *)buffer;
        u32 hcsr;

        for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
                *reg_buf++ = mei_me_mecbrw_read(dev);

        if (buffer_length > 0) {
                u32 reg = mei_me_mecbrw_read(dev);

                memcpy(reg_buf, &reg, buffer_length);
        }

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
        return 0;
}

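/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */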
static void mei_me_pg_set(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        reg |= H_HPG_CSR_PGI;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

        reg |= H_HPG_CSR_PGIHEXR;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

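/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */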
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
                mei_me_pg_set(dev);
                ret = 0;
        } else {
                ret = -ETIME;
        }

        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_ON;

        return ret;
}

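/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */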
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
                goto reply;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        mei_me_pg_unset(dev);

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

reply:
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
                ret = 0;
        else
                ret = -ETIME;

out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_OFF;

        return ret;
}

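/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */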
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
        return dev->pg_event >= MEI_PG_EVENT_WAIT &&
               dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

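/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true is pg supported, false otherwise
 */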
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg = mei_me_mecsr_read(dev);

        if (hw->d0i3_supported)
                return true;

        if ((reg & ME_PGIC_HRA) == 0)
                goto notsupported;

        if (!dev->hbm_f_pg_supported)
                goto notsupported;

        return true;

notsupported:
        dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
                hw->d0i3_supported,
                !!(reg & ME_PGIC_HRA),
                dev->version.major_version,
                dev->version.minor_version,
                HBM_MAJOR_VERSION_PGI,
                HBM_MINOR_VERSION_PGI);

        return false;
}

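/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */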
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg |= H_D0I3C_I3;
        if (intr)
                reg |= H_D0I3C_IR;
        else
                reg &= ~H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        /* read it back to ensure the write reached the HW */
        reg = mei_me_d0i3c_read(dev);
        return reg;
}

/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg &= ~H_D0I3C_I3;
        reg |= H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        /* read it back to ensure the write reached the HW */
        reg = mei_me_d0i3c_read(dev);
        return reg;
}

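/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *   from full power state
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */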
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                /* we are in d0i3, nothing to do */
                dev_dbg(dev->dev, "d0i3 set not needed\n");
                ret = 0;
                goto on;
        }

        /* PGI entry procedure */
        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                /* FIXME: should we reset here? */
                goto out;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }
        /* end PGI entry procedure */

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3_set(dev, true);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
                ret = 0;
                goto on;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (!(reg & H_D0I3C_I3)) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
on:
        hw->pg_state = MEI_PG_ON;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
        return ret;
}

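/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   without HBM handshake and without waiting for confirmation
 *   (used when interrupts are not available, e.g. in suspend flow)
 *
 * @dev: the device structure
 *
 * Return: 0 always
 */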
static int mei_me_d0i3_enter(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                /* we are in d0i3, nothing to do */
                dev_dbg(dev->dev, "already d0i3 : set not needed\n");
                goto on;
        }

        mei_me_d0i3_set(dev, false);
on:
        hw->pg_state = MEI_PG_ON;
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter\n");
        return 0;
}

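/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */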
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3c_read(dev);
        if (!(reg & H_D0I3C_I3)) {
                /* we are not in d0i3, nothing to do */
                dev_dbg(dev->dev, "d0i3 exit not needed\n");
                ret = 0;
                goto off;
        }

        reg = mei_me_d0i3_unset(dev);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
                ret = 0;
                goto off;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
off:
        hw->pg_state = MEI_PG_OFF;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;

        dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
        return ret;
}

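/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */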
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
                return;

        dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
        hw->pg_state = MEI_PG_OFF;
        if (waitqueue_active(&dev->wait_pg))
                wake_up(&dev->wait_pg);
}

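/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 */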
static void mei_me_d0i3_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
            (hw->intr_source & H_D0I3C_IS)) {
                dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
                if (hw->pg_state == MEI_PG_ON) {
                        hw->pg_state = MEI_PG_OFF;
                        if (dev->hbm_state != MEI_HBM_IDLE) {
                                /*
                                 * force H_RDY because it could be
                                 * wiped off during PG
                                 */
                                dev_dbg(dev->dev, "d0i3 set host ready\n");
                                mei_me_host_set_ready(dev);
                        }
                } else {
                        hw->pg_state = MEI_PG_ON;
                }

                wake_up(&dev->wait_pg);
        }

        if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
                /*
                 * HW sent some data and we are in D0i3, so
                 * we got here because of HW initiated exit from D0i3.
                 * Start runtime pm resume sequence to exit low power state.
                 */
                dev_dbg(dev->dev, "d0i3 want resume\n");
                mei_hbm_pg_resume(dev);
        }
}

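/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 */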
static void mei_me_pg_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                mei_me_d0i3_intr(dev);
        else
                mei_me_pg_legacy_intr(dev);
}

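/**
 * mei_me_pg_enter_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */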
int mei_me_pg_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_enter_sync(dev);
        else
                return mei_me_pg_legacy_enter_sync(dev);
}

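/**
 * mei_me_pg_exit_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */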
int mei_me_pg_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_exit_sync(dev);
        else
                return mei_me_pg_legacy_exit_sync(dev);
}

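/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */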
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        int ret;
        u32 hcsr;

        if (intr_enable) {
                mei_me_intr_enable(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_exit_sync(dev);
                        if (ret)
                                return ret;
                }
        }

        pm_runtime_set_active(dev->dev);

        hcsr = mei_hcsr_read(dev);
        /*
         * H_RST may be found lit before reset is started,
         * for example if preceding reset flow hasn't completed.
         * In that case asserting H_RST will be ignored, therefore
         * we need to clean H_RST bit to start a successful reset sequence.
         */
        if ((hcsr & H_RST) == H_RST) {
                dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
                hcsr &= ~H_RST;
                mei_hcsr_set(dev, hcsr);
                hcsr = mei_hcsr_read(dev);
        }

        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

        if (!intr_enable)
                hcsr &= ~H_CSR_IE_MASK;

        dev->recvd_hw_ready = false;
        mei_hcsr_write(dev, hcsr);

        /*
         * Host reads the H_CSR once to ensure that the
         * posted write to H_CSR completes.
         */
        hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_RST) == 0)
                dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

        if ((hcsr & H_RDY) == H_RDY)
                dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

        if (!intr_enable) {
                mei_me_hw_reset_release(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_enter(dev);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

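/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */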
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *)dev_id;
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr;

        hcsr = mei_hcsr_read(dev);
        if (!(hcsr & H_CSR_IS_MASK))
                return IRQ_NONE;

        hw->intr_source = hcsr & H_CSR_IS_MASK;
        dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);

        /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
        mei_hcsr_write(dev, hcsr);

        return IRQ_WAKE_THREAD;
}

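/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */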
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *) dev_id;
        struct list_head cmpl_list;
        s32 slots;
        int rets = 0;

        dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");

        mutex_lock(&dev->device_lock);
        INIT_LIST_HEAD(&cmpl_list);

        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
                dev_warn(dev->dev, "FW not ready: resetting.\n");
                schedule_work(&dev->reset_work);
                goto end;
        }

        mei_me_pg_intr(dev);

        /*  check if we need to start the dev */
        if (!mei_host_is_ready(dev)) {
                if (mei_hw_is_ready(dev)) {
                        dev_dbg(dev->dev, "we need to start the dev.\n");
                        dev->recvd_hw_ready = true;
                        wake_up(&dev->wait_hw_ready);
                } else {
                        dev_dbg(dev->dev, "Spurious Interrupt\n");
                }
                goto end;
        }

        /* check slots available for reading */
        slots = mei_count_full_read_slots(dev);
        while (slots > 0) {
                dev_dbg(dev->dev, "slots to read = %08x\n", slots);
                rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
                /* There is a race between ME write and interrupt delivery:
                 * Not all data is always available immediately after the
                 * interrupt, so try to read again on the next interrupt.
                 */
                if (rets == -ENODATA)
                        break;

                if (rets && dev->dev_state != MEI_DEV_RESETTING) {
                        dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
                                rets);
                        schedule_work(&dev->reset_work);
                        goto end;
                }
        }

        dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

        /*
         * During PG handshake only allowed write is the replay to the
         * PG exit message, so block calling write function
         * if the pg event is in PG handshake
         */
        if (dev->pg_event != MEI_PG_EVENT_WAIT &&
            dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                rets = mei_irq_write_handler(dev, &cmpl_list);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }

        mei_irq_compl_handler(dev, &cmpl_list);

end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
        mutex_unlock(&dev->device_lock);
        return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

        .fw_status = mei_me_fw_status,
        .pg_state = mei_me_pg_state,

        .host_is_ready = mei_me_host_is_ready,

        .hw_is_ready = mei_me_hw_is_ready,
        .hw_reset = mei_me_hw_reset,
        .hw_config = mei_me_hw_config,
        .hw_start = mei_me_hw_start,

        .pg_in_transition = mei_me_pg_in_transition,
        .pg_is_enabled = mei_me_pg_is_enabled,

        .intr_clear = mei_me_intr_clear,
        .intr_enable = mei_me_intr_enable,
        .intr_disable = mei_me_intr_disable,

        .hbuf_free_slots = mei_me_hbuf_empty_slots,
        .hbuf_is_ready = mei_me_hbuf_is_empty,
        .hbuf_max_len = mei_me_hbuf_max_len,

        .write = mei_me_write_message,

        .rdbuf_full_slots = mei_me_count_full_read_slots,
        .read_hdr = mei_me_mecbrw_read,
        .read = mei_me_read_slots
};

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
        u32 reg;

        pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
        /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
        return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM                           \
        .quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
        u32 reg;
        unsigned int devfn;

        /*
         * Read ME FW Status register to check for SPS Firmware.
         * The SPS FW is only signaled in pci function 0
         */
        devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
        pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);

        return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS                          \
        .quirk_probe = mei_me_fw_type_sps

#define MEI_CFG_LEGACY_HFS                      \
        .fw_status.count = 0

#define MEI_CFG_ICH_HFS                         \
        .fw_status.count = 1,                   \
        .fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS                         \
        .fw_status.count = 2,                   \
        .fw_status.status[0] = PCI_CFG_HFS_1,   \
        .fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS                        \
        .fw_status.count = 6,                   \
        .fw_status.status[0] = PCI_CFG_HFS_1,   \
        .fw_status.status[1] = PCI_CFG_HFS_2,   \
        .fw_status.status[2] = PCI_CFG_HFS_3,   \
        .fw_status.status[3] = PCI_CFG_HFS_4,   \
        .fw_status.status[4] = PCI_CFG_HFS_5,   \
        .fw_status.status[5] = PCI_CFG_HFS_6

/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
        MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
        MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
        MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
        MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_SPS,
};

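/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */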
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
                                   const struct mei_cfg *cfg)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;

        dev = kzalloc(sizeof(struct mei_device) +
                      sizeof(struct mei_me_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
        hw = to_me_hw(dev);

        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
        return dev;
}