1
2
3
4
5
6
7#include <linux/pci.h>
8
9#include <linux/kthread.h>
10#include <linux/interrupt.h>
11#include <linux/pm_runtime.h>
12#include <linux/sizes.h>
13
14#include "mei_dev.h"
15#include "hbm.h"
16
17#include "hw-me.h"
18#include "hw-me-regs.h"
19
20#include "mei-trace.h"
21
22
23
24
25
26
27
28
29
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32) read from @offset
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
				  unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}
35
36
37
38
39
40
41
42
43
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
49
50
51
52
53
54
55
56
57
/**
 * mei_me_mecbrw_read - Reads 32bit data from the ME circular buffer
 *	read window register (ME_CB_RW)
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
62
63
64
65
66
67
68
/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *	write window register (H_CB_WW)
 *
 * @dev: the device structure
 * @data: 32bit data to be written
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
73
74
75
76
77
78
79
80
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR (ME_CSR_HA)
 *	and emits a trace event with the value read
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}
90
91
92
93
94
95
96
97
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR (H_CSR)
 *	and emits a trace event with the value read
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}
107
108
109
110
111
112
113
/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *	and emits a trace event with the value written
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
119
120
121
122
123
124
125
126
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 *	with the interrupt-status bits masked out first so the write
 *	does not inadvertently acknowledge pending interrupts
 *	(NOTE(review): IS bits appear to be write-1-to-clear — confirm
 *	against the hardware spec)
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
132
133
134
135
136
137
/**
 * mei_hcsr_set_hig - set host interrupt generate (H_IG) bit in H_CSR
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
	u32 hcsr;

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
}
145
146
147
148
149
150
151
152
/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register (H_D0I3C)
 *	and emits a trace event with the value read
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}
162
163
164
165
166
167
168
/**
 * mei_me_d0i3c_write - writes H_D0I3C register to the device
 *	and emits a trace event with the value written
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
174
175
176
177
178
179
180
181
182
/**
 * mei_me_trc_status - read trc status register
 *
 * @dev: mei device
 * @trc: trc status register value is stored here
 *
 * Return: 0 on success, -EOPNOTSUPP when the hardware configuration
 *	does not support the TRC register
 */
static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (!hw->cfg->hw_trc_supported)
		return -EOPNOTSUPP;

	*trc = mei_me_reg_read(hw, ME_TRC);
	trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);

	return 0;
}
195
196
197
198
199
200
201
202
203
/**
 * mei_me_fw_status - read fw status registers from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values are stored here
 *
 * Reads up to MEI_FW_STATUS_MAX registers, at the offsets listed in
 * the per-device configuration (hw->cfg->fw_status), via the
 * hw->read_fws callback.
 *
 * Return: 0 on success, -EINVAL on missing arguments/callback, or the
 *	error returned by the first failing read
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status || !hw->read_fws)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = hw->read_fws(dev, fw_src->status[i],
				   &fw_status->status[i]);
		/* trace is emitted even on failure; status[i] may then
		 * hold a stale value */
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
228
229
230
231
232
233
234
235
236
237
238
239static int mei_me_hw_config(struct mei_device *dev)
240{
241 struct mei_me_hw *hw = to_me_hw(dev);
242 u32 hcsr, reg;
243
244 if (WARN_ON(!hw->read_fws))
245 return -EINVAL;
246
247
248 hcsr = mei_hcsr_read(dev);
249 hw->hbuf_depth = (hcsr & H_CBD) >> 24;
250
251 reg = 0;
252 hw->read_fws(dev, PCI_CFG_HFS_1, ®);
253 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
254 hw->d0i3_supported =
255 ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
256
257 hw->pg_state = MEI_PG_OFF;
258 if (hw->d0i3_supported) {
259 reg = mei_me_d0i3c_read(dev);
260 if (reg & H_D0I3C_I3)
261 hw->pg_state = MEI_PG_ON;
262 }
263
264 return 0;
265}
266
267
268
269
270
271
272
273
274
/**
 * mei_me_pg_state  - translate internal pg state
 *	to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: the cached power-gating state (MEI_PG_OFF / MEI_PG_ON)
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}
281
/* me_intr_src - extract the interrupt-status bits from an H_CSR value */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
286
287
288
289
290
291
292
293
/**
 * me_intr_disable - disables mei device interrupts
 *	using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
299
300
301
302
303
304
305
/**
 * me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 *
 * Writes @hcsr back (with its IS bits still set) only when an
 * interrupt source is pending; mei_hcsr_write does not mask the IS
 * bits, so this acknowledges them.
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
311
312
313
314
315
316
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_clear(dev, hcsr);
}
323
324
325
326
327
/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
335
336
337
338
339
340
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_disable(dev, hcsr);
}
347
348
349
350
351
352
/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	synchronize_irq(hw->irq);
}
359
360
361
362
363
364
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 *
 * Clears H_RST and sets H_IG to signal the firmware.
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);
}
373
374
375
376
377
378
/**
 * mei_me_host_set_ready - enable device interrupts
 *	and set the host ready (H_RDY) bit
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}
386
387
388
389
390
391
392
/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 *
 * Return: bool — true when H_RDY is set in H_CSR
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	return (hcsr & H_RDY) == H_RDY;
}
399
400
401
402
403
404
405
/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 *
 * Return: bool — true when ME_RDY_HRA is set in ME_CSR_HA
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}
412
413
414
415
416
417
418
/**
 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
 *
 * @dev: mei device
 *
 * Return: bool — true when ME_RST_HRA is set in ME_CSR_HA
 */
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}
425
426
427
428
429
430
431
432
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *	or timeout is reached
 *
 * @dev: mei device
 *
 * Drops device_lock while sleeping on wait_hw_ready; the interrupt
 * thread sets dev->recvd_hw_ready and wakes us. On success, releases
 * the hardware from reset and clears the ready flag for the next wait.
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}
449
450
451
452
453
454
455
/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 *
 * Waits for the firmware to become ready, then marks the host ready.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}
467
468
469
470
471
472
473
474
475
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Computes write_ptr - read_ptr from the H_CBWP/H_CBRP fields of
 * H_CSR; the signed char subtraction handles pointer wrap-around.
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);

	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}
488
489
490
491
492
493
494
495
/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false otherwise
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}
500
501
502
503
504
505
506
507
/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise number of empty slots
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = hw->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > hw->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}
522
523
524
525
526
527
528
529
/**
 * mei_me_hbuf_depth - returns depth of the hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in slots
 */
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->hbuf_depth;
}
536
537
538
539
540
541
542
543
544
545
546
547
548static int mei_me_hbuf_write(struct mei_device *dev,
549 const void *hdr, size_t hdr_len,
550 const void *data, size_t data_len)
551{
552 unsigned long rem;
553 unsigned long i;
554 const u32 *reg_buf;
555 u32 dw_cnt;
556 int empty_slots;
557
558 if (WARN_ON(!hdr || !data || hdr_len & 0x3))
559 return -EINVAL;
560
561 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
562
563 empty_slots = mei_hbuf_empty_slots(dev);
564 dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
565
566 if (empty_slots < 0)
567 return -EOVERFLOW;
568
569 dw_cnt = mei_data2slots(hdr_len + data_len);
570 if (dw_cnt > (u32)empty_slots)
571 return -EMSGSIZE;
572
573 reg_buf = hdr;
574 for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
575 mei_me_hcbww_write(dev, reg_buf[i]);
576
577 reg_buf = data;
578 for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
579 mei_me_hcbww_write(dev, reg_buf[i]);
580
581 rem = data_len & 0x3;
582 if (rem > 0) {
583 u32 reg = 0;
584
585 memcpy(®, (const u8 *)data + data_len - rem, rem);
586 mei_me_hcbww_write(dev, reg);
587 }
588
589 mei_hcsr_set_hig(dev);
590 if (!mei_me_hw_is_ready(dev))
591 return -EIO;
592
593 return 0;
594}
595
596
597
598
599
600
601
602
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Derives the filled-slot count from the ME_CBWP_HRA/ME_CBRP_HRA
 * fields of ME_CSR_HA; signed char subtraction handles wrap-around.
 *
 * Return: -EOVERFLOW if overflow, otherwise number of filled slots
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
622
623
624
625
626
627
628
629
630
631
632static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
633 unsigned long buffer_length)
634{
635 u32 *reg_buf = (u32 *)buffer;
636
637 for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
638 *reg_buf++ = mei_me_mecbrw_read(dev);
639
640 if (buffer_length > 0) {
641 u32 reg = mei_me_mecbrw_read(dev);
642
643 memcpy(reg_buf, ®, buffer_length);
644 }
645
646 mei_hcsr_set_hig(dev);
647 return 0;
648}
649
650
651
652
653
654
/**
 * mei_me_pg_set - write pg enter register (set H_HPG_CSR_PGI)
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
668
669
670
671
672
673
/**
 * mei_me_pg_unset - write pg exit register (set H_HPG_CSR_PGIHEXR)
 *
 * @dev: the device structure
 *
 * Warns when PGI is unexpectedly clear before requesting exit.
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
689
690
691
692
693
694
695
696
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Sends the HBM isolation-entry request, waits (with device_lock
 * dropped) for the firmware response, and sets PGI on success.
 * pg_state is set to MEI_PG_ON regardless of the wait outcome.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
726
727
728
729
730
731
732
733
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Clears PGI, waits for the firmware exit notification, then sends
 * the exit response and waits for the interrupt acknowledgment.
 * device_lock is dropped around each wait. If a notification already
 * arrived (pg_event == RECEIVED) the first wait is skipped.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
779
780
781
782
783
784
785
786
/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
792
793
794
795
796
797
798
799
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * PG is enabled when D0i3 is supported, or when the hardware reports
 * the PG isolation capability (ME_PGIC_HRA) and the HBM protocol
 * negotiated PG support.
 *
 * Return: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	if (hw->d0i3_supported)
		return true;

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
827
828
829
830
831
832
833
834
835
/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt on exit from d0i3 (sets H_D0I3C_IR)
 *
 * Return: D0I3C register value after the write (re-read from hw)
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);

	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
850
851
852
853
854
855
856
857
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Clears H_D0I3C_I3 and requests an interrupt (H_D0I3C_IR).
 *
 * Return: D0I3C register value after the write (re-read from hw)
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);

	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
869
870
871
872
873
874
875
876
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *	with HBM handshake
 *
 * @dev: the device structure
 *
 * Sends the PG isolation-entry request, waits for the firmware reply,
 * then sets the d0i3 bit and, if the change is in progress
 * (H_D0I3C_CIP), waits for the d0i3 interrupt. device_lock is dropped
 * around each wait.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* the interrupt may have been lost; trust the register */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
942
943
944
945
946
947
948
949
950
951
952
/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *	no HBM handshake is performed and no interrupt is requested
 *
 * @dev: the device structure
 *
 * Return: always 0
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}
972
973
974
975
976
977
978
979
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Clears the d0i3 bit and, when the change is in progress
 * (H_D0I3C_CIP), waits for the exit interrupt with device_lock
 * dropped. On timeout the register is re-read as a fallback.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* the interrupt may have been lost; trust the register */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
1026
1027
1028
1029
1030
1031
1032
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *	in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}
1045
1046
1047
1048
1049
1050
1051
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source (H_CSR interrupt status bits)
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data while we are in D0i3
		 * (incomplete exit) - ask the fw to exit power gating
		 * via an HBM resume message
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
1086
1087
1088
1089
1090
1091
1092
/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *	dispatching between d0i3 and legacy flows
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		mei_me_d0i3_intr(dev, intr_source);
	else
		mei_me_pg_legacy_intr(dev);
}
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *	(d0i3 or legacy, depending on hw support)
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_enter_sync(dev);
	else
		return mei_me_pg_legacy_enter_sync(dev);
}
1119
1120
1121
1122
1123
1124
1125
1126
/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *	(d0i3 or legacy, depending on hw support)
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_exit_sync(dev);
	else
		return mei_me_pg_legacy_exit_sync(dev);
}
1136
1137
1138
1139
1140
1141
1142
1143
1144
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * When @intr_enable is set, interrupts are armed and an exit from
 * d0i3 is performed before the reset; otherwise the reset is released
 * immediately and d0i3 is re-entered afterwards.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/*
	 * H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
1205
1206
1207
1208
1209
1210
1211
1212
1213
/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_NONE when the interrupt is not ours, IRQ_WAKE_THREAD
 *	to defer processing to mei_me_irq_thread_handler
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	u32 hcsr;

	hcsr = mei_hcsr_read(dev);
	if (!me_intr_src(hcsr))
		return IRQ_NONE;

	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

	/* disable interrupts on device; re-enabled by the thread handler */
	me_intr_disable(dev, hcsr);
	return IRQ_WAKE_THREAD;
}
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the
 *	interrupt processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Clears the interrupt sources, handles fw/host readiness
 * transitions, reads pending slots, runs write and completion
 * handlers, and re-enables interrupts.
 *
 * Return: irqreturn_t (always IRQ_HANDLED)
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");

	mutex_lock(&dev->device_lock);

	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/*
		 * There is a race between ME write and interrupt delivery:
		 * not every data may be processed yet; -ENODATA just means
		 * there is nothing more to read right now.
		 */
		if (rets == -ENODATA)
			break;

		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
1323
/* ME hardware callbacks wired into the mei core layer */
static const struct mei_hw_ops mei_me_hw_ops = {

	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
1368{
1369 u32 reg;
1370 unsigned int devfn;
1371
1372 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1373 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, ®);
1374 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
1375
1376 return (reg & 0x600) == 0x200;
1377}
1378
1379#define MEI_CFG_FW_NM \
1380 .quirk_probe = mei_me_fw_type_nm
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
1394{
1395 u32 reg;
1396 unsigned int devfn;
1397
1398 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1399 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®);
1400 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
1401 return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
1402}
1403
1404#define MEI_CFG_FW_SPS_4 \
1405 .quirk_probe = mei_me_fw_type_sps_4
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
1418{
1419 u32 reg;
1420 u32 fw_type;
1421 unsigned int devfn;
1422
1423 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1424 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®);
1425 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
1426 fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
1427
1428 dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
1429
1430 return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
1431}
1432
1433#define MEI_CFG_KIND_ITOUCH \
1434 .kind = "itouch"
1435
1436#define MEI_CFG_FW_SPS \
1437 .quirk_probe = mei_me_fw_type_sps
1438
/* firmware version reporting is supported */
#define MEI_CFG_FW_VER_SUPP                     \
	.fw_ver_supported = 1

/* no fw status registers */
#define MEI_CFG_ICH_HFS                      \
	.fw_status.count = 0

/* one fw status register: HFS_1 */
#define MEI_CFG_ICH10_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two fw status registers: HFS_1, HFS_2 */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* six fw status registers: HFS_1 .. HFS_6 */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA ring sizes: 128K host/device descriptors, one page of control */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

/* TRC status register is present */
#define MEI_CFG_TRC \
	.hw_trc_supported = 1
1470
1471
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_NM,
};

/* PCH8 and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 itouch graphics devices */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 with quirk for SPS firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* PCH12 with quirk for SPS 4.0 firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* PCH12 and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* PCH12 with quirk for SPS firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS,
};

/*
 * PCH12 itouch with quirk for SPS firmware exclusion
 * (no DMA ring configured)
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS,
};

/* PCH15 and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* PCH15 with quirk for SPS firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS,
};

/*
 * mei_cfg_list - A list of platform platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
1589
1590const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
1591{
1592 BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
1593
1594 if (idx >= MEI_ME_NUM_CFG)
1595 return NULL;
1596
1597 return mei_cfg_list[idx];
1598};
1599
1600
1601
1602
1603
1604
1605
1606
1607
/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @parent: device associated with physical device (pci/platform)
 * @cfg: per device generation config
 *
 * Allocates mei_device together with its trailing mei_me_hw area
 * (devm-managed), copies the DMA ring sizes and feature flags from
 * @cfg, and runs the generic mei_device_init.
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct device *parent,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int i;

	dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	hw = to_me_hw(dev);

	for (i = 0; i < DMA_DSCR_NUM; i++)
		dev->dr_dscr[i].size = cfg->dma_size[i];

	mei_device_init(dev, parent, &mei_me_hw_ops);
	hw->cfg = cfg;

	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;

	dev->kind = cfg->kind;

	return dev;
}
1633
1634