// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trusted Execution Environment Interface (Intel(R) TXE)
 * PCI glue for the Intel MEI driver.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)},
	{PCI_VDEVICE(INTEL, 0x2298)},

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

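/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, a negative errno on failure
 */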
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable the pci device */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocate and initialize the mei device structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

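	/*
	 * Opt out of the PM core's direct-complete optimization so the
	 * device always goes through the full suspend/resume callbacks,
	 * even when it is already runtime suspended.
	 */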
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

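	/*
	 * Override the PCI bus runtime PM callbacks with the driver's own
	 * handlers (see mei_txe_set_pm_domain()) so runtime suspend/resume
	 * goes through the TXE aliveness (power gating) handshake rather
	 * than the PCI bus runtime PM path.
	 */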
	mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

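/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * A trimmed-down version of the remove path, run at system shutdown.
 */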
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

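/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * Called by the PCI subsystem when the driver should release the device.
 */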
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev == NULL\n");
		return;
	}

	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

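/* PCI system sleep (suspend/resume) callbacks */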
#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

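/* Runtime PM callbacks and device PM domain setup */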
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: txe: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: txe: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/*
	 * The IRQ stays requested here: runtime suspend only asks the
	 * device to enter power gating (aliveness off), the device
	 * itself remains in D0.
	 */

	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: txe: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

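/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */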
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

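/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */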
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using the device-specific runtime PM callbacks */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

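/*
 * PCI driver structure
 */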
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_shutdown,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");