1
2
3
4
5
6
7
8
9
10
11
12
13#include "ufshcd.h"
14#include <linux/pci.h>
15#include <linux/pm_runtime.h>
16#include <linux/pm_qos.h>
17#include <linux/debugfs.h>
18
/* Per-controller private data for Intel UFS hosts (see ufshcd_set_variant()) */
struct intel_host {
	u32	active_ltr;	/* cached copy of the INTEL_ACTIVELTR register */
	u32	idle_ltr;	/* cached copy of the INTEL_IDLELTR register */
	struct dentry	*debugfs_root;	/* debugfs dir exposing the cached LTR values */
};
24
25static int ufs_intel_disable_lcc(struct ufs_hba *hba)
26{
27 u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
28 u32 lcc_enable = 0;
29
30 ufshcd_dme_get(hba, attr, &lcc_enable);
31 if (lcc_enable)
32 ufshcd_disable_host_tx_lcc(hba);
33
34 return 0;
35}
36
37static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
38 enum ufs_notify_change_status status)
39{
40 int err = 0;
41
42 switch (status) {
43 case PRE_CHANGE:
44 err = ufs_intel_disable_lcc(hba);
45 break;
46 case POST_CHANGE:
47 break;
48 default:
49 break;
50 }
51
52 return err;
53}
54
/* MMIO offsets of the Intel-specific LTR (Latency Tolerance Reporting) regs */
#define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808

/* LTR register fields — layout follows the PCIe LTR message format */
#define INTEL_LTR_REQ BIT(15)			/* requirement bit: clear = no latency requirement */
#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)	/* latency scale field */
#define INTEL_LTR_SCALE_1US (2 << 10)		/* scale encoding: value is in 1 us units */
#define INTEL_LTR_SCALE_32US (3 << 10)		/* scale encoding: value is in 32 us units */
#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)	/* 10-bit latency value field */
63
/*
 * intel_cache_ltr - snapshot the hardware LTR registers into intel_host.
 *
 * The cached values are what intel_ltr_set() compares against and what the
 * debugfs files expose, so this must be called after any LTR register write.
 */
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}
71
/*
 * intel_ltr_set - PM QoS latency-tolerance callback.
 * @dev: the UFS host controller device
 * @val: requested latency tolerance in microseconds, or a negative value /
 *       PM_QOS_LATENCY_ANY to indicate "no requirement"
 *
 * Encodes @val into the Intel LTR register format and programs both the
 * active and idle LTR registers. Wired up via
 * dev->power.set_latency_tolerance in intel_ltr_expose().
 */
static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	/* The controller must be powered up to touch its MMIO registers */
	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) accordingly to what has been asked
	 * by the PM QoS layer, or disable the requirement entirely when the
	 * caller passed a negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		/* No latency requirement: clear the requirement bit */
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			/* Too big for 1 us units: convert to 32 us units (>> 5 == / 32) */
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;	/* clamp to the 10-bit field */
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	/* Skip the register writes if nothing would change */
	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Refresh the cached copies (also keeps debugfs output current) */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}
115
/*
 * intel_ltr_expose - hook intel_ltr_set() into the PM QoS framework and
 * expose the latency-tolerance attribute to user space.
 */
static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}
121
/*
 * intel_ltr_hide - undo intel_ltr_expose(): remove the user-space attribute
 * and detach the latency-tolerance callback.
 */
static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}
127
128static void intel_add_debugfs(struct ufs_hba *hba)
129{
130 struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
131 struct intel_host *host = ufshcd_get_variant(hba);
132
133 intel_cache_ltr(hba);
134
135 host->debugfs_root = dir;
136 debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
137 debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
138}
139
/*
 * intel_remove_debugfs - tear down the directory created by
 * intel_add_debugfs(), including the files inside it.
 */
static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}
146
/*
 * ufs_intel_common_init - variant init shared by all Intel UFS hosts.
 *
 * Allocates the intel_host private data (devm-managed, freed automatically),
 * enables runtime-PM autosuspend, and exposes the LTR controls.
 *
 * Return: 0 on success, -ENOMEM if the private data allocation fails.
 */
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	/* Must be set before intel_ltr_expose()/intel_add_debugfs() use it */
	ufshcd_set_variant(hba, host);
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}
161
/*
 * ufs_intel_common_exit - undo ufs_intel_common_init() in reverse order.
 * The intel_host allocation itself is devm-managed and needs no free here.
 */
static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}
167
/*
 * ufs_intel_resume - variant resume hook.
 * @hba: host controller instance
 * @op: the PM operation being resumed from (runtime vs system; unused here)
 *
 * Always returns 0 so that the core resume path continues even if hibern8
 * exit failed (the link is marked off in that case instead).
 */
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	/*
	 * Re-program the UTP transfer/task request list base address
	 * registers. NOTE(review): presumably the controller can lose this
	 * register state while suspended (power collapse) — confirm against
	 * the platform's PM behavior.
	 */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Hibern8 exit failed: mark the link off so the UFS
			 * core performs a full link re-initialization instead
			 * of assuming a live link. We still return 0 below to
			 * let resume proceed.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}
202
/*
 * ufs_intel_ehl_init - EHL-specific init: auto-hibern8 is broken on this
 * part, so set the quirk before running the common init.
 */
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}
208
/* Variant ops for Cannon Lake (CNL) class hosts */
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_common_init,
	.exit = ufs_intel_common_exit,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.resume = ufs_intel_resume,
};
216
/* Variant ops for Elkhart Lake (EHL): same as CNL except the quirked init */
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_ehl_init,
	.exit = ufs_intel_common_exit,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.resume = ufs_intel_resume,
};
224
225#ifdef CONFIG_PM_SLEEP
226
227
228
229
230
231
232
233static int ufshcd_pci_suspend(struct device *dev)
234{
235 return ufshcd_system_suspend(dev_get_drvdata(dev));
236}
237
238
239
240
241
242
243
244
245static int ufshcd_pci_resume(struct device *dev)
246{
247 return ufshcd_system_resume(dev_get_drvdata(dev));
248}
249
250
251
252
253
254
255
256
257static int ufshcd_pci_poweroff(struct device *dev)
258{
259 struct ufs_hba *hba = dev_get_drvdata(dev);
260 int spm_lvl = hba->spm_lvl;
261 int ret;
262
263
264
265
266
267 hba->spm_lvl = 5;
268 ret = ufshcd_system_suspend(hba);
269 hba->spm_lvl = spm_lvl;
270 return ret;
271}
272
273#endif
274
275#ifdef CONFIG_PM
/* Runtime PM callbacks: thin wrappers delegating to the UFS core driver */
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
static int ufshcd_pci_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
static int ufshcd_pci_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
288#endif
289
290
291
292
293
294static void ufshcd_pci_shutdown(struct pci_dev *pdev)
295{
296 ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
297}
298
299
300
301
302
303
304static void ufshcd_pci_remove(struct pci_dev *pdev)
305{
306 struct ufs_hba *hba = pci_get_drvdata(pdev);
307
308 pm_runtime_forbid(&pdev->dev);
309 pm_runtime_get_noresume(&pdev->dev);
310 ufshcd_remove(hba);
311 ufshcd_dealloc_host(hba);
312}
313
314
315
316
317
318
319
320
321static int
322ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
323{
324 struct ufs_hba *hba;
325 void __iomem *mmio_base;
326 int err;
327
328 err = pcim_enable_device(pdev);
329 if (err) {
330 dev_err(&pdev->dev, "pcim_enable_device failed\n");
331 return err;
332 }
333
334 pci_set_master(pdev);
335
336 err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
337 if (err < 0) {
338 dev_err(&pdev->dev, "request and iomap failed\n");
339 return err;
340 }
341
342 mmio_base = pcim_iomap_table(pdev)[0];
343
344 err = ufshcd_alloc_host(&pdev->dev, &hba);
345 if (err) {
346 dev_err(&pdev->dev, "Allocation failed\n");
347 return err;
348 }
349
350 pci_set_drvdata(pdev, hba);
351
352 hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
353
354 err = ufshcd_init(hba, mmio_base, pdev->irq);
355 if (err) {
356 dev_err(&pdev->dev, "Initialization failed\n");
357 ufshcd_dealloc_host(hba);
358 return err;
359 }
360
361 pm_runtime_put_noidle(&pdev->dev);
362 pm_runtime_allow(&pdev->dev);
363
364 return 0;
365}
366
/*
 * System sleep and runtime PM callbacks. The suspend/resume pair also serves
 * freeze/thaw/restore; poweroff uses the dedicated handler that forces a
 * full device power-down.
 */
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_pci_suspend,
	.resume		= ufshcd_pci_resume,
	.freeze		= ufshcd_pci_suspend,
	.thaw		= ufshcd_pci_resume,
	.poweroff	= ufshcd_pci_poweroff,
	.restore	= ufshcd_pci_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
			   ufshcd_pci_runtime_resume,
			   ufshcd_pci_runtime_idle)
};
380
381static const struct pci_device_id ufshcd_pci_tbl[] = {
382 { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
383 { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
384 { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
385 { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
386 { }
387};
388
389MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
390
static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
409