1
2
3
4
5
6
7
8#include <linux/module.h>
9#include <linux/of_device.h>
10#include <linux/delay.h>
11#include <linux/mmc/mmc.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_opp.h>
14#include <linux/slab.h>
15#include <linux/iopoll.h>
16#include <linux/regulator/consumer.h>
17
18#include "sdhci-pltfm.h"
19#include "cqhci.h"
20
21#define CORE_MCI_VERSION 0x50
22#define CORE_VERSION_MAJOR_SHIFT 28
23#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
24#define CORE_VERSION_MINOR_MASK 0xff
25
26#define CORE_MCI_GENERICS 0x70
27#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
28
29#define HC_MODE_EN 0x1
30#define CORE_POWER 0x0
31#define CORE_SW_RST BIT(7)
32#define FF_CLK_SW_RST_DIS BIT(13)
33
34#define CORE_PWRCTL_BUS_OFF BIT(0)
35#define CORE_PWRCTL_BUS_ON BIT(1)
36#define CORE_PWRCTL_IO_LOW BIT(2)
37#define CORE_PWRCTL_IO_HIGH BIT(3)
38#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
39#define CORE_PWRCTL_IO_SUCCESS BIT(2)
40#define REQ_BUS_OFF BIT(0)
41#define REQ_BUS_ON BIT(1)
42#define REQ_IO_LOW BIT(2)
43#define REQ_IO_HIGH BIT(3)
44#define INT_MASK 0xf
45#define MAX_PHASES 16
46#define CORE_DLL_LOCK BIT(7)
47#define CORE_DDR_DLL_LOCK BIT(11)
48#define CORE_DLL_EN BIT(16)
49#define CORE_CDR_EN BIT(17)
50#define CORE_CK_OUT_EN BIT(18)
51#define CORE_CDR_EXT_EN BIT(19)
52#define CORE_DLL_PDN BIT(29)
53#define CORE_DLL_RST BIT(30)
54#define CORE_CMD_DAT_TRACK_SEL BIT(0)
55
56#define CORE_DDR_CAL_EN BIT(0)
57#define CORE_FLL_CYCLE_CNT BIT(18)
58#define CORE_DLL_CLOCK_DISABLE BIT(21)
59
60#define DLL_USR_CTL_POR_VAL 0x10800
61#define ENABLE_DLL_LOCK_STATUS BIT(26)
62#define FINE_TUNE_MODE_EN BIT(27)
63#define BIAS_OK_SIGNAL BIT(29)
64
65#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
66#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10
67
68#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
69#define CORE_CLK_PWRSAVE BIT(1)
70#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
71#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
72#define CORE_HC_MCLK_SEL_MASK (3 << 8)
73#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
74#define CORE_IO_PAD_PWR_SWITCH BIT(16)
75#define CORE_HC_SELECT_IN_EN BIT(18)
76#define CORE_HC_SELECT_IN_HS400 (6 << 19)
77#define CORE_HC_SELECT_IN_MASK (7 << 19)
78
79#define CORE_3_0V_SUPPORT BIT(25)
80#define CORE_1_8V_SUPPORT BIT(26)
81#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
82
83#define CORE_CSR_CDC_CTLR_CFG0 0x130
84#define CORE_SW_TRIG_FULL_CALIB BIT(16)
85#define CORE_HW_AUTOCAL_ENA BIT(17)
86
87#define CORE_CSR_CDC_CTLR_CFG1 0x134
88#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
89#define CORE_TIMER_ENA BIT(16)
90
91#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
92#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
93#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
94#define CORE_CDC_OFFSET_CFG 0x14C
95#define CORE_CSR_CDC_DELAY_CFG 0x150
96#define CORE_CDC_SLAVE_DDA_CFG 0x160
97#define CORE_CSR_CDC_STATUS0 0x164
98#define CORE_CALIBRATION_DONE BIT(0)
99
100#define CORE_CDC_ERROR_CODE_MASK 0x7000000
101
102#define CORE_CSR_CDC_GEN_CFG 0x178
103#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
104#define CORE_CDC_SWITCH_RC_EN BIT(1)
105
106#define CORE_CDC_T4_DLY_SEL BIT(0)
107#define CORE_CMDIN_RCLK_EN BIT(1)
108#define CORE_START_CDC_TRAFFIC BIT(6)
109
110#define CORE_PWRSAVE_DLL BIT(3)
111
112#define DDR_CONFIG_POR_VAL 0x80040873
113
114
115#define INVALID_TUNING_PHASE -1
116#define SDHCI_MSM_MIN_CLOCK 400000
117#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
118
119#define CDR_SELEXT_SHIFT 20
120#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
121#define CMUX_SHIFT_PHASE_SHIFT 24
122#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
123
124#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
125
126
127#define MSM_PWR_IRQ_TIMEOUT_MS 5000
128
129#define msm_host_readl(msm_host, host, offset) \
130 msm_host->var_ops->msm_readl_relaxed(host, offset)
131
132#define msm_host_writel(msm_host, val, host, offset) \
133 msm_host->var_ops->msm_writel_relaxed(val, host, offset)
134
135
136#define CQHCI_VENDOR_CFG1 0xA00
137#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
138
/*
 * Per-variant core register offsets.  Two layouts exist in this driver:
 * the legacy "MCI" layout, where the core registers live in a separate
 * memory region (accessed via msm_host->core_mem), and the v5 layout,
 * where they are folded into the standard SDHCI register space
 * (accessed via host->ioaddr).  See sdhci_msm_mci_offset and
 * sdhci_msm_v5_offset below.
 */
struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;	/* bit position, not an offset */
	u32 core_testbus_ena;		/* bit mask, not an offset */
	u32 core_testbus_sel2;		/* bit mask, not an offset */
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old;	/* Applicable to sdcc minor ver < 0x49 only */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl;		/* Present on SDCC5.1 (Tassadar DLL) only */
};
170
/*
 * Offsets for v5 controllers: core registers are inside the SDHCI
 * register space, so core_hc_mode is not populated (zero) and all
 * accesses go through host->ioaddr.
 */
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	/* In v5 version of sdcc host controller, core_hc_mode is not present */
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};
200
/*
 * Offsets for the legacy MCI layout: core registers live in a separate
 * region mapped at msm_host->core_mem.  Note core_dll_usr_ctl is not
 * populated here (that register only exists on newer controllers).
 */
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};
230
/*
 * Variant-specific core register accessors: MCI-layout controllers
 * read/write through the separate core_mem mapping, v5 controllers
 * through host->ioaddr.  Invoked via the msm_host_readl/msm_host_writel
 * macros.
 */
struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};
236
237
238
239
240
/*
 * Static, per-variant description of a controller generation: which
 * register layout and accessors to use, plus feature flags (whether the
 * MCI region was removed, whether the SDR DLL config must be restored,
 * and whether the Tassadar DLL is used).
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	bool uses_tassadar_dll;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};
248
/*
 * Driver-private state, stored in the sdhci_pltfm private area
 * (retrieved everywhere via sdhci_pltfm_priv(sdhci_priv(host))).
 */
struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MCI core register region (legacy layout only) */
	int pwr_irq;		/* power status change interrupt */
	struct clk *bus_clk;
	struct clk *xo_clk;	/* rate feeds the FLL cycle count in msm_init_cm_dll() */
	struct clk_bulk_data bulk_clks[4];	/* [0] is the core clock */
	unsigned long clk_rate;	/* last rate programmed via dev_pm_opp_set_rate() */
	struct mmc_host *mmc;
	struct opp_table *opp_table;
	bool has_opp_table;
	bool use_14lpp_dll_reset;	/* selects the extended DLL reset sequence */
	bool tuning_done;
	bool calibration_done;	/* HS400 DLL calibration completed */
	u8 saved_tuning_phase;	/* phase chosen by the last successful tuning */
	bool use_cdclp533;	/* use CDCLP533 calibration instead of CM_DLL_SDC4 */
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;	/* copied from the matched variant info */
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;		/* clock-data-recovery enabled for current mode */
	u32 transfer_mode;
	bool updated_ddr_cfg;	/* selects core_ddr_config vs core_ddr_config_old */
	bool uses_tassadar_dll;
	u32 dll_config;		/* optional DLL_CONFIG override (0 = use defaults) */
	u32 ddr_config;		/* value programmed into the DDR config register */
};
281
282static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
283{
284 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
285 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
286
287 return msm_host->offset;
288}
289
290
291
292
293
294static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
295 u32 offset)
296{
297 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
298 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
299
300 return readl_relaxed(msm_host->core_mem + offset);
301}
302
303static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
304 u32 offset)
305{
306 return readl_relaxed(host->ioaddr + offset);
307}
308
309static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
310 struct sdhci_host *host, u32 offset)
311{
312 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
313 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
314
315 writel_relaxed(val, msm_host->core_mem + offset);
316}
317
318static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
319 struct sdhci_host *host, u32 offset)
320{
321 writel_relaxed(val, host->ioaddr + offset);
322}
323
324static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
325 unsigned int clock)
326{
327 struct mmc_ios ios = host->mmc->ios;
328
329
330
331
332
333
334 if (ios.timing == MMC_TIMING_UHS_DDR50 ||
335 ios.timing == MMC_TIMING_MMC_DDR52 ||
336 ios.timing == MMC_TIMING_MMC_HS400 ||
337 host->flags & SDHCI_HS400_TUNING)
338 clock *= 2;
339 return clock;
340}
341
342static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
343 unsigned int clock)
344{
345 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
346 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
347 struct mmc_ios curr_ios = host->mmc->ios;
348 struct clk *core_clk = msm_host->bulk_clks[0].clk;
349 int rc;
350
351 clock = msm_get_clock_rate_for_bus_mode(host, clock);
352 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
353 if (rc) {
354 pr_err("%s: Failed to set clock at rate %u at timing %d\n",
355 mmc_hostname(host->mmc), clock,
356 curr_ios.timing);
357 return;
358 }
359 msm_host->clk_rate = clock;
360 pr_debug("%s: Setting clock at rate %lu at timing %d\n",
361 mmc_hostname(host->mmc), clk_get_rate(core_clk),
362 curr_ios.timing);
363}
364
365
366static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
367{
368 u32 wait_cnt = 50;
369 u8 ck_out_en;
370 struct mmc_host *mmc = host->mmc;
371 const struct sdhci_msm_offset *msm_offset =
372 sdhci_priv_msm_offset(host);
373
374
375 ck_out_en = !!(readl_relaxed(host->ioaddr +
376 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
377
378 while (ck_out_en != poll) {
379 if (--wait_cnt == 0) {
380 dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
381 mmc_hostname(mmc), poll);
382 return -ETIMEDOUT;
383 }
384 udelay(1);
385
386 ck_out_en = !!(readl_relaxed(host->ioaddr +
387 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
388 }
389
390 return 0;
391}
392
/*
 * Program one of the 16 DLL/CDR output phases used to sample incoming
 * data.  The hardware requires CK_OUT_EN to be cleared before the phase
 * select field is changed and re-set afterwards; both transitions are
 * confirmed via msm_dll_poll_ck_out_en().  Runs under host->lock since
 * it performs a multi-step read-modify-write sequence on DLL_CONFIG.
 *
 * Returns 0 on success, -EINVAL for phase > 0xf, or -ETIMEDOUT if the
 * CK_OUT_EN handshake does not complete.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	/* Hardware expects the phase select grey-coded; map linear -> grey */
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
		sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	/* Disable CDR and CK_OUT; take external control of the phase */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected (grey-coded) phase into CDR_SELEXT while
	 * CK_OUT is still disabled.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Re-enable the clock output with the new phase */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Hand phase tracking back to the CDR hardware */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
452
453
454
455
456
457
458
459
460
461
462
/*
 * Given the sorted list of phases that passed tuning, pick the phase to
 * use.  The passing phases are grouped into rows of consecutive values
 * ("ranges"); since the 16 phases wrap around (15 is adjacent to 0), a
 * row ending at 15 and a row starting at 0 are merged.  The longest row
 * wins, and the phase at roughly 3/4 of the way into it is returned so
 * the sampling point sits well inside the passing window.
 *
 * Returns the selected phase (0..15) or a negative errno on invalid
 * input / internal inconsistency.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	/* Split phase_table into rows of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* a gap in the sequence starts a new row */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Find the row (window) with the most passing phases */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Pick the phase ~3/4 into the selected window */
	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
564
565static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
566{
567 u32 mclk_freq = 0, config;
568 const struct sdhci_msm_offset *msm_offset =
569 sdhci_priv_msm_offset(host);
570
571
572 if (host->clock <= 112000000)
573 mclk_freq = 0;
574 else if (host->clock <= 125000000)
575 mclk_freq = 1;
576 else if (host->clock <= 137000000)
577 mclk_freq = 2;
578 else if (host->clock <= 150000000)
579 mclk_freq = 3;
580 else if (host->clock <= 162000000)
581 mclk_freq = 4;
582 else if (host->clock <= 175000000)
583 mclk_freq = 5;
584 else if (host->clock <= 187000000)
585 mclk_freq = 6;
586 else if (host->clock <= 200000000)
587 mclk_freq = 7;
588
589 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
590 config &= ~CMUX_SHIFT_PHASE_MASK;
591 config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
592 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
593}
594
595
/*
 * Initialize the CM DLL used for tuning: assert/deassert reset and
 * power-down, program the frequency selection (directly or via the FLL
 * cycle count derived from the XO clock on 14lpp parts), then enable
 * the DLL and wait for it to lock.  The exact ordering of the register
 * writes below follows the hardware programming sequence and must not
 * be rearranged.
 *
 * Returns 0 on success, -ETIMEDOUT if the DLL never reports LOCK.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* XO rate is needed below to compute the FLL cycle count */
	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/* Optional firmware/DT-provided DLL_CONFIG override */
	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		/* Disable the clock output and gate the DLL clock first */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/* Put the DLL into reset and power-down */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Frequency band only programmed when no explicit DLL override */
	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count: 8 * clock/xo when FLL_CYCLE_CNT is
		 * set, else 4 * clock/xo; written to bits [17:10] of
		 * DLL_CONFIG_2.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* Release reset and power-down */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		/* Re-enable the previously gated DLL clock */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure DLL user control register to enable the
	 * Tassadar DLL's fine-tune mode and lock status reporting.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		/* Low/high-frequency trim in DLL_CONFIG_3, split at 150 MHz */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
			       mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
750
/*
 * Switch the host controller clock mux back to the default (non-HS400)
 * selection and disable the HS400-specific "select in" override.
 */
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* PWRSAVE_DLL only applies to the CM_DLL_SDC4 calibration path */
	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/* Select the default MCLK source */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.  (HS400 sets these bits in msm_hc_select_hs400().)
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
790
/*
 * Switch the host controller clock mux to the HS400 selection and, when
 * tuning has completed (or enhanced strobe is in use) but calibration
 * has not yet run, force the HS400 "select in" override.  Also verifies
 * the DLL is locked on the non-CDCLP533 path before the rate change.
 */
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL status before and after switching BIT
		 * clock source to make sure the DLL is locked;
		 * ~10ms budget (10us poll interval, 1000 iterations).
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
863{
864 struct mmc_ios ios = host->mmc->ios;
865
866 if (ios.timing == MMC_TIMING_MMC_HS400 ||
867 host->flags & SDHCI_HS400_TUNING)
868 msm_hc_select_hs400(host);
869 else
870 msm_hc_select_default(host);
871}
872
/*
 * Run the CDCLP533 (Calibrated Delay Chain) HS400 calibration: re-init
 * the DLL at the saved tuning phase, program the CDC configuration
 * registers with their required values, trigger a full software
 * calibration followed by hardware auto-calibration, and poll for
 * completion.  The specific constants written below are the hardware's
 * prescribed programming values for this sequence.
 *
 * Returns 0 on success, -ETIMEDOUT if calibration never completes, or
 * -EINVAL if the CDC reports an error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Pause CDC traffic while (re)programming the delay chain */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */
	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration: pulse SW_TRIG_FULL_CALIB ... */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* ... then enable hardware auto-calibration and its timer */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Poll CALIBRATION_DONE, 1us interval, 50us timeout */
	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* A non-zero error code in STATUS0 means calibration failed */
	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	/* Resume CDC traffic now that the delay chain is calibrated */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
975
/*
 * Run the CM_DLL_SDC4 HS400 calibration: program the DDR config
 * register (old or new offset depending on the core revision), enable
 * RCLK gating for enhanced strobe if requested, kick off DDR
 * calibration and poll for DDR_DLL_LOCK.
 *
 * Returns 0 on success or -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults do not suit
	 * all targets; write the value selected at probe time
	 * (msm_host->ddr_config) to the revision-appropriate offset.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	/* Enhanced strobe needs RCLK driven on the CMD line */
	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	/* Start DDR calibration */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	/* Poll DDR_DLL_LOCK, 10us interval, 1ms timeout */
	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When the DLL lock is achieved, this power-save mode can be
	 * enabled — but only on cores without the 14lpp DLL reset
	 * workaround (on those, PWRSAVE_DLL is known to cause problems
	 * and is left disabled).
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain writebuffer to ensure above writes are
	 * completed before returning.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
1051
/*
 * Top-level HS400 DLL calibration: reset the tuning block, restore the
 * saved tuning phase (skipped for enhanced strobe, which needs no
 * tuning), then dispatch to the CDCLP533 or CM_DLL_SDC4 calibration
 * depending on the core.
 *
 * Returns 0 on success or a negative errno from the helpers.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
1094
1095static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
1096{
1097 struct mmc_ios *ios = &host->mmc->ios;
1098
1099
1100
1101
1102
1103 if (host->clock <= CORE_FREQ_100MHZ ||
1104 !(ios->timing == MMC_TIMING_MMC_HS400 ||
1105 ios->timing == MMC_TIMING_MMC_HS200 ||
1106 ios->timing == MMC_TIMING_UHS_SDR104) ||
1107 ios->enhanced_strobe)
1108 return false;
1109
1110 return true;
1111}
1112
1113static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
1114{
1115 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1116 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1117 int ret;
1118
1119
1120
1121
1122
1123 if (!sdhci_msm_is_tuning_needed(host))
1124 return 0;
1125
1126
1127 ret = msm_init_cm_dll(host);
1128 if (ret)
1129 return ret;
1130
1131
1132 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
1133
1134 return ret;
1135}
1136
1137static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
1138{
1139 const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
1140 u32 config, oldconfig = readl_relaxed(host->ioaddr +
1141 msm_offset->core_dll_config);
1142
1143 config = oldconfig;
1144 if (enable) {
1145 config |= CORE_CDR_EN;
1146 config &= ~CORE_CDR_EXT_EN;
1147 } else {
1148 config &= ~CORE_CDR_EN;
1149 config |= CORE_CDR_EXT_EN;
1150 }
1151
1152 if (config != oldconfig) {
1153 writel_relaxed(config, host->ioaddr +
1154 msm_offset->core_dll_config);
1155 }
1156}
1157
/*
 * Execute tuning: sweep all 16 DLL phases, collect the ones at which the
 * tuning command succeeds, and program the most appropriate phase.
 * The whole sequence is retried up to three times before giving up.
 */
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 3;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* No tuning for this mode: make sure the CDR stays disabled. */
	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-data-recovery is used from now on for read transfers. */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * For HS400 tuning (done in HS200 timing), select the HS400 clock
	 * mode and apply the matching clock rate before sweeping phases.
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block. */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block. */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point. */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		/* Pick the best phase out of the successful ones. */
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		/* Remember the phase so it can be restored after resume. */
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		/* No working phase found: retry the whole sequence. */
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed. */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}
1247
1248
1249
1250
1251
1252
1253
1254static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
1255{
1256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1257 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1258 int ret;
1259
1260 if (host->clock > CORE_FREQ_100MHZ &&
1261 (msm_host->tuning_done || ios->enhanced_strobe) &&
1262 !msm_host->calibration_done) {
1263 ret = sdhci_msm_hs400_dll_calibration(host);
1264 if (!ret)
1265 msm_host->calibration_done = true;
1266 else
1267 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
1268 mmc_hostname(host->mmc), ret);
1269 }
1270}
1271
/* Program the UHS mode select bits in HOST_CONTROL2 for the new timing. */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		/* HS400/HS200/SDR104 all use the SDR104 mode select value. */
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz; hold it in reset
		 * and power it down so it is disabled when not needed.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * Force re-calibration the next time HS400 is entered at a
		 * high clock, since the DLL state was just dropped.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}
1348
/* Initialize the waitqueue used to synchronize with the power IRQ. */
static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}
1353
/* Wake anyone sleeping in sdhci_msm_check_power_status(). */
static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
/*
 * MSM HCs have hardware that requires a power IRQ to be acknowledged after
 * certain register writes (bus on/off, IO voltage switch). This helper
 * waits until the power IRQ handler has run for the given request type,
 * unless the hardware state shows no IRQ will ever arrive.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not
	 * set. On the mci-removed variants that register does not exist, so
	 * the default value above keeps the wait enabled.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when bus
	 * is turned on, i.e. when host->pwr is non-zero. If the bus is off,
	 * waiting here would just time out, so return early instead.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
				mmc_hostname(host->mmc), req_type);
		return;
	}
	/* The requested state may already have been reached by the handler. */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;

	/*
	 * Otherwise sleep until the power IRQ handler sets pwr_irq_flag,
	 * with a timeout as a safety net against a lost interrupt.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
1433
/* Dump the power-control status/mask/ctl registers for debugging. */
static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
		mmc_hostname(host->mmc),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}
1447
/*
 * Handle a power-control interrupt: clear the status bits, acknowledge the
 * request toward the hardware, optionally flip the IO pad power switch,
 * and record the new power/IO state for sdhci_msm_check_power_status().
 */
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * The clear write above may take effect with a delay; poll until the
	 * status bits actually drop, retrying the clear a bounded number of
	 * times before warning loudly.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Translate the status bits into requested power/IO state and acks. */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}

	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has no real power control over the regulators here; it
	 * simply acknowledges the request so the controller can proceed.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If the controller supports a software IO pad power switch, steer
	 * the pads to the voltage matching the requested IO level.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;

		/*
		 * Clear the switch bit for 3.0V (when supported), set it for
		 * 1.8V; only write the register when the value changes.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	/* Publish the state that check_power_status() polls for. */
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
1558
1559static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1560{
1561 struct sdhci_host *host = (struct sdhci_host *)data;
1562 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1563 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1564
1565 sdhci_msm_handle_pwr_irq(host, irq);
1566 msm_host->pwr_irq_flag = 1;
1567 sdhci_msm_complete_pwr_irq_wait(msm_host);
1568
1569
1570 return IRQ_HANDLED;
1571}
1572
1573static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1574{
1575 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1576 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1577 struct clk *core_clk = msm_host->bulk_clks[0].clk;
1578
1579 return clk_round_rate(core_clk, ULONG_MAX);
1580}
1581
/* Minimum supported bus clock frequency. */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}
1586
1587
1588
1589
1590
1591
1592
1593
1594
/*
 * Program the SDHCI clock-control register. The MSM controller derives its
 * card clock from the GCC clocks rather than the SDHCI divider, so this
 * only gates/ungates the clock; the actual rate is set elsewhere.
 */
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	/*
	 * Reported clock is recomputed by the core; clear it and fully gate
	 * the clock first (required before any mode/rate change).
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * MSM controllers do not use the SDHCI frequency divider, so just
	 * re-enable the (internal + card) clock at whatever is programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}
1619
1620
1621static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1622{
1623 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1624 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1625
1626 if (!clock) {
1627 msm_host->clk_rate = clock;
1628 goto out;
1629 }
1630
1631 sdhci_msm_hc_select_mode(host);
1632
1633 msm_set_clock_rate_for_bus_mode(host, clock);
1634out:
1635 __sdhci_msm_set_clock(host, clock);
1636}
1637
1638
1639
1640
1641
1642
1643
1644static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
1645{
1646 int cmd_error = 0;
1647 int data_error = 0;
1648
1649 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1650 return intmask;
1651
1652 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1653 return 0;
1654}
1655
/* Disable CQE and hand the controller back to the legacy SDHCI path. */
static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * Restore the 16-byte descriptor size for legacy 64-bit DMA
	 * transfers (the CQE path uses a different size — see
	 * sdhci_msm_cqe_add_host()).
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Re-enable the command-complete interrupt and clear any response
	 * event that latched while CQE was active, so the legacy path does
	 * not see a stale interrupt. Done under host->lock to serialize
	 * with the interrupt handler.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host,  ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}
1686
/* cqhci callbacks: generic enable, MSM-specific disable (see above). */
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable		= sdhci_cqe_enable,
	.disable	= sdhci_msm_cqe_disable,
};
1691
/*
 * Register the host with CQE (command queue engine) support. Mirrors
 * sdhci_add_host() but inserts cqhci initialization between
 * sdhci_setup_host() and __sdhci_add_host().
 */
static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * Allocate descriptors at the larger 16-byte size so buffers work
	 * for both the legacy path and CQE mode (which use different
	 * descriptor sizes — see desc_sz below and in cqe_disable()).
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
				mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Vendor tweak: do not reset the controller when CQE is enabled. */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * CQE mode uses 12-byte descriptors in 64-bit DMA mode on this
	 * hardware; cqe_disable() switches back to 16 for the legacy path.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
			mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
1758
1759
1760
1761
1762
1763
1764
1765
/*
 * Inspect a register write before it happens and classify whether it will
 * trigger a power IRQ (REQ_* bits returned). Also tracks the transfer mode
 * and toggles CDR around non-tuning read commands.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		/* A signal-voltage switch generates an IO level request. */
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		/* A full reset while powered turns the bus off. */
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		/* Remember the mode so the COMMAND case can test TRNS_READ. */
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		/* Enable CDR for data reads, except for tuning commands. */
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Ensure the flag clear is visible before the register write
		 * that triggers the power IRQ, so a fast IRQ cannot be lost.
		 */
		mb();
	}
	return req_type;
}
1809
1810
1811static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
1812{
1813 u32 req_type = 0;
1814
1815 req_type = __sdhci_msm_check_write(host, val, reg);
1816 writew_relaxed(val, host->ioaddr + reg);
1817
1818 if (req_type)
1819 sdhci_msm_check_power_status(host, req_type);
1820}
1821
1822
1823static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
1824{
1825 u32 req_type = 0;
1826
1827 req_type = __sdhci_msm_check_write(host, val, reg);
1828
1829 writeb_relaxed(val, host->ioaddr + reg);
1830
1831 if (req_type)
1832 sdhci_msm_check_power_status(host, req_type);
1833}
1834
1835static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
1836{
1837 struct mmc_host *mmc = msm_host->mmc;
1838 struct regulator *supply = mmc->supply.vqmmc;
1839 u32 caps = 0, config;
1840 struct sdhci_host *host = mmc_priv(mmc);
1841 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1842
1843 if (!IS_ERR(mmc->supply.vqmmc)) {
1844 if (regulator_is_supported_voltage(supply, 1700000, 1950000))
1845 caps |= CORE_1_8V_SUPPORT;
1846 if (regulator_is_supported_voltage(supply, 2700000, 3600000))
1847 caps |= CORE_3_0V_SUPPORT;
1848
1849 if (!caps)
1850 pr_warn("%s: 1.8/3V not supported for vqmmc\n",
1851 mmc_hostname(mmc));
1852 }
1853
1854 if (caps) {
1855
1856
1857
1858
1859 u32 io_level = msm_host->curr_io_level;
1860
1861 config = readl_relaxed(host->ioaddr +
1862 msm_offset->core_vendor_spec);
1863 config |= CORE_IO_PAD_PWR_SWITCH_EN;
1864
1865 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
1866 config &= ~CORE_IO_PAD_PWR_SWITCH;
1867 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
1868 config |= CORE_IO_PAD_PWR_SWITCH;
1869
1870 writel_relaxed(config,
1871 host->ioaddr + msm_offset->core_vendor_spec);
1872 }
1873 msm_host->caps_0 |= caps;
1874 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
1875}
1876
1877static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
1878{
1879 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
1880 cqhci_deactivate(host->mmc);
1881 sdhci_reset(host, mask);
1882}
1883
#define DRIVER_NAME "sdhci_msm"
/*
 * Register-dump printf helper. NOTE: expands `host`, so it may only be
 * used where a `struct sdhci_host *host` is in scope.
 */
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
1887
/* Dump the MSM-specific DLL and vendor registers (sdhci dump hook). */
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
			"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
			"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
			"Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}
1913
/* Register accessors for controllers that still have the MCI region. */
static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

/* Register accessors for v5 controllers (MCI region removed). */
static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

/* Legacy (v4) variant: MCI region present, MCI register offsets. */
static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

/* v5 variant: no MCI region, v5 register offsets. */
static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

/* SDM845: like v5, but needs the SDR DLL restored on runtime resume. */
static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

/* SM8250: like v5, but uses the Tassadar DLL block. */
static const struct sdhci_msm_variant_info sm8250_sdhci_var = {
	.mci_removed = true,
	.uses_tassadar_dll = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
1948
/* Devicetree match table: compatible string selects the variant info. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
1958
/* MSM-specific overrides of the generic SDHCI operations. */
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq	= sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
};

/* Platform data: quirks for the MSM controller implementation. */
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
1981
1982static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
1983 struct sdhci_host *host)
1984{
1985 struct device_node *node = pdev->dev.of_node;
1986 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1987 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1988
1989 if (of_property_read_u32(node, "qcom,ddr-config",
1990 &msm_host->ddr_config))
1991 msm_host->ddr_config = DDR_CONFIG_POR_VAL;
1992
1993 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
1994}
1995
1996
/*
 * Probe: map registers, bring up clocks/OPP, detect the core version,
 * wire up the power IRQ, and register the host (with or without CQE).
 * Errors unwind through the goto chain at the bottom in reverse order.
 */
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required MSM host info
	 * (register offsets, accessors, feature flags) for this variant.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;
	msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock (optional). */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for the max. clock rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock (mandatory). */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock (mandatory). */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* OPP framework manages the "core" clock rate from here on. */
	msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(msm_host->opp_table)) {
		ret = PTR_ERR(msm_host->opp_table);
		goto bus_clk_disable;
	}

	/* OPP table is optional (-ENODEV means none in DT). */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (!ret) {
		msm_host->has_opp_table = true;
	} else if (ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto opp_cleanup;
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	/* "cal" and "sleep" clocks are optional; NULL is a valid bulk entry. */
	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto opp_cleanup;

	/*
	 * xo clock is needed for FLL feature of cm_dll.
	 * In case if xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	/* Older variants have a separate "core_mem" MMIO region. */
	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to power on reset state. */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
			host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register, then disable the
		 * FF clock software reset. */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		      CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH. So before enabling pwr irq
	 * interrupt, any pending power irq interrupt should be acknowledged.
	 * Otherwise power irq interrupt handler would be fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that above writes are propagated before interrupt
	 * enablement in GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);

	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
		msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* Keep the device active during host registration. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;
	sdhci_msm_set_regulator_caps(msm_host);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
opp_cleanup:
	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
2255
/* Remove: unregister the host, then tear down OPP, runtime PM and clocks. */
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	/* All-ones interrupt status means the device is gone/unreadable. */
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
	/* Resume once more, then disable runtime PM and drop the usage count. */
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}
2280
/* Runtime suspend: drop the core-clock performance vote, gate all clocks. */
static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}
2294
/* Runtime resume: re-enable clocks, restore DLL state, restore the vote. */
static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Variants with restore_dll_config lose the SDR DLL state across a
	 * power collapse; re-program it (only if a clock rate was active).
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		ret = sdhci_msm_restore_sdr_dll_config(host);

	/* Re-assert the performance vote at the last active clock rate. */
	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	return ret;
}
2317
/* System sleep is forwarded to runtime PM via the force helpers. */
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};
2325
/* Platform driver glue. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
2340