1
2
3
4
5
6
7
8
9#include <linux/arm-smccc.h>
10#include <linux/bitfield.h>
11#include <linux/of.h>
12#include <linux/of_address.h>
13#include <linux/of_device.h>
14#include <linux/phy/phy.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/consumer.h>
17#include <linux/reset.h>
18#include <linux/soc/mediatek/mtk_sip_svc.h>
19
20#include "ufshcd.h"
21#include "ufshcd-crypto.h"
22#include "ufshcd-pltfrm.h"
23#include "ufs_quirks.h"
24#include "unipro.h"
25#include "ufs-mediatek.h"
26
27#define CREATE_TRACE_POINTS
28#include "ufs-mediatek-trace.h"
29
/*
 * SMC helpers: forward UFS control requests to the secure world (TF-A)
 * via the MediaTek SIP service. The result is returned in @res.
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Turn the VA09 MPHY power rail on/off from secure world. */
#define ufs_mtk_va09_pwr_ctrl(res, on) \
	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)

/* Enable/disable the inline crypto engine. */
#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

/* Notify secure world that the reference clock is (about to be) on/off. */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset line high/low. */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
45
/* Per-device quirks applied once the attached UFS device is identified. */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
53
/* Device-tree match table; also exposed through the platform driver below. */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
58
59static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
60{
61 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
62
63 return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
64}
65
66static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
67{
68 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
69
70 return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
71}
72
73static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
74{
75 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
76
77 return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
78}
79
80static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
81{
82 u32 tmp;
83
84 if (enable) {
85 ufshcd_dme_get(hba,
86 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
87 tmp = tmp |
88 (1 << RX_SYMBOL_CLK_GATE_EN) |
89 (1 << SYS_CLK_GATE_EN) |
90 (1 << TX_CLK_GATE_EN);
91 ufshcd_dme_set(hba,
92 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
93
94 ufshcd_dme_get(hba,
95 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
96 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
97 ufshcd_dme_set(hba,
98 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
99 } else {
100 ufshcd_dme_get(hba,
101 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
102 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
103 (1 << SYS_CLK_GATE_EN) |
104 (1 << TX_CLK_GATE_EN));
105 ufshcd_dme_set(hba,
106 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
107
108 ufshcd_dme_get(hba,
109 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
110 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
111 ufshcd_dme_set(hba,
112 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
113 }
114}
115
116static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
117{
118 struct arm_smccc_res res;
119
120 ufs_mtk_crypto_ctrl(res, 1);
121 if (res.a0) {
122 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
123 __func__, res.a0);
124 hba->caps &= ~UFSHCD_CAP_CRYPTO;
125 }
126}
127
/*
 * Full host-controller reset: assert HCI, crypto and UniPro resets,
 * hold them briefly, then release in the reverse order.
 * NOTE(review): the assert/deassert ordering appears deliberate
 * (hci first in, last out) — do not reorder.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	/* Hold the reset lines long enough for the blocks to settle. */
	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
142
143static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
144 struct reset_control **rc,
145 char *str)
146{
147 *rc = devm_reset_control_get(hba->dev, str);
148 if (IS_ERR(*rc)) {
149 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
150 str, PTR_ERR(*rc));
151 *rc = NULL;
152 }
153}
154
/* Look up all three reset lines used by ufs_mtk_host_reset(). */
static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}
166
/*
 * Vop called around host-controller enable.
 *
 * On PRE_CHANGE: skip the hardware reset (and its settle delay) if
 * UniPro is already in low-power mode, enable the crypto engine when
 * supported, and — when auto-hibern8 must be disabled — clear the AH8
 * timer and strip the capability so the core never re-arms it.
 * Always returns 0.
 */
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			/* Stop any pending auto-hibern8 countdown. */
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}
198
/*
 * Bind the M-PHY described in the device tree.
 *
 * Returns -EPROBE_DEFER if the PHY driver has not probed yet, 0 when
 * the PHY is bound or absent (-ENODEV is deliberately treated as "no
 * PHY on this platform" and host->mphy is left NULL), or another
 * negative errno on real failure.
 */
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
236
/*
 * Request or release the device reference clock and wait for the
 * controller's acknowledgement.
 *
 * Ungating: notify secure world first, wait the ungating time, then
 * assert the request. Gating: deassert first; the gating wait and the
 * notification happen only after the ack, so the clock is never
 * reported off while the device may still need it.
 *
 * Returns 0 on success, -ETIMEDOUT if no ack arrives within
 * REFCLK_REQ_TIMEOUT_US (in which case secure world is re-notified
 * with the unchanged state).
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals the request bit. */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll back: tell secure world the state did not change. */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
283
284static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
285 u16 gating_us, u16 ungating_us)
286{
287 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
288
289 if (hba->dev_info.clk_gating_wait_us) {
290 host->ref_clk_gating_wait_us =
291 hba->dev_info.clk_gating_wait_us;
292 } else {
293 host->ref_clk_gating_wait_us = gating_us;
294 }
295
296 host->ref_clk_ungating_wait_us = ungating_us;
297}
298
/*
 * Poll the link state through the vendor debug probe until it matches
 * @state or @max_wait_ms elapses.
 *
 * Returns 0 when the state is reached, -ETIMEDOUT otherwise. The extra
 * check after the loop handles the race where the deadline expires
 * while the final read already observed the target state.
 */
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		/* Select the link-state probe, then sample it. */
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		/* Link state lives in the top nibble of the probe value. */
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}
324
/*
 * Power the M-PHY on or off, tracking state in host->mphy_powered_on.
 *
 * Power-on order: VA09 regulator, settle delay, secure-world VA09
 * notify, then phy_power_on(). Power-off is the exact reverse. A no-op
 * when there is no bound PHY or the requested state already holds.
 *
 * Returns 0 on success or the regulator error; the cached state is
 * only updated on success.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* Nothing to do without a PHY or when already in the target state. */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* Wait for the VA09 rail to stabilize. */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
366
/*
 * Look up a named clock; store it in @clk_out only on success.
 * Returns 0 or the devm_clk_get() error code.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;
	return 0;
}
381
/*
 * Switch the crypto engine between performance and low-power operating
 * points.
 *
 * Boost: raise vcore to the DT-provided minimum first, then re-parent
 * the crypto mux to the performance clock (voltage is restored if the
 * re-parent fails). Unboost: move to the low-power parent first, then
 * drop the vcore request. Errors are logged but not propagated — this
 * is a best-effort performance tweak.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	/* Mux must be running before its parent can be switched. */
	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the vcore boost on failure. */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
437
438static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
439 struct clk **clk)
440{
441 int ret;
442
443 ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
444 if (ret) {
445 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
446 name, ret);
447 }
448
449 return ret;
450}
451
/*
 * Gather everything the crypto-boost feature needs (config storage,
 * dvfsrc-vcore regulator, minimum vcore voltage from DT, and the three
 * crypto clocks). Only when all of them are available is
 * UFS_MTK_CAP_BOOST_CRYPT_ENGINE set; any missing piece silently
 * leaves the feature disabled.
 */
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}
498
499static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
500{
501 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
502
503 host->reg_va09 = regulator_get(hba->dev, "va09");
504 if (!host->reg_va09)
505 dev_info(hba->dev, "failed to get va09");
506 else
507 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
508}
509
/*
 * Collect optional host capabilities advertised through device-tree
 * properties, then log the resulting capability mask.
 */
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}
529
530static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
531{
532 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
533
534 ufs_mtk_boost_crypt(hba, up);
535 ufs_mtk_setup_ref_clk(hba, up);
536
537 if (up)
538 phy_power_on(host->mphy);
539 else
540 phy_power_off(host->mphy);
541}
542
543
544
545
546
547
548
549
550
/*
 * Vop called around clock gating/ungating.
 *
 * Gating (PRE_CHANGE, !on): drop to low-power only when it is safe —
 * the link is off, or the link is (or is about to enter) hibern8; in
 * the auto-hibern8 case wait up to 15 ms for the link to actually
 * reach hibern8 first. Ungating (POST_CHANGE, on): restore performance
 * state. Returns 0, or the wait-for-hibern8 timeout error.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_scale_perf(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_scale_perf(hba, true);
	}

	return ret;
}
592
/*
 * Determine the MediaTek controller generation, caching the result in
 * host->hw_ver (runs only once). Defaults to major version 2; if the
 * link reports UniPro >= 1.8 the controller is generation 3, in which
 * case the advertised UFSHCI version is raised to at least 3.0.
 */
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}
617
/* Report the (possibly corrected) UFSHCI version stored in hba. */
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}
622
623
624
625
626
627
628
629
630
631
632
/*
 * ufs_mtk_init - variant initialization, called from ufshcd core.
 *
 * Allocates the variant host data, validates the OF match, reads
 * DT-based capabilities, binds the M-PHY, sets up reset lines, enables
 * the host capabilities this controller supports, and finally powers
 * the PHY / performance state up manually because ufshcd_hba_init()
 * turned the clocks on before the variant existed.
 *
 * Returns 0, -ENOMEM, -EINVAL (no OF match) or a PHY-binding error
 * (the variant pointer is cleared on that path).
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
699
/*
 * Negotiate power-mode parameters before a power-mode change: cap the
 * host at HS-G4 and intersect with the device's maximum. On gen-3
 * controllers additionally program initial ADAPT for the negotiated
 * TX gear. Returns 0 on success or a negotiation/DME error.
 */
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G4;
	host_cap.hs_tx_gear = UFS_HS_G4;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}
728
729static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
730 enum ufs_notify_change_status stage,
731 struct ufs_pa_layer_attr *dev_max_params,
732 struct ufs_pa_layer_attr *dev_req_params)
733{
734 int ret = 0;
735
736 switch (stage) {
737 case PRE_CHANGE:
738 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
739 dev_req_params);
740 break;
741 case POST_CHANGE:
742 break;
743 default:
744 ret = -EINVAL;
745 break;
746 }
747
748 return ret;
749}
750
/*
 * Enter or leave UniPro low-power mode via the vendor power-down
 * control attribute, mirroring the state in host->unipro_lpm.
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly set as non-LPM mode if UIC commands is failed
		 * to use default hba_enable_delay_us value for re-enabling
		 * the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
770
/*
 * Pre-link-startup setup: detect the controller generation, make sure
 * UniPro is out of low-power mode, disable TX LCC, and disable deep
 * stall (bit 6 of VS_SAVEPOWERCONTROL). Returns 0 or the first DME
 * error encountered.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
802
/*
 * Set the clock-gating delay so gating always happens after the
 * auto-hibern8 idle timer (AH8 time + 5 ms); falls back to a 10 ms
 * base when auto-hibern8 is unavailable.
 */
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
819
/*
 * Post-link-startup setup: enable UniPro clock gating, program a 10 ms
 * auto-hibern8 idle timer (scale 3) when supported, and align the
 * clock-gating delay with it. Always returns 0.
 */
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}
834
835static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
836 enum ufs_notify_change_status stage)
837{
838 int ret = 0;
839
840 switch (stage) {
841 case PRE_CHANGE:
842 ret = ufs_mtk_pre_link(hba);
843 break;
844 case POST_CHANGE:
845 ret = ufs_mtk_post_link(hba);
846 break;
847 default:
848 ret = -EINVAL;
849 break;
850 }
851
852 return ret;
853}
854
/*
 * Hardware-reset the UFS device via the secure-world-controlled reset
 * line: stop the host, pulse reset low, then wait for the device to
 * come out of reset. Always returns 0.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
882
/*
 * Bring the link back to high-power mode after suspend: re-enable the
 * host, leave UniPro LPM, exit hibern8 (marking the link active), then
 * make the host operational again. Each step aborts on error.
 */
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}
907
908static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
909{
910 int err;
911
912 err = ufs_mtk_unipro_set_lpm(hba, true);
913 if (err) {
914
915 ufs_mtk_unipro_set_lpm(hba, false);
916 return err;
917 }
918
919 return 0;
920}
921
/*
 * Switch VCCQ2 between idle (low-power, only when VCC is already off)
 * and normal regulator modes. No-op unless both VCC and VCCQ2 exist.
 */
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
		return;

	if (lpm && !hba->vreg_info.vcc->enabled)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_IDLE);
	else if (!lpm)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_NORMAL);
}
934
/*
 * Vop suspend: move the link to LPM when it sits in hibern8, then —
 * once the link is no longer active — drop the regulators and PHY, and
 * hold device reset low when the link is fully off. Any failure forces
 * the link off and returns -EAGAIN so ufshcd retries via a full reset.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
971
/*
 * Vop resume: reverse of ufs_mtk_suspend() — PHY back on, regulators
 * to normal mode, then high-power link recovery if the link was left
 * in hibern8. On failure fall back to full link recovery.
 */
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	ufs_mtk_vreg_set_lpm(hba, false);

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}
992
/* Dump vendor-specific registers for debugging (refclk, ext, MPHY, probe). */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
1007
/*
 * Apply vendor-specific device quirks after device identification:
 * Samsung parts get PA_TACTIVATE = 6, and per-vendor reference-clock
 * gating/ungating wait times are installed. Always returns 0.
 */
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

	return 0;
}
1030
/*
 * Fix up device quirks: apply the static fixup table, and on broken-VCC
 * boards keep VCC always on (which also makes the LPM delay quirks
 * meaningless, so they are cleared).
 */
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}
}
1046
1047static void ufs_mtk_event_notify(struct ufs_hba *hba,
1048 enum ufs_event_type evt, void *data)
1049{
1050 unsigned int val = *(u32 *)data;
1051
1052 trace_ufs_mtk_event(evt, val);
1053}
1054
1055
1056
1057
1058
1059
1060
/*
 * struct ufs_hba_variant_ops - MediaTek vendor hooks registered with
 * the ufshcd core via ufshcd_pltfrm_init().
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
};
1077
1078
1079
1080
1081
1082
1083
/*
 * ufs_mtk_probe - platform probe entry.
 *
 * Optionally links this device to a "ti,syscon-reset" provider so the
 * reset controller probes first (deferring if it has not); any failure
 * to find or link the provider is non-fatal and probing proceeds
 * without it. Then hands off to the generic ufshcd platform init.
 *
 * Return: 0 on success, -EPROBE_DEFER while the reset provider is
 * pending, or the ufshcd_pltfrm_init() error.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	/* of_node_put() is NULL-safe, so this covers the skip_reset paths. */
	of_node_put(reset_node);
	return err;
}
1126
1127
1128
1129
1130
1131
1132
/*
 * ufs_mtk_remove - platform remove entry; resumes the device so the
 * host can be torn down with clocks/power available, then unregisters.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba =  platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}
1141
/* PM callbacks: delegate system/runtime suspend-resume to ufshcd core. */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};
1148
/* Platform driver glue; registered by module_platform_driver() below. */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
1159
1160MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1161MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1162MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1163MODULE_LICENSE("GPL v2");
1164
1165module_platform_driver(ufs_mtk_pltform);
1166