/* Universal Flash Storage Host controller driver core (ufshcd) */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for fDeviceInit flag.
 * The fDeviceInit query response time for some devices is too large,
 * so the default QUERY_REQ_TIMEOUT may not be enough for them.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

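/*
 * Maximum descriptor size in bytes, indexed by descriptor IDN
 * (enum desc_idn). The RFU entries cover reserved descriptor IDNs.
 */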
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		else
			/*
			 * Only mark the IRQ as enabled on success, so a later
			 * ufshcd_disable_irq() never frees an IRQ that was
			 * never requested.
			 */
			hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

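/**
 * ufshcd_wait_for_register - wait for a register to reach a given value
 * @hba: per-adapter interface
 * @reg: MMIO register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for (compared under @mask)
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 * @can_sleep: perform sleep or just busy-wait between polls
 *
 * Returns 0 on success, -ETIMEDOUT on error.
 */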
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

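/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is unavailable for locking, else
 * returns true with the tag value in @free_slot.
 */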
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in the outstanding requests field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success, non-zero value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of the UIC command completion.
 * Returns 0 on success, non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * Returns the value of UIC command argument3.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TYPE of response transaction, from
 *			the response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU.
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of the response UPIU.
 *
 * Returns true if an exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

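/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation counters and timer.
 * @hba: per adapter instance
 */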
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *			When run-stop registers are set to 1, it indicates to
 *			the host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns 0 if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver 1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either the host or
	 * the device doesn't support UniPro ver 1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

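/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */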
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for the
		 * ungate work to complete if async is requested.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * If a request to cancel this work raced with us, the gating state
	 * would already be marked as REQ_CLKS_ON. In that case keep the
	 * state as REQ_CLKS_ON, which anyway implies that clocks are off
	 * and a request to turn them on is pending. This keeps the state
	 * machine intact and avoids repeated cancel work when new requests
	 * arrive before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

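/* host lock must be held before calling this variant */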
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

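/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */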
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

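/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */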
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller must hold
 * the uic_cmd_mutex and the host lock.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

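/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */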
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

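/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header descriptor
 * according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */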
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
 * for SCSI commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_SCSI;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

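/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */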
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requires the tag to
		 * be locked and sleep during its active period. Return
		 * BUSY and let the SCSI midlayer retry this request.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until the device management command
 * completes.
 *
 * Returns false if a free slot is unavailable for locking, else
 * returns true with the tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

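/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */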
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get a free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by the SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

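/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */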
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}

static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor
 * requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * On return, @buf_len contains the length parameter received in the
 * response.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
EXPORT_SYMBOL(ufshcd_query_descriptor_retry);

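/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */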
static int ufshcd_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  int desc_index,
				  u32 param_offset,
				  u8 *param_read_buf,
				  u32 param_size)
{
	int ret;
	u8 *desc_buf;
	u32 buff_len;
	bool is_kmalloc = true;

	/* safety checks */
	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	buff_len = ufs_query_desc_max_size[desc_id];
	if ((param_offset + param_size) > buff_len)
		return -EINVAL;

	if (!param_offset && (param_size == buff_len)) {
		/* memory space already available to hold full descriptor */
		desc_buf = param_read_buf;
		is_kmalloc = false;
	} else {
		/* allocate memory to hold full descriptor */
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	}

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);

	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
	     ufs_query_desc_max_size[desc_id])
	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
			__func__, desc_id, param_offset, buff_len, ret);
		if (!ret)
			ret = -EINVAL;

		goto out;
	}

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}

static inline int ufshcd_read_desc(struct ufs_hba *hba,
				   enum desc_idn desc_id,
				   int desc_index,
				   u8 *buf,
				   u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
					 u8 *buf,
					 u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}

int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
EXPORT_SYMBOL(ufshcd_read_device_desc);

/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read
 * @size: size of buf
 * @ascii: if true convert from unicode to ascii characters
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
				u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba,
				QUERY_DESC_IDN_STRING, desc_index, buf, size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		char *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
					__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
				desc_len - QUERY_DESC_HDR_SIZE,
				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
				size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
EXPORT_SYMBOL(ufshcd_read_string_desc);

/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}

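/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array.
 *	Each command descriptor consists of Command UPIU, Response UPIU and
 *	PRDT.
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL).
 * 4. Allocate memory for local reference block (lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */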
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptors to be 128 byte aligned.
	 * dmam_alloc_coherent() returns PAGE_SIZE aligned memory, which
	 * satisfies that requirement; the WARN_ON below only sanity-checks
	 * the alignment.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors.
	 * UFSHCI requires 1024 byte alignment of UTRD.
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors.
	 * UFSHCI requires 1024 byte alignment of UTMRD.
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

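/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space:
 * 1. Update the corresponding UTRD.UCDBA and UTRD.UCDBAU with the command
 *    descriptor address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into the local reference block.
 */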
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
 * in order to initiate the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for the first call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (!retries)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (!retries)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel), retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

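/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands that affect the link power
 * state and waits for completion.
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
 * DME_HIBERNATE_EXIT take some time to take effect on both host and device
 * UniPro links; their final completion is indicated by dedicated status bits
 * in the Interrupt Status register (UPMS, UHES, UHXS) in addition to the
 * normal UIC command completion status (UCCS). This function only returns
 * after the relevant status bits indicate completion.
 *
 * Returns 0 on success, non-zero value on failure
 */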
2527static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2528{
2529 struct completion uic_async_done;
2530 unsigned long flags;
2531 u8 status;
2532 int ret;
2533 bool reenable_intr = false;
2534
2535 mutex_lock(&hba->uic_cmd_mutex);
2536 init_completion(&uic_async_done);
2537 ufshcd_add_delay_before_dme_cmd(hba);
2538
2539 spin_lock_irqsave(hba->host->host_lock, flags);
2540 hba->uic_async_done = &uic_async_done;
2541 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2542 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 /*
 * Make sure UIC command completion interrupt is disabled before
 * issuing UIC command.
 */
2547 wmb();
2548 reenable_intr = true;
2549 }
2550 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2551 spin_unlock_irqrestore(hba->host->host_lock, flags);
2552 if (ret) {
2553 dev_err(hba->dev,
2554 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2555 cmd->command, cmd->argument3, ret);
2556 goto out;
2557 }
2558
2559 if (!wait_for_completion_timeout(hba->uic_async_done,
2560 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2561 dev_err(hba->dev,
2562 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2563 cmd->command, cmd->argument3);
2564 ret = -ETIMEDOUT;
2565 goto out;
2566 }
2567
2568 status = ufshcd_get_upmcrs(hba);
2569 if (status != PWR_LOCAL) {
2570 dev_err(hba->dev,
2571 "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
2572 cmd->command, status);
2573 ret = (status != PWR_OK) ? status : -1;
2574 }
2575out:
2576 spin_lock_irqsave(hba->host->host_lock, flags);
2577 hba->active_uic_cmd = NULL;
2578 hba->uic_async_done = NULL;
2579 if (reenable_intr)
2580 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
2581 spin_unlock_irqrestore(hba->host->host_lock, flags);
2582 mutex_unlock(&hba->uic_cmd_mutex);
2583
2584 return ret;
2585}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
2595static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2596{
2597 struct uic_command uic_cmd = {0};
2598 int ret;
2599
2600 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2601 ret = ufshcd_dme_set(hba,
2602 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2603 if (ret) {
2604 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2605 __func__, ret);
2606 goto out;
2607 }
2608 }
2609
2610 uic_cmd.command = UIC_CMD_DME_SET;
2611 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2612 uic_cmd.argument3 = mode;
2613 ufshcd_hold(hba, false);
2614 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2615 ufshcd_release(hba);
2616
2617out:
2618 return ret;
2619}
2620
2621static int ufshcd_link_recovery(struct ufs_hba *hba)
2622{
2623 int ret;
2624 unsigned long flags;
2625
2626 spin_lock_irqsave(hba->host->host_lock, flags);
2627 hba->ufshcd_state = UFSHCD_STATE_RESET;
2628 ufshcd_set_eh_in_progress(hba);
2629 spin_unlock_irqrestore(hba->host->host_lock, flags);
2630
2631 ret = ufshcd_host_reset_and_restore(hba);
2632
2633 spin_lock_irqsave(hba->host->host_lock, flags);
2634 if (ret)
2635 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2636 ufshcd_clear_eh_in_progress(hba);
2637 spin_unlock_irqrestore(hba->host->host_lock, flags);
2638
2639 if (ret)
2640 dev_err(hba->dev, "%s: link recovery failed, err %d",
2641 __func__, ret);
2642
2643 return ret;
2644}
2645
2646static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2647{
2648 int ret;
2649 struct uic_command uic_cmd = {0};
2650
2651 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2652 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2653
2654 if (ret) {
2655 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2656 __func__, ret);

 /*
 * If link recovery fails then return error so that the caller
 * does not retry the hibern8 enter again.
 */
2662 if (ufshcd_link_recovery(hba))
2663 ret = -ENOLINK;
2664 }
2665
2666 return ret;
2667}
2668
2669static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2670{
2671 int ret = 0, retries;
2672
2673 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
2674 ret = __ufshcd_uic_hibern8_enter(hba);
2675 if (!ret || ret == -ENOLINK)
2676 goto out;
2677 }
2678out:
2679 return ret;
2680}
2681
2682static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2683{
2684 struct uic_command uic_cmd = {0};
2685 int ret;
2686
2687 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2688 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2689 if (ret) {
2690 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2691 __func__, ret);
2692 ret = ufshcd_link_recovery(hba);
2693 }
2694
2695 return ret;
2696}
2697
/**
 * ufshcd_init_pwr_info - set the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
2703static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2704{
2705 hba->pwr_info.gear_rx = UFS_PWM_G1;
2706 hba->pwr_info.gear_tx = UFS_PWM_G1;
2707 hba->pwr_info.lane_rx = 1;
2708 hba->pwr_info.lane_tx = 1;
2709 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2710 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2711 hba->pwr_info.hs_rate = 0;
2712}
2713
2714
2715
2716
2717
2718static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2719{
2720 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2721
2722 if (hba->max_pwr_info.is_valid)
2723 return 0;
2724
2725 pwr_info->pwr_tx = FASTAUTO_MODE;
2726 pwr_info->pwr_rx = FASTAUTO_MODE;
2727 pwr_info->hs_rate = PA_HS_MODE_B;
2728
2729
2730 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2731 &pwr_info->lane_rx);
2732 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2733 &pwr_info->lane_tx);
2734
2735 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2736 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2737 __func__,
2738 pwr_info->lane_rx,
2739 pwr_info->lane_tx);
2740 return -EINVAL;
2741 }
2742
 /*
 * First, get the maximum gears of HS speed.
 * If a zero value, it means there is no HSGEAR capability.
 * Then, get the maximum gears of PWM speed.
 */
2748 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2749 if (!pwr_info->gear_rx) {
2750 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2751 &pwr_info->gear_rx);
2752 if (!pwr_info->gear_rx) {
2753 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2754 __func__, pwr_info->gear_rx);
2755 return -EINVAL;
2756 }
2757 pwr_info->pwr_rx = SLOWAUTO_MODE;
2758 }
2759
2760 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2761 &pwr_info->gear_tx);
2762 if (!pwr_info->gear_tx) {
2763 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2764 &pwr_info->gear_tx);
2765 if (!pwr_info->gear_tx) {
2766 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2767 __func__, pwr_info->gear_tx);
2768 return -EINVAL;
2769 }
2770 pwr_info->pwr_tx = SLOWAUTO_MODE;
2771 }
2772
2773 hba->max_pwr_info.is_valid = true;
2774 return 0;
2775}
2776
2777static int ufshcd_change_power_mode(struct ufs_hba *hba,
2778 struct ufs_pa_layer_attr *pwr_mode)
2779{
2780 int ret;
2781
2782
2783 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2784 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2785 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2786 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2787 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2788 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2789 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2790 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2791 return 0;
2792 }
2793
 /*
 * Configure attributes for power mode change with below.
 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
 * - PA_HSSERIES
 */
2800 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2801 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2802 pwr_mode->lane_rx);
2803 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2804 pwr_mode->pwr_rx == FAST_MODE)
2805 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2806 else
2807 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2808
2809 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2810 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2811 pwr_mode->lane_tx);
2812 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2813 pwr_mode->pwr_tx == FAST_MODE)
2814 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2815 else
2816 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2817
2818 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2819 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2820 pwr_mode->pwr_rx == FAST_MODE ||
2821 pwr_mode->pwr_tx == FAST_MODE)
2822 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2823 pwr_mode->hs_rate);
2824
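 /*
 * Illustrative note (not from the original source): PA_PWRMODE packs
 * the RX power mode in the upper nibble and the TX power mode in the
 * lower nibble, hence the (pwr_rx << 4) | pwr_tx encoding below.
 */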
2825 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2826 | pwr_mode->pwr_tx);
2827
2828 if (ret) {
2829 dev_err(hba->dev,
2830 "%s: power mode change failed %d\n", __func__, ret);
2831 } else {
2832 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2833 pwr_mode);
2834
2835 memcpy(&hba->pwr_info, pwr_mode,
2836 sizeof(struct ufs_pa_layer_attr));
2837 }
2838
2839 return ret;
2840}
2841
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
2847static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2848 struct ufs_pa_layer_attr *desired_pwr_mode)
2849{
2850 struct ufs_pa_layer_attr final_params = { 0 };
2851 int ret;
2852
2853 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
2854 desired_pwr_mode, &final_params);
2855
2856 if (ret)
2857 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2858
2859 ret = ufshcd_change_power_mode(hba, &final_params);
2860
2861 return ret;
2862}
2863
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
2870static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2871{
2872 int i;
2873 int err;
 bool flag_res = true;
2875
2876 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2877 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
2878 if (err) {
2879 dev_err(hba->dev,
2880 "%s setting fDeviceInit flag failed with error %d\n",
2881 __func__, err);
2882 goto out;
2883 }
2884
2885
2886 for (i = 0; i < 1000 && !err && flag_res; i++)
2887 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2888 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2889
2890 if (err)
2891 dev_err(hba->dev,
2892 "%s reading fDeviceInit flag failed with error %d\n",
2893 __func__, err);
2894 else if (flag_res)
2895 dev_err(hba->dev,
2896 "%s fDeviceInit was not cleared by the device\n",
2897 __func__);
2898
2899out:
2900 return err;
2901}
2902
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring the UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base addresses
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
2915static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2916{
2917 int err = 0;
2918 u32 reg;
2919
2920
2921 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2922
2923
2924 if (ufshcd_is_intr_aggr_allowed(hba))
2925 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2926 else
2927 ufshcd_disable_intr_aggr(hba);
2928
2929
2930 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2931 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2932 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2933 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2934 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2935 REG_UTP_TASK_REQ_LIST_BASE_L);
2936 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2937 REG_UTP_TASK_REQ_LIST_BASE_H);
2938
 /*
 * Make sure base address and interrupt setup are updated before
 * enabling the run/stop registers below.
 */
2943 wmb();
2944
 /*
 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 */
2948 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
2949 if (!(ufshcd_get_lists_status(reg))) {
2950 ufshcd_enable_run_stop_reg(hba);
2951 } else {
2952 dev_err(hba->dev,
2953 "Host controller not ready to process requests");
2954 err = -EIO;
2955 goto out;
2956 }
2957
2958out:
2959 return err;
2960}
2961
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 * @can_sleep: perform sleep or just spin
 */
2967static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
2968{
2969 int err;
2970
2971 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
2972 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
2973 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
2974 10, 1, can_sleep);
2975 if (err)
2976 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
2977}
2978
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When the controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
2989static int ufshcd_hba_enable(struct ufs_hba *hba)
2990{
2991 int retry;
2992
2993
2994
2995
2996
2997
2998
2999 if (!ufshcd_is_hba_active(hba))
 /* change controller state to "reset state" */
3001 ufshcd_hba_stop(hba, true);
3002
3003
3004 ufshcd_set_link_off(hba);
3005
3006 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
3007
3008
3009 ufshcd_hba_start(hba);
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021 msleep(1);
3022
3023
3024 retry = 10;
3025 while (ufshcd_is_hba_active(hba)) {
3026 if (retry) {
3027 retry--;
3028 } else {
3029 dev_err(hba->dev,
3030 "Controller enable failed\n");
3031 return -EIO;
3032 }
3033 msleep(5);
3034 }
3035
3036
3037 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
3038
3039 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
3040
3041 return 0;
3042}
3043
3044static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3045{
 int tx_lanes = 0, i, err = 0;
3047
3048 if (!peer)
3049 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3050 &tx_lanes);
3051 else
3052 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3053 &tx_lanes);
3054 for (i = 0; i < tx_lanes; i++) {
3055 if (!peer)
3056 err = ufshcd_dme_set(hba,
3057 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3058 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3059 0);
3060 else
3061 err = ufshcd_dme_peer_set(hba,
3062 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3063 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3064 0);
3065 if (err) {
3066 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
3067 __func__, peer, i, err);
3068 break;
3069 }
3070 }
3071
3072 return err;
3073}
3074
3075static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3076{
3077 return ufshcd_disable_tx_lcc(hba, true);
3078}
3079
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
3086static int ufshcd_link_startup(struct ufs_hba *hba)
3087{
3088 int ret;
3089 int retries = DME_LINKSTARTUP_RETRIES;
3090
3091 do {
3092 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
3093
3094 ret = ufshcd_dme_link_startup(hba);
3095
3096
3097 if (!ret && !ufshcd_is_device_present(hba)) {
3098 dev_err(hba->dev, "%s: Device not present\n", __func__);
3099 ret = -ENXIO;
3100 goto out;
3101 }
3102
 /*
 * DME link lost indication is only received when link is up,
 * but we can't be sure if the link is up until link startup
 * succeeds. So reset the local Uni-Pro and try again.
 */
3108 if (ret && ufshcd_hba_enable(hba))
3109 goto out;
3110 } while (ret && retries--);
3111
3112 if (ret)
 /* failed to get the link up... retire */
3114 goto out;
3115
3116 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3117 ret = ufshcd_disable_device_tx_lcc(hba);
3118 if (ret)
3119 goto out;
3120 }
3121
3122
3123 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3124 if (ret)
3125 goto out;
3126
3127 ret = ufshcd_make_hba_operational(hba);
3128out:
3129 if (ret)
3130 dev_err(hba->dev, "link startup failed %d\n", ret);
3131 return ret;
3132}
3133
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer is ready, it issues the NOP IN response upon the
 * NOP OUT request, otherwise the request times out.
 */
3144static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3145{
3146 int err = 0;
3147 int retries;
3148
3149 ufshcd_hold(hba, false);
3150 mutex_lock(&hba->dev_cmd.lock);
3151 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3152 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3153 NOP_OUT_TIMEOUT);
3154
3155 if (!err || err == -ETIMEDOUT)
3156 break;
3157
3158 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3159 }
3160 mutex_unlock(&hba->dev_cmd.lock);
3161 ufshcd_release(hba);
3162
3163 if (err)
3164 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3165 return err;
3166}
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3178{
3179 int ret = 0;
3180 u8 lun_qdepth;
3181 struct ufs_hba *hba;
3182
3183 hba = shost_priv(sdev->host);
3184
3185 lun_qdepth = hba->nutrs;
3186 ret = ufshcd_read_unit_desc_param(hba,
3187 ufshcd_scsi_to_upiu_lun(sdev->lun),
3188 UNIT_DESC_PARAM_LU_Q_DEPTH,
3189 &lun_qdepth,
3190 sizeof(lun_qdepth));
3191
3192
3193 if (ret == -EOPNOTSUPP)
3194 lun_qdepth = 1;
3195 else if (!lun_qdepth)
3196
3197 lun_qdepth = hba->nutrs;
3198 else
3199 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3200
3201 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3202 __func__, lun_qdepth);
3203 scsi_change_queue_depth(sdev, lun_qdepth);
3204}
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3218 u8 lun,
3219 u8 *b_lu_write_protect)
3220{
3221 int ret;
3222
3223 if (!b_lu_write_protect)
3224 ret = -EINVAL;
3225
3226
3227
3228
3229
3230 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3231 ret = -ENOTSUPP;
3232 else
3233 ret = ufshcd_read_unit_desc_param(hba,
3234 lun,
3235 UNIT_DESC_PARAM_LU_WR_PROTECT,
3236 b_lu_write_protect,
3237 sizeof(*b_lu_write_protect));
3238 return ret;
3239}
3240
3241
3242
3243
3244
3245
3246
3247
3248static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3249 struct scsi_device *sdev)
3250{
3251 if (hba->dev_info.f_power_on_wp_en &&
3252 !hba->dev_info.is_lu_power_on_wp) {
3253 u8 b_lu_write_protect;
3254
3255 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3256 &b_lu_write_protect) &&
3257 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3258 hba->dev_info.is_lu_power_on_wp = true;
3259 }
3260}
3261
3262
3263
3264
3265
3266
3267
3268static int ufshcd_slave_alloc(struct scsi_device *sdev)
3269{
3270 struct ufs_hba *hba;
3271
3272 hba = shost_priv(sdev->host);
3273
3274
3275 sdev->use_10_for_ms = 1;
3276
3277
3278 sdev->allow_restart = 1;
3279
3280
3281 sdev->no_report_opcodes = 1;
3282
3283
3284 ufshcd_set_queue_depth(sdev);
3285
3286 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3287
3288 return 0;
3289}
3290
3291
3292
3293
3294
3295
3296
3297
3298static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
3299{
3300 struct ufs_hba *hba = shost_priv(sdev->host);
3301
3302 if (depth > hba->nutrs)
3303 depth = hba->nutrs;
3304 return scsi_change_queue_depth(sdev, depth);
3305}
3306
3307
3308
3309
3310
3311static int ufshcd_slave_configure(struct scsi_device *sdev)
3312{
3313 struct request_queue *q = sdev->request_queue;
3314
3315 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3316 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3317
3318 return 0;
3319}
3320
3321
3322
3323
3324
3325static void ufshcd_slave_destroy(struct scsi_device *sdev)
3326{
3327 struct ufs_hba *hba;
3328
3329 hba = shost_priv(sdev->host);
3330
3331 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3332 unsigned long flags;
3333
3334 spin_lock_irqsave(hba->host->host_lock, flags);
3335 hba->sdev_ufs_device = NULL;
3336 spin_unlock_irqrestore(hba->host->host_lock, flags);
3337 }
3338}
3339
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
3348static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
3349{
3350 struct utp_task_req_desc *task_req_descp;
3351 struct utp_upiu_task_rsp *task_rsp_upiup;
3352 unsigned long flags;
3353 int ocs_value;
3354 int task_result;
3355
3356 spin_lock_irqsave(hba->host->host_lock, flags);
3357
3358
3359 __clear_bit(index, &hba->outstanding_tasks);
3360
3361 task_req_descp = hba->utmrdl_base_addr;
3362 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3363
3364 if (ocs_value == OCS_SUCCESS) {
3365 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3366 task_req_descp[index].task_rsp_upiu;
3367 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
3368 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
3369 if (resp)
3370 *resp = (u8)task_result;
3371 } else {
3372 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3373 __func__, ocs_value);
3374 }
3375 spin_unlock_irqrestore(hba->host->host_lock, flags);
3376
3377 return ocs_value;
3378}
3379
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
3387static inline int
3388ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3389{
3390 int result = 0;
3391
3392 switch (scsi_status) {
3393 case SAM_STAT_CHECK_CONDITION:
 ufshcd_copy_sense_data(lrbp);
 /* fallthrough: CHECK CONDITION builds its result like GOOD */
 case SAM_STAT_GOOD:
3396 result |= DID_OK << 16 |
3397 COMMAND_COMPLETE << 8 |
3398 scsi_status;
3399 break;
3400 case SAM_STAT_TASK_SET_FULL:
3401 case SAM_STAT_BUSY:
3402 case SAM_STAT_TASK_ABORTED:
3403 ufshcd_copy_sense_data(lrbp);
3404 result |= scsi_status;
3405 break;
3406 default:
3407 result |= DID_ERROR << 16;
3408 break;
3409 }
3410
3411 return result;
3412}
3413
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
3421static inline int
3422ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3423{
3424 int result = 0;
3425 int scsi_status;
3426 int ocs;
3427
3428
3429 ocs = ufshcd_get_tr_ocs(lrbp);
3430
3431 switch (ocs) {
3432 case OCS_SUCCESS:
3433 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3434
3435 switch (result) {
3436 case UPIU_TRANSACTION_RESPONSE:
3437
3438
3439
3440
3441 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3442
3443
3444
3445
3446
3447 scsi_status = result & MASK_SCSI_STATUS;
3448 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
3449
 /*
 * Currently we are only supporting BKOPs exception
 * events hence we can ignore BKOPs exception event
 * during power management callbacks. BKOPs exception
 * event is not expected to be raised in runtime suspend
 * callback as it allows the urgent bkops.
 * During system suspend, we are anyway forcefully
 * disabling the bkops and if urgent bkops is needed
 * it will be enabled on system resume. Long term
 * solution could be to abort the system suspend if
 * UFS device needs urgent BKOPs.
 */
3462 if (!hba->pm_op_in_progress &&
3463 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
3464 schedule_work(&hba->eeh_work);
3465 break;
3466 case UPIU_TRANSACTION_REJECT_UPIU:
3467
3468 result = DID_ERROR << 16;
3469 dev_err(hba->dev,
3470 "Reject UPIU not fully implemented\n");
3471 break;
 default:
 dev_err(hba->dev,
 "Unexpected request response code = %x\n",
 result);
 result = DID_ERROR << 16;
 break;
3478 }
3479 break;
3480 case OCS_ABORTED:
3481 result |= DID_ABORT << 16;
3482 break;
3483 case OCS_INVALID_COMMAND_STATUS:
3484 result |= DID_REQUEUE << 16;
3485 break;
3486 case OCS_INVALID_CMD_TABLE_ATTR:
3487 case OCS_INVALID_PRDT_ATTR:
3488 case OCS_MISMATCH_DATA_BUF_SIZE:
3489 case OCS_MISMATCH_RESP_UPIU_SIZE:
3490 case OCS_PEER_COMM_FAILURE:
3491 case OCS_FATAL_ERROR:
3492 default:
3493 result |= DID_ERROR << 16;
3494 dev_err(hba->dev,
3495 "OCS error from controller = %x\n", ocs);
3496 break;
3497 }
3498
3499 return result;
3500}
3501
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
3507static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3508{
3509 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
3510 hba->active_uic_cmd->argument2 |=
3511 ufshcd_get_uic_cmd_result(hba);
3512 hba->active_uic_cmd->argument3 =
3513 ufshcd_get_dme_attr_val(hba);
3514 complete(&hba->active_uic_cmd->done);
3515 }
3516
3517 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3518 complete(hba->uic_async_done);
3519}
3520
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */
3526static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3527 unsigned long completed_reqs)
3528{
3529 struct ufshcd_lrb *lrbp;
3530 struct scsi_cmnd *cmd;
3531 int result;
3532 int index;
3533
3534 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3535 lrbp = &hba->lrb[index];
3536 cmd = lrbp->cmd;
3537 if (cmd) {
3538 result = ufshcd_transfer_rsp_status(hba, lrbp);
3539 scsi_dma_unmap(cmd);
3540 cmd->result = result;
3541
3542 lrbp->cmd = NULL;
3543 clear_bit_unlock(index, &hba->lrb_in_use);
3544
3545 cmd->scsi_done(cmd);
3546 __ufshcd_release(hba);
3547 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3548 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
3549 if (hba->dev_cmd.complete)
3550 complete(hba->dev_cmd.complete);
3551 }
3552 }
3553
3554
3555 hba->outstanding_reqs ^= completed_reqs;
3556
3557 ufshcd_clk_scaling_update_busy(hba);
3558
3559
3560 wake_up(&hba->dev_cmd.tag_wq);
3561}
3562
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
3567static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3568{
3569 unsigned long completed_reqs;
3570 u32 tr_doorbell;
3571
 /* Resetting interrupt aggregation counters first and reading the
 * DOOR_BELL afterward allows us to handle all the completed requests.
 * In order to prevent other interrupts starvation the DB is read once
 * after reset. The down side of this solution is the possibility of
 * false interrupt if device completes another request after resetting
 * aggregation and before reading the DB.
 */
3579 if (ufshcd_is_intr_aggr_allowed(hba))
3580 ufshcd_reset_intr_aggr(hba);
3581
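 /*
 * Illustrative note (not from the original source): a slot that is
 * set in outstanding_reqs but already cleared in the doorbell
 * register has been finished by the controller, so XOR-ing the
 * doorbell with the outstanding bitmap below yields exactly the set
 * of newly completed requests.
 */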
3582 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3583 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3584
3585 __ufshcd_transfer_req_compl(hba, completed_reqs);
3586}
3587
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
3598static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3599{
3600 int err = 0;
3601 u32 val;
3602
3603 if (!(hba->ee_ctrl_mask & mask))
3604 goto out;
3605
3606 val = hba->ee_ctrl_mask & ~mask;
3607 val &= 0xFFFF;
3608 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3609 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3610 if (!err)
3611 hba->ee_ctrl_mask &= ~mask;
3612out:
3613 return err;
3614}
3615
/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
3626static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3627{
3628 int err = 0;
3629 u32 val;
3630
3631 if (hba->ee_ctrl_mask & mask)
3632 goto out;
3633
3634 val = hba->ee_ctrl_mask | mask;
3635 val &= 0xFFFF;
3636 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3637 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3638 if (!err)
3639 hba->ee_ctrl_mask |= mask;
3640out:
3641 return err;
3642}
3643
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
3655static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3656{
3657 int err = 0;
3658
3659 if (hba->auto_bkops_enabled)
3660 goto out;
3661
3662 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3663 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3664 if (err) {
3665 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3666 __func__, err);
3667 goto out;
3668 }
3669
3670 hba->auto_bkops_enabled = true;
3671
3672
3673 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3674 if (err)
3675 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3676 __func__, err);
3677out:
3678 return err;
3679}
3680
/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the side effect of the device moving into a critical state where
 * it is not able to handle writes. To take care of this, the host enables
 * the urgent bkops exception event before disabling auto bkops, so that
 * the device can flag when it urgently needs BKOPS again.
 *
 * Returns zero on success, non-zero on failure.
 */
3693static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3694{
3695 int err = 0;
3696
3697 if (!hba->auto_bkops_enabled)
3698 goto out;
3699
3700
3701
3702
3703
3704 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3705 if (err) {
3706 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3707 __func__, err);
3708 goto out;
3709 }
3710
3711 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3712 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3713 if (err) {
3714 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3715 __func__, err);
3716 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3717 goto out;
3718 }
3719
3720 hba->auto_bkops_enabled = false;
3721out:
3722 return err;
3723}
3724
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to its default value. The s/w tracking variables should be updated
 * to reflect the same. Also force enable the BKOPS to keep the
 * host and device BKOPS state in sync.
 */
3733static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3734{
3735 hba->auto_bkops_enabled = false;
3736 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3737 ufshcd_enable_auto_bkops(hba);
3738}
3739
3740static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3741{
3742 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3743 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3744}
3745
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to the "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Callers of this function can check the "hba->auto_bkops_enabled"
 * flag to know whether auto bkops is enabled or disabled after this
 * function returns control to it.
 */
3762static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3763 enum bkops_status status)
3764{
3765 int err;
3766 u32 curr_status = 0;
3767
3768 err = ufshcd_get_bkops_status(hba, &curr_status);
3769 if (err) {
3770 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3771 __func__, err);
3772 goto out;
3773 } else if (curr_status > BKOPS_STATUS_MAX) {
3774 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3775 __func__, curr_status);
3776 err = -EINVAL;
3777 goto out;
3778 }
3779
3780 if (curr_status >= status)
3781 err = ufshcd_enable_auto_bkops(hba);
3782 else
3783 err = ufshcd_disable_auto_bkops(hba);
3784out:
3785 return err;
3786}
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3799{
3800 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
3801}
3802
3803static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3804{
3805 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3806 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3807}
3808
3809static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
3810{
3811 int err;
3812 u32 curr_status = 0;
3813
3814 if (hba->is_urgent_bkops_lvl_checked)
3815 goto enable_auto_bkops;
3816
3817 err = ufshcd_get_bkops_status(hba, &curr_status);
3818 if (err) {
3819 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3820 __func__, err);
3821 goto out;
3822 }
3823
3824
3825
3826
3827
3828
3829
3830 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
3831 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
3832 __func__, curr_status);
3833
3834 hba->urgent_bkops_lvl = curr_status;
3835 hba->is_urgent_bkops_lvl_checked = true;
3836 }
3837
3838enable_auto_bkops:
3839 err = ufshcd_enable_auto_bkops(hba);
3840out:
3841 if (err < 0)
3842 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3843 __func__, err);
3844}
3845
3846
3847
3848
3849
3850
3851
3852
3853static void ufshcd_exception_event_handler(struct work_struct *work)
3854{
3855 struct ufs_hba *hba;
3856 int err;
3857 u32 status = 0;
3858 hba = container_of(work, struct ufs_hba, eeh_work);
3859
3860 pm_runtime_get_sync(hba->dev);
3861 err = ufshcd_get_ee_status(hba, &status);
3862 if (err) {
3863 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3864 __func__, err);
3865 goto out;
3866 }
3867
3868 status &= hba->ee_ctrl_mask;
3869
3870 if (status & MASK_EE_URGENT_BKOPS)
3871 ufshcd_bkops_exception_event_handler(hba);
3872
3873out:
3874 pm_runtime_put_sync(hba->dev);
3875 return;
3876}
3877
3878
3879static void ufshcd_complete_requests(struct ufs_hba *hba)
3880{
3881 ufshcd_transfer_req_compl(hba);
3882 ufshcd_tmc_handler(hba);
3883}
3884
/**
 * ufshcd_quirk_dl_nac_errors - checks whether error handling is required
 * to recover from the DL NAC errors or not
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
3892static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
3893{
3894 unsigned long flags;
3895 bool err_handling = true;
3896
3897 spin_lock_irqsave(hba->host->host_lock, flags);
3898
3899
3900
3901
3902 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
3903 goto out;
3904
3905 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
3906 ((hba->saved_err & UIC_ERROR) &&
3907 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
3908 goto out;
3909
3910 if ((hba->saved_err & UIC_ERROR) &&
3911 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
3912 int err;
3913
3914
3915
3916 spin_unlock_irqrestore(hba->host->host_lock, flags);
3917 msleep(50);
3918 spin_lock_irqsave(hba->host->host_lock, flags);
3919
3920
3921
3922
3923
3924 if ((hba->saved_err & INT_FATAL_ERRORS) ||
3925 ((hba->saved_err & UIC_ERROR) &&
3926 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
3927 goto out;
3928
 /*
 * As DL NAC is the only error received so far, send out NOP
 * command to confirm if link is still active or not.
 *   - If we don't get any response then do error recovery.
 *   - If we get response then clear the DL NAC error bit.
 */
3936 spin_unlock_irqrestore(hba->host->host_lock, flags);
3937 err = ufshcd_verify_dev_init(hba);
3938 spin_lock_irqsave(hba->host->host_lock, flags);
3939
3940 if (err)
3941 goto out;
3942
3943
3944 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
3945 hba->saved_err &= ~UIC_ERROR;
3946
3947 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
3948 if (!hba->saved_uic_err) {
3949 err_handling = false;
3950 goto out;
3951 }
3952 }
3953out:
3954 spin_unlock_irqrestore(hba->host->host_lock, flags);
3955 return err_handling;
3956}
3957
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
3962static void ufshcd_err_handler(struct work_struct *work)
3963{
3964 struct ufs_hba *hba;
3965 unsigned long flags;
3966 u32 err_xfer = 0;
3967 u32 err_tm = 0;
3968 int err = 0;
3969 int tag;
3970 bool needs_reset = false;
3971
3972 hba = container_of(work, struct ufs_hba, eh_work);
3973
3974 pm_runtime_get_sync(hba->dev);
3975 ufshcd_hold(hba, false);
3976
3977 spin_lock_irqsave(hba->host->host_lock, flags);
3978 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
3979 goto out;
3980
3981 hba->ufshcd_state = UFSHCD_STATE_RESET;
3982 ufshcd_set_eh_in_progress(hba);
3983
3984
3985 ufshcd_complete_requests(hba);
3986
3987 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
3988 bool ret;
3989
3990 spin_unlock_irqrestore(hba->host->host_lock, flags);
3991
3992 ret = ufshcd_quirk_dl_nac_errors(hba);
3993 spin_lock_irqsave(hba->host->host_lock, flags);
3994 if (!ret)
3995 goto skip_err_handling;
3996 }
3997 if ((hba->saved_err & INT_FATAL_ERRORS) ||
3998 ((hba->saved_err & UIC_ERROR) &&
3999 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
4000 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
4001 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
4002 needs_reset = true;
4003
4004
4005
4006
4007
4008
4009 if (needs_reset)
4010 goto skip_pending_xfer_clear;
4011
4012
4013 spin_unlock_irqrestore(hba->host->host_lock, flags);
4014
4015 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
4016 if (ufshcd_clear_cmd(hba, tag)) {
4017 err_xfer = true;
4018 goto lock_skip_pending_xfer_clear;
4019 }
4020 }
4021
4022
4023 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
4024 if (ufshcd_clear_tm_cmd(hba, tag)) {
4025 err_tm = true;
4026 goto lock_skip_pending_xfer_clear;
4027 }
4028 }
4029
4030lock_skip_pending_xfer_clear:
4031 spin_lock_irqsave(hba->host->host_lock, flags);
4032
4033
4034 ufshcd_complete_requests(hba);
4035
4036 if (err_xfer || err_tm)
4037 needs_reset = true;
4038
4039skip_pending_xfer_clear:
4040
4041 if (needs_reset) {
4042 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
4043
 /*
 * ufshcd_reset_and_restore() does the link reinitialization
 * which will need at least one empty doorbell slot to send the
 * device management commands (NOP and query commands).
 * If there is no slot empty at this moment then free up last
 * slot forcefully.
 */
4051 if (hba->outstanding_reqs == max_doorbells)
4052 __ufshcd_transfer_req_compl(hba,
4053 (1UL << (hba->nutrs - 1)));
4054
4055 spin_unlock_irqrestore(hba->host->host_lock, flags);
4056 err = ufshcd_reset_and_restore(hba);
4057 spin_lock_irqsave(hba->host->host_lock, flags);
4058 if (err) {
4059 dev_err(hba->dev, "%s: reset and restore failed\n",
4060 __func__);
4061 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4062 }
4063
4064
4065
4066
4067 scsi_report_bus_reset(hba->host, 0);
4068 hba->saved_err = 0;
4069 hba->saved_uic_err = 0;
4070 }
4071
4072skip_err_handling:
4073 if (!needs_reset) {
4074 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4075 if (hba->saved_err || hba->saved_uic_err)
4076 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
4077 __func__, hba->saved_err, hba->saved_uic_err);
4078 }
4079
4080 ufshcd_clear_eh_in_progress(hba);
4081
4082out:
4083 spin_unlock_irqrestore(hba->host->host_lock, flags);
4084 scsi_unblock_requests(hba->host);
4085 ufshcd_release(hba);
4086 pm_runtime_put_sync(hba->dev);
4087}
4088
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags
 * @hba: per-adapter instance
 */
4093static void ufshcd_update_uic_error(struct ufs_hba *hba)
4094{
4095 u32 reg;
4096
 /* PA_INIT_ERROR is fatal and needs UIC reset */
4098 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
4099 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
4100 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
4101 else if (hba->dev_quirks &
4102 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4103 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
4104 hba->uic_error |=
4105 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4106 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
4107 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
4108 }
4109
4110
4111 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
4112 if (reg)
4113 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
4114
4115 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
4116 if (reg)
4117 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
4118
4119 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
4120 if (reg)
4121 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
4122
4123 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
4124 __func__, hba->uic_error);
4125}
4126
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
4131static void ufshcd_check_errors(struct ufs_hba *hba)
4132{
4133 bool queue_eh_work = false;
4134
4135 if (hba->errors & INT_FATAL_ERRORS)
4136 queue_eh_work = true;
4137
4138 if (hba->errors & UIC_ERROR) {
4139 hba->uic_error = 0;
4140 ufshcd_update_uic_error(hba);
4141 if (hba->uic_error)
4142 queue_eh_work = true;
4143 }
4144
4145 if (queue_eh_work) {
4146
4147
4148
4149
4150 hba->saved_err |= hba->errors;
4151 hba->saved_uic_err |= hba->uic_error;
4152
4153
4154 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
4155
4156 scsi_block_requests(hba->host);
4157
4158 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4159 schedule_work(&hba->eh_work);
4160 }
4161 }
4162
4163
4164
4165
4166
4167
4168}
4169
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
4174static void ufshcd_tmc_handler(struct ufs_hba *hba)
4175{
4176 u32 tm_doorbell;
4177
4178 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4179 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
4180 wake_up(&hba->tm_wq);
4181}
4182
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
4188static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
4189{
4190 hba->errors = UFSHCD_ERROR_MASK & intr_status;
4191 if (hba->errors)
4192 ufshcd_check_errors(hba);
4193
4194 if (intr_status & UFSHCD_UIC_MASK)
4195 ufshcd_uic_cmd_compl(hba, intr_status);
4196
4197 if (intr_status & UTP_TASK_REQ_COMPL)
4198 ufshcd_tmc_handler(hba);
4199
4200 if (intr_status & UTP_TRANSFER_REQ_COMPL)
4201 ufshcd_transfer_req_compl(hba);
4202}
4203
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED if the interrupt was serviced, IRQ_NONE otherwise
 */
4212static irqreturn_t ufshcd_intr(int irq, void *__hba)
4213{
4214 u32 intr_status, enabled_intr_status;
4215 irqreturn_t retval = IRQ_NONE;
4216 struct ufs_hba *hba = __hba;
4217
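 /*
 * Illustrative note (not from the original source): the raw status is
 * masked with the INTERRUPT_ENABLE register so only sources this
 * driver armed are serviced, while every latched bit is still written
 * back to INTERRUPT_STATUS below to acknowledge the controller.
 */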
4218 spin_lock(hba->host->host_lock);
4219 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
4220 enabled_intr_status =
4221 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
4222
4223 if (intr_status)
4224 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
4225
4226 if (enabled_intr_status) {
4227 ufshcd_sl_intr(hba, enabled_intr_status);
4228 retval = IRQ_HANDLED;
4229 }
4230 spin_unlock(hba->host->host_lock);
4231 return retval;
4232}
4233
4234static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
4235{
4236 int err = 0;
4237 u32 mask = 1 << tag;
4238 unsigned long flags;
4239
4240 if (!test_bit(tag, &hba->outstanding_tasks))
4241 goto out;
4242
4243 spin_lock_irqsave(hba->host->host_lock, flags);
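 /*
 * Illustrative note (not from the original source): per UFSHCI, a
 * slot in the UTP task request list clear register is cleared by
 * writing its bit as 0, hence the inverted mask below.
 */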
4244 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
4245 spin_unlock_irqrestore(hba->host->host_lock, flags);
4246
4247
4248 err = ufshcd_wait_for_register(hba,
4249 REG_UTP_TASK_REQ_DOOR_BELL,
4250 mask, 0, 1000, 1000, true);
4251out:
4252 return err;
4253}
4254
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which the TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
4265static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4266 u8 tm_function, u8 *tm_response)
4267{
4268 struct utp_task_req_desc *task_req_descp;
4269 struct utp_upiu_task_req *task_req_upiup;
4270 struct Scsi_Host *host;
4271 unsigned long flags;
4272 int free_slot;
4273 int err;
4274 int task_tag;
4275
4276 host = hba->host;
4277
 /*
 * Get free slot, sleep if slots are unavailable.
 * Even though we use wait_event() which sleeps indefinitely,
 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 */
4283 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
4284 ufshcd_hold(hba, false);
4285
4286 spin_lock_irqsave(host->host_lock, flags);
4287 task_req_descp = hba->utmrdl_base_addr;
4288 task_req_descp += free_slot;
4289
4290
4291 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
4292 task_req_descp->header.dword_2 =
4293 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
4294
4295
4296 task_req_upiup =
4297 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
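 /*
 * Illustrative note (not from the original source): task management
 * tags are numbered after the transfer request slots, so TM slot N
 * gets tag nutrs + N and can never collide with a SCSI command tag.
 */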
4298 task_tag = hba->nutrs + free_slot;
4299 task_req_upiup->header.dword_0 =
4300 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
4301 lun_id, task_tag);
4302 task_req_upiup->header.dword_1 =
4303 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
4304
4305
4306
4307
4308 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4309 task_req_upiup->input_param2 = cpu_to_be32(task_id);
4310
4311
4312 __set_bit(free_slot, &hba->outstanding_tasks);
4313
4314
4315 wmb();
4316
4317 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
4318
4319 spin_unlock_irqrestore(host->host_lock, flags);
4320
4321
4322 err = wait_event_timeout(hba->tm_wq,
4323 test_bit(free_slot, &hba->tm_condition),
4324 msecs_to_jiffies(TM_CMD_TIMEOUT));
4325 if (!err) {
4326 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
4327 __func__, tm_function);
4328 if (ufshcd_clear_tm_cmd(hba, free_slot))
4329 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
4330 __func__, free_slot);
4331 err = -ETIMEDOUT;
4332 } else {
4333 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
4334 }
4335
4336 clear_bit(free_slot, &hba->tm_condition);
4337 ufshcd_put_tm_slot(hba, free_slot);
4338 wake_up(&hba->tm_tag_wq);
4339
4340 ufshcd_release(hba);
4341 return err;
4342}
4343
4344
4345
4346
4347
4348
4349
4350
4351static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
4352{
4353 struct Scsi_Host *host;
4354 struct ufs_hba *hba;
4355 unsigned int tag;
4356 u32 pos;
4357 int err;
4358 u8 resp = 0xF;
4359 struct ufshcd_lrb *lrbp;
4360 unsigned long flags;
4361
4362 host = cmd->device->host;
4363 hba = shost_priv(host);
4364 tag = cmd->request->tag;
4365
4366 lrbp = &hba->lrb[tag];
4367 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
4368 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4369 if (!err)
4370 err = resp;
4371 goto out;
4372 }
4373
4374
4375 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
4376 if (hba->lrb[pos].lun == lrbp->lun) {
4377 err = ufshcd_clear_cmd(hba, pos);
4378 if (err)
4379 break;
4380 }
4381 }
4382 spin_lock_irqsave(host->host_lock, flags);
4383 ufshcd_transfer_req_compl(hba);
4384 spin_unlock_irqrestore(host->host_lock, flags);
4385out:
4386 if (!err) {
4387 err = SUCCESS;
4388 } else {
4389 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4390 err = FAILED;
4391 }
4392 return err;
4393}
4394
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in the device by sending UFS_ABORT_TASK task
 * management command, and in the host controller by clearing the door-bell
 * register. There can be a race between the controller sending the command
 * to the device while abort is issued. To avoid that, first issue
 * UFS_QUERY_TASK to check if the command is really issued and then try to
 * abort it.
 *
 * Returns SUCCESS/FAILED
 */
4407static int ufshcd_abort(struct scsi_cmnd *cmd)
4408{
4409 struct Scsi_Host *host;
4410 struct ufs_hba *hba;
4411 unsigned long flags;
4412 unsigned int tag;
4413 int err = 0;
4414 int poll_cnt;
4415 u8 resp = 0xF;
4416 struct ufshcd_lrb *lrbp;
4417 u32 reg;
4418
4419 host = cmd->device->host;
4420 hba = shost_priv(host);
4421 tag = cmd->request->tag;
4422 if (!ufshcd_valid_tag(hba, tag)) {
4423 dev_err(hba->dev,
4424 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
4425 __func__, tag, cmd, cmd->request);
4426 BUG();
4427 }
4428
4429 ufshcd_hold(hba, false);
4430 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4431
4432 if (!(test_bit(tag, &hba->outstanding_reqs))) {
4433 dev_err(hba->dev,
4434 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
4435 __func__, tag, hba->outstanding_reqs, reg);
4436 goto out;
4437 }
4438
4439 if (!(reg & (1 << tag))) {
4440 dev_err(hba->dev,
4441 "%s: cmd was completed, but without a notifying intr, tag = %d",
4442 __func__, tag);
4443 }
4444
4445 lrbp = &hba->lrb[tag];
4446 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4447 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4448 UFS_QUERY_TASK, &resp);
4449 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4450
4451 break;
4452 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4453
4454
4455
4456
4457 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4458 if (reg & (1 << tag)) {
4459
4460 usleep_range(100, 200);
4461 continue;
4462 }
4463
4464 goto out;
4465 } else {
4466 if (!err)
4467 err = resp;
4468 goto out;
4469 }
4470 }
4471
4472 if (!poll_cnt) {
4473 err = -EBUSY;
4474 goto out;
4475 }
4476
4477 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4478 UFS_ABORT_TASK, &resp);
4479 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4480 if (!err)
4481 err = resp;
4482 goto out;
4483 }
4484
4485 err = ufshcd_clear_cmd(hba, tag);
4486 if (err)
4487 goto out;
4488
4489 scsi_dma_unmap(cmd);
4490
4491 spin_lock_irqsave(host->host_lock, flags);
4492 ufshcd_outstanding_req_clear(hba, tag);
4493 hba->lrb[tag].cmd = NULL;
4494 spin_unlock_irqrestore(host->host_lock, flags);
4495
4496 clear_bit_unlock(tag, &hba->lrb_in_use);
4497 wake_up(&hba->dev_cmd.tag_wq);
4498
4499out:
4500 if (!err) {
4501 err = SUCCESS;
4502 } else {
4503 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4504 err = FAILED;
4505 }

 /*
 * This ufshcd_release() corresponds to the original scsi cmd that got
 * aborted here (as we won't get any IRQ for it).
 */
4511 ufshcd_release(hba);
4512 return err;
4513}
4514
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to the
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to their default state.
 *
 * Returns zero on success, non-zero on failure
 */
4525static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4526{
4527 int err;
4528 unsigned long flags;
4529
4530
4531 spin_lock_irqsave(hba->host->host_lock, flags);
4532 ufshcd_hba_stop(hba, false);
4533 spin_unlock_irqrestore(hba->host->host_lock, flags);
4534
4535 err = ufshcd_hba_enable(hba);
4536 if (err)
4537 goto out;
4538
4539
4540 err = ufshcd_probe_hba(hba);
4541
4542 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
4543 err = -EIO;
4544out:
4545 if (err)
4546 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
4547
4548 return err;
4549}
4550
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
4560static int ufshcd_reset_and_restore(struct ufs_hba *hba)
4561{
4562 int err = 0;
4563 unsigned long flags;
4564 int retries = MAX_HOST_RESET_RETRIES;
4565
4566 do {
4567 err = ufshcd_host_reset_and_restore(hba);
4568 } while (err && --retries);
4569
4570
4571
4572
4573
4574 spin_lock_irqsave(hba->host->host_lock, flags);
4575 ufshcd_transfer_req_compl(hba);
4576 ufshcd_tmc_handler(hba);
4577 spin_unlock_irqrestore(hba->host->host_lock, flags);
4578
4579 return err;
4580}
4581
4582
4583
4584
4585
4586
4587
4588static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
4589{
4590 int err;
4591 unsigned long flags;
4592 struct ufs_hba *hba;
4593
4594 hba = shost_priv(cmd->device->host);
4595
4596 ufshcd_hold(hba, false);
4597
 /*
 * Check if there is any race with fatal error handling.
 * If so, wait for it to complete. Even though fatal error
 * handling does reset and restore in some cases, don't assume
 * anything out of it. We are just avoiding race here.
 */
4603 do {
4604 spin_lock_irqsave(hba->host->host_lock, flags);
4605 if (!(work_pending(&hba->eh_work) ||
4606 hba->ufshcd_state == UFSHCD_STATE_RESET))
4607 break;
4608 spin_unlock_irqrestore(hba->host->host_lock, flags);
4609 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4610 flush_work(&hba->eh_work);
4611 } while (1);
4612
4613 hba->ufshcd_state = UFSHCD_STATE_RESET;
4614 ufshcd_set_eh_in_progress(hba);
4615 spin_unlock_irqrestore(hba->host->host_lock, flags);
4616
4617 err = ufshcd_reset_and_restore(hba);
4618
4619 spin_lock_irqsave(hba->host->host_lock, flags);
4620 if (!err) {
4621 err = SUCCESS;
4622 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4623 } else {
4624 err = FAILED;
4625 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4626 }
4627 ufshcd_clear_eh_in_progress(hba);
4628 spin_unlock_irqrestore(hba->host->host_lock, flags);
4629
4630 ufshcd_release(hba);
4631 return err;
4632}
4633
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for the given regulator capability
 */
4642static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4643{
4644 int i;
4645 int curr_uA;
4646 u16 data;
4647 u16 unit;
4648
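 /*
 * Illustrative note (not from the original source): each power
 * descriptor entry is a big-endian 16-bit word; the upper bits select
 * the current unit (nA/uA/mA/A) and the remaining bits hold the value,
 * which is normalized to uA before comparing against sup_curr_uA.
 */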
4649 for (i = start_scan; i >= 0; i--) {
4650 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
4651 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4652 ATTR_ICC_LVL_UNIT_OFFSET;
4653 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
4654 switch (unit) {
4655 case UFSHCD_NANO_AMP:
4656 curr_uA = curr_uA / 1000;
4657 break;
4658 case UFSHCD_MILI_AMP:
4659 curr_uA = curr_uA * 1000;
4660 break;
4661 case UFSHCD_AMP:
4662 curr_uA = curr_uA * 1000 * 1000;
4663 break;
4664 case UFSHCD_MICRO_AMP:
4665 default:
4666 break;
4667 }
4668 if (sup_curr_uA >= curr_uA)
4669 break;
4670 }
 if (i < 0) {
 i = 0;
 pr_err("%s: Couldn't find valid icc_level, falling back to %d\n",
 __func__, i);
 }
4675
4676 return (u32)i;
4677}
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4689 u8 *desc_buf, int len)
4690{
4691 u32 icc_level = 0;
4692
4693 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4694 !hba->vreg_info.vccq2) {
4695 dev_err(hba->dev,
4696 "%s: Regulator capability was not set, actvIccLevel=%d",
4697 __func__, icc_level);
4698 goto out;
4699 }
4700
4701 if (hba->vreg_info.vcc)
4702 icc_level = ufshcd_get_max_icc_level(
4703 hba->vreg_info.vcc->max_uA,
4704 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4705 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4706
4707 if (hba->vreg_info.vccq)
4708 icc_level = ufshcd_get_max_icc_level(
4709 hba->vreg_info.vccq->max_uA,
4710 icc_level,
4711 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4712
4713 if (hba->vreg_info.vccq2)
4714 icc_level = ufshcd_get_max_icc_level(
4715 hba->vreg_info.vccq2->max_uA,
4716 icc_level,
4717 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4718out:
4719 return icc_level;
4720}
4721
4722static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4723{
4724 int ret;
4725 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4726 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4727
4728 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4729 if (ret) {
4730 dev_err(hba->dev,
4731 "%s: Failed reading power descriptor.len = %d ret = %d",
4732 __func__, buff_len, ret);
4733 return;
4734 }
4735
4736 hba->init_prefetch_data.icc_level =
4737 ufshcd_find_max_sup_active_icc_level(hba,
4738 desc_buf, buff_len);
4739 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4740 __func__, hba->init_prefetch_data.icc_level);
4741
4742 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4743 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4744 &hba->init_prefetch_data.icc_level);
4745
4746 if (ret)
4747 dev_err(hba->dev,
4748 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
 __func__, hba->init_prefetch_data.icc_level, ret);
4750
4751}
4752
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by the "POWER
 * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
 * condition" field will take effect only when it is sent to the "UFS
 * device" well known logical unit, hence we require a scsi_device instance
 * to represent this logical unit in order for the UFS host driver to send
 * the SSU command for power management.
 *
 * We also require a scsi_device instance for the "RPMB" (Replay Protected
 * Memory Block) LU so user space processes can control this LU. User space
 * may also want access to the BOOT LU.
 *
 * This function adds scsi device instances for each of the well known LUs
 * (except the "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if it failed to add any of them).
 */
4779static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4780{
4781 int ret = 0;
4782 struct scsi_device *sdev_rpmb;
4783 struct scsi_device *sdev_boot;
4784
4785 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4786 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4787 if (IS_ERR(hba->sdev_ufs_device)) {
4788 ret = PTR_ERR(hba->sdev_ufs_device);
4789 hba->sdev_ufs_device = NULL;
4790 goto out;
4791 }
4792 scsi_device_put(hba->sdev_ufs_device);
4793
4794 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4795 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4796 if (IS_ERR(sdev_boot)) {
4797 ret = PTR_ERR(sdev_boot);
4798 goto remove_sdev_ufs_device;
4799 }
4800 scsi_device_put(sdev_boot);
4801
4802 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4803 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4804 if (IS_ERR(sdev_rpmb)) {
4805 ret = PTR_ERR(sdev_rpmb);
4806 goto remove_sdev_boot;
4807 }
4808 scsi_device_put(sdev_rpmb);
4809 goto out;
4810
4811remove_sdev_boot:
4812 scsi_remove_device(sdev_boot);
4813remove_sdev_ufs_device:
4814 scsi_remove_device(hba->sdev_ufs_device);
4815out:
4816 return ret;
4817}
4818
4819static int ufs_get_device_info(struct ufs_hba *hba,
4820 struct ufs_device_info *card_data)
4821{
4822 int err;
4823 u8 model_index;
4824 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
4825 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
4826
4827 err = ufshcd_read_device_desc(hba, desc_buf,
4828 QUERY_DESC_DEVICE_MAX_SIZE);
4829 if (err) {
4830 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
4831 __func__, err);
4832 goto out;
4833 }
4834
4835
4836
4837
4838
4839 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
4840 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
4841
4842 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4843
4844 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
4845 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
4846 if (err) {
4847 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
4848 __func__, err);
4849 goto out;
4850 }
4851
4852 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
4853 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
4854 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
4855 MAX_MODEL_LEN));
4856
4857
4858 card_data->model[MAX_MODEL_LEN] = '\0';
4859
4860out:
4861 return err;
4862}
4863
4864void ufs_advertise_fixup_device(struct ufs_hba *hba)
4865{
4866 int err;
4867 struct ufs_dev_fix *f;
4868 struct ufs_device_info card_data;
4869
4870 card_data.wmanufacturerid = 0;
4871
4872 err = ufs_get_device_info(hba, &card_data);
4873 if (err) {
4874 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
4875 __func__, err);
4876 return;
4877 }
4878
4879 for (f = ufs_fixups; f->quirk; f++) {
4880 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
4881 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
4882 (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
4883 !strcmp(f->card.model, UFS_ANY_MODEL)))
4884 hba->dev_quirks |= f->quirk;
4885 }
4886}
4887
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate can be tuned manually if the UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help
 * reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
4899static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
4900{
4901 int ret = 0;
4902 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
4903
4904 ret = ufshcd_dme_peer_get(hba,
4905 UIC_ARG_MIB_SEL(
4906 RX_MIN_ACTIVATETIME_CAPABILITY,
4907 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4908 &peer_rx_min_activatetime);
4909 if (ret)
4910 goto out;
4911
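 /*
 * Illustrative note (not from the original source): the peer reports
 * RX_MIN_ACTIVATETIME in 100us units while PA_TACTIVATE is programmed
 * in 10us units, so the capability is rescaled before being written.
 */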
4912
4913 tuned_pa_tactivate =
4914 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
4915 / PA_TACTIVATE_TIME_UNIT_US);
4916 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
4917 tuned_pa_tactivate);
4918
4919out:
4920 return ret;
4921}
4922
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time can be tuned manually if the UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of the local
 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
4934static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
4935{
4936 int ret = 0;
4937 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
4938 u32 max_hibern8_time, tuned_pa_hibern8time;
4939
4940 ret = ufshcd_dme_get(hba,
4941 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
4942 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
4943 &local_tx_hibern8_time_cap);
4944 if (ret)
4945 goto out;
4946
4947 ret = ufshcd_dme_peer_get(hba,
4948 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
4949 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4950 &peer_rx_hibern8_time_cap);
4951 if (ret)
4952 goto out;
4953
4954 max_hibern8_time = max(local_tx_hibern8_time_cap,
4955 peer_rx_hibern8_time_cap);
4956
4957 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
4958 / PA_HIBERN8_TIME_UNIT_US);
4959 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
4960 tuned_pa_hibern8time);
4961out:
4962 return ret;
4963}
4964
4965static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4966{
4967 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
4968 ufshcd_tune_pa_tactivate(hba);
4969 ufshcd_tune_pa_hibern8time(hba);
4970 }
4971
4972 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
4973
4974 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
4975}
4976
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
4983static int ufshcd_probe_hba(struct ufs_hba *hba)
4984{
4985 int ret;
4986
4987 ret = ufshcd_link_startup(hba);
4988 if (ret)
4989 goto out;
4990
4991 ufshcd_init_pwr_info(hba);
4992
4993
4994 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
4995 hba->is_urgent_bkops_lvl_checked = false;
4996
4997
4998 ufshcd_set_link_active(hba);
4999
5000 ret = ufshcd_verify_dev_init(hba);
5001 if (ret)
5002 goto out;
5003
5004 ret = ufshcd_complete_dev_init(hba);
5005 if (ret)
5006 goto out;
5007
5008 ufs_advertise_fixup_device(hba);
5009 ufshcd_tune_unipro_params(hba);
5010
5011 ret = ufshcd_set_vccq_rail_unused(hba,
5012 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5013 if (ret)
5014 goto out;
5015
5016
5017 ufshcd_set_ufs_dev_active(hba);
5018 ufshcd_force_reset_auto_bkops(hba);
5019 hba->wlun_dev_clr_ua = true;
5020
5021 if (ufshcd_get_max_pwr_mode(hba)) {
5022 dev_err(hba->dev,
5023 "%s: Failed getting max supported power mode\n",
5024 __func__);
5025 } else {
5026 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5027 if (ret)
5028 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5029 __func__, ret);
5030 }
5031
5032
5033 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5034
5035
5036
5037
5038 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5039 bool flag;
5040
5041
5042 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
5043 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5044 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
5045 hba->dev_info.f_power_on_wp_en = flag;
5046
5047 if (!hba->is_init_prefetch)
5048 ufshcd_init_icc_levels(hba);
5049
5050
5051 if (ufshcd_scsi_add_wlus(hba))
5052 goto out;
5053
5054 scsi_scan_host(hba->host);
5055 pm_runtime_put_sync(hba->dev);
5056 }
5057
5058 if (!hba->is_init_prefetch)
5059 hba->is_init_prefetch = true;
5060
5061
5062 if (ufshcd_is_clkscaling_enabled(hba))
5063 devfreq_resume_device(hba->devfreq);
5064
5065out:
 /*
 * If we failed to initialize the device or the device is not
 * present, turn off the power/clocks etc.
 */
5070 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5071 pm_runtime_put_sync(hba->dev);
5072 ufshcd_hba_exit(hba);
5073 }
5074
5075 return ret;
5076}
5077
5078
5079
5080
5081
5082
5083static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5084{
5085 struct ufs_hba *hba = (struct ufs_hba *)data;
5086
5087 ufshcd_probe_hba(hba);
5088}
5089
5090static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5091{
5092 unsigned long flags;
5093 struct Scsi_Host *host;
5094 struct ufs_hba *hba;
5095 int index;
5096 bool found = false;
5097
5098 if (!scmd || !scmd->device || !scmd->device->host)
5099 return BLK_EH_NOT_HANDLED;
5100
5101 host = scmd->device->host;
5102 hba = shost_priv(host);
5103 if (!hba)
5104 return BLK_EH_NOT_HANDLED;
5105
5106 spin_lock_irqsave(host->host_lock, flags);
5107
5108 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5109 if (hba->lrb[index].cmd == scmd) {
5110 found = true;
5111 break;
5112 }
5113 }
5114
5115 spin_unlock_irqrestore(host->host_lock, flags);
5116
 /*
 * Bypass SCSI error handling and reset the block layer timer if this
 * SCSI command was not actually dispatched to UFS driver, otherwise
 * let SCSI layer handle the error as usual.
 */
5122 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5123}
5124
5125static struct scsi_host_template ufshcd_driver_template = {
5126 .module = THIS_MODULE,
5127 .name = UFSHCD,
5128 .proc_name = UFSHCD,
5129 .queuecommand = ufshcd_queuecommand,
5130 .slave_alloc = ufshcd_slave_alloc,
5131 .slave_configure = ufshcd_slave_configure,
5132 .slave_destroy = ufshcd_slave_destroy,
5133 .change_queue_depth = ufshcd_change_queue_depth,
5134 .eh_abort_handler = ufshcd_abort,
5135 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5136 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
5137 .eh_timed_out = ufshcd_eh_timed_out,
5138 .this_id = -1,
5139 .sg_tablesize = SG_ALL,
5140 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5141 .can_queue = UFSHCD_CAN_QUEUE,
5142 .max_host_blocked = 1,
5143 .track_queue_depth = 1,
5144};

static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}

static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg,
					       UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}

static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV, uA_load;

	/* check the pointer before dereferencing it for reg and name */
	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}

static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (vreg->enabled || vreg->unused)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}

static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (!vreg->enabled || vreg->unused)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}

static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
	if (ret)
		goto out;

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}

static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

	return 0;
}

static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}

static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}

static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}

static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
	int ret = 0;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;
	else if (!info->vccq)
		goto out;

	if (unused) {
		/* shut off the rail here */
		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
		/*
		 * Mark this rail as no longer used, so it doesn't get enabled
		 * later by mistake.
		 */
		if (!ret)
			info->vccq->unused = true;
	} else {
		/*
		 * The rail should have been enabled already, so just make
		 * sure that the unused flag is cleared.
		 */
		info->vccq->unused = false;
	}
out:
	return ret;
}

static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
				continue;

			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on);
out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
	return ret;
}

static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}

static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}

static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_clocks(hba, false);

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}

static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}

static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}

static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				SCSI_SENSE_BUFFERSIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
				SCSI_SENSE_BUFFERSIZE, NULL,
				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
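
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode is set successfully;
 * returns a non-zero value otherwise.
 */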
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If SCSI commands fail, the SCSI mid-layer schedules SCSI error-
	 * handling, which would wait for the host to be resumed. Since we
	 * know we are functional while we are here, skip host resume in
	 * error handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * This function would generally be called from the power management
	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}

static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, the link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || (check_for_bkops &&
		  !hba->auto_bkops_enabled))) {
		/*
		 * Put the link in Hibern8 first so that it is in a defined
		 * low power state before it is taken down.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret)
			goto out;
		/*
		 * Change the controller state to "reset state", which
		 * should also put the link in off/reset state.
		 */
		ufshcd_hba_stop(hba, true);
		/*
		 * TODO: check if we need any delay to make sure that
		 * the controller is reset.
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}

static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * Some UFS devices seem to keep drawing more than sleep current
	 * (at least for 500us) from the UFS rails (especially from VCCQ).
	 * To avoid this situation, add a 2ms delay before putting these
	 * UFS rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
	 * some power.
	 *
	 * If the UFS device and the link are both in OFF state, disable the
	 * power supply to the VCC, VCCQ and VCCQ2 rails as power to these
	 * rails is no longer required.
	 *
	 * Ignore errors returned by ufshcd_toggle_vreg() as the device is
	 * anyway in a low power state, which saves some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}

static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}

static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
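
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both the UFS device and the UFS link are powered off.
 *
 * NOTE: UFS device & link must be active before we enter this function.
 *
 * Returns 0 for success and non-zero for failure.
 */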
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto out;

	/* UFS device & link must be active before we enter this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto out;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	    ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	     !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * The clock scaling needs access to controller registers. Hence, wait
	 * for pending clock scaling work to be done before clocks are
	 * turned off.
	 */
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
	/*
	 * Call the vendor specific suspend callback. As these callbacks may
	 * access vendor specific host controller register space, call them
	 * before the host clocks are turned off.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	ret = ufshcd_vops_setup_clocks(hba, false);
	if (ret)
		goto vops_resume;

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Power off the host controller rail if the link is off */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

vops_resume:
	ufshcd_vops_resume(hba, pm_op);
set_link_active:
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
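
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure.
 */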
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call the vendor specific resume callback. As these callbacks may
	 * access vendor specific host controller register space, call them
	 * when the host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active.
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	/*
	 * If BKOPs operations are urgently needed at this moment then
	 * keep auto-bkops enabled or else disable it.
	 */
	ufshcd_urgent_bkops(hba);
	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
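
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure.
 */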
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		return 0;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is a possibility that the device may still be
			 * in active state during the runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different than what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes a long time, we might optimize it in
		 * the future by not resuming all the devices & links but only
		 * the required ones.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
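
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure.
 */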
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);
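
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure.
 */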
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
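
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If the UFS device is in sleep state, turn ON the VCC rail and bring
 *    the UFS device to active state
 * 4. If auto-bkops is enabled on the device, disable it
 *
 * So the following would be the possible power state after this function
 * returns successfully:
 *	S1: UFS device, Link, device power supply are all in active state
 *
 * Returns 0 for success and non-zero for failure.
 */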
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
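
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both the UFS device and the UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */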
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
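
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */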
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	scsi_host_put(hba->host);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
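
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter handle
 */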
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
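
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure.
 */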
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
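
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Returns 0 on success, non-zero value on failure.
 */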
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev, "Invalid memory reference: dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
			       sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	return ret;
}

static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	return err;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
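
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure.
 */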
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
			"Invalid memory reference: mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queues for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);
	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering the UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						  "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			/* propagate the error instead of returning success */
			err = PTR_ERR(hba->devfreq);
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
				PTR_ERR(hba->devfreq));
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device-initialize-sequence hasn't been invoked yet.
	 * Set the device to power-off state.
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	scsi_host_put(host);
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);