1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/async.h>
41#include <linux/devfreq.h>
42#include <linux/nls.h>
43#include <linux/of.h>
44#include "ufshcd.h"
45#include "ufs_quirks.h"
46#include "unipro.h"
47
48#define CREATE_TRACE_POINTS
49#include <trace/events/ufs.h>
50
51#define UFSHCD_REQ_SENSE_SIZE 18
52
53#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
54 UTP_TASK_REQ_COMPL |\
55 UFSHCD_ERROR_MASK)
56
57#define UIC_CMD_TIMEOUT 500
58
59
60#define NOP_OUT_RETRIES 10
61
62#define NOP_OUT_TIMEOUT 30
63
64
65#define QUERY_REQ_RETRIES 3
66
67#define QUERY_REQ_TIMEOUT 1500
68
69
70#define TM_CMD_TIMEOUT 100
71
72
73#define UFS_UIC_COMMAND_RETRIES 3
74
75
76#define DME_LINKSTARTUP_RETRIES 3
77
78
79#define UIC_HIBERN8_ENTER_RETRIES 3
80
81
82#define MAX_HOST_RESET_RETRIES 5
83
84
85#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
86
87
88#define INT_AGGR_DEF_TO 0x02
89
90#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
91 ({ \
92 int _ret; \
93 if (_on) \
94 _ret = ufshcd_enable_vreg(_dev, _vreg); \
95 else \
96 _ret = ufshcd_disable_vreg(_dev, _vreg); \
97 _ret; \
98 })
99
100#define ufshcd_hex_dump(prefix_str, buf, len) \
101print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
102
103enum {
104 UFSHCD_MAX_CHANNEL = 0,
105 UFSHCD_MAX_ID = 1,
106 UFSHCD_CMD_PER_LUN = 32,
107 UFSHCD_CAN_QUEUE = 32,
108};
109
110
111enum {
112 UFSHCD_STATE_RESET,
113 UFSHCD_STATE_ERROR,
114 UFSHCD_STATE_OPERATIONAL,
115 UFSHCD_STATE_EH_SCHEDULED,
116};
117
118
119enum {
120 UFSHCD_EH_IN_PROGRESS = (1 << 0),
121};
122
123
124enum {
125 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0),
126 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1),
127 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2),
128 UFSHCD_UIC_NL_ERROR = (1 << 3),
129 UFSHCD_UIC_TL_ERROR = (1 << 4),
130 UFSHCD_UIC_DME_ERROR = (1 << 5),
131};
132
133#define ufshcd_set_eh_in_progress(h) \
134 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
135#define ufshcd_eh_in_progress(h) \
136 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
137#define ufshcd_clear_eh_in_progress(h) \
138 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
139
140#define ufshcd_set_ufs_dev_active(h) \
141 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
142#define ufshcd_set_ufs_dev_sleep(h) \
143 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
144#define ufshcd_set_ufs_dev_poweroff(h) \
145 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
146#define ufshcd_is_ufs_dev_active(h) \
147 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
148#define ufshcd_is_ufs_dev_sleep(h) \
149 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
150#define ufshcd_is_ufs_dev_poweroff(h) \
151 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
152
153static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
154 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
155 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
156 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
157 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
158 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
159 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
160};
161
162static inline enum ufs_dev_pwr_mode
163ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
164{
165 return ufs_pm_lvl_states[lvl].dev_state;
166}
167
168static inline enum uic_link_state
169ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
170{
171 return ufs_pm_lvl_states[lvl].link_state;
172}
173
174static inline enum ufs_pm_level
175ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
176 enum uic_link_state link_state)
177{
178 enum ufs_pm_level lvl;
179
180 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
181 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
182 (ufs_pm_lvl_states[lvl].link_state == link_state))
183 return lvl;
184 }
185
186
187 return UFS_PM_LVL_0;
188}
189
190static struct ufs_dev_fix ufs_fixups[] = {
191
192 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
193 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
194 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
195 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
196 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
197 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
198 UFS_DEVICE_NO_FASTAUTO),
199 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
200 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
201 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
202 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
203 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
204 UFS_DEVICE_QUIRK_PA_TACTIVATE),
205 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
206 UFS_DEVICE_QUIRK_PA_TACTIVATE),
207 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
208 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
209 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
210
211 END_FIX
212};
213
214static void ufshcd_tmc_handler(struct ufs_hba *hba);
215static void ufshcd_async_scan(void *data, async_cookie_t cookie);
216static int ufshcd_reset_and_restore(struct ufs_hba *hba);
217static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
218static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
219static void ufshcd_hba_exit(struct ufs_hba *hba);
220static int ufshcd_probe_hba(struct ufs_hba *hba);
221static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
222 bool skip_ref_clk);
223static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
224static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
225static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
226static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
227static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
228static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
229static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
230static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
231static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
232static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
233static irqreturn_t ufshcd_intr(int irq, void *__hba);
234static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
235 struct ufs_pa_layer_attr *desired_pwr_mode);
236static int ufshcd_change_power_mode(struct ufs_hba *hba,
237 struct ufs_pa_layer_attr *pwr_mode);
238static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
239{
240 return tag >= 0 && tag < hba->nutrs;
241}
242
243static inline int ufshcd_enable_irq(struct ufs_hba *hba)
244{
245 int ret = 0;
246
247 if (!hba->is_irq_enabled) {
248 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
249 hba);
250 if (ret)
251 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
252 __func__, ret);
253 hba->is_irq_enabled = true;
254 }
255
256 return ret;
257}
258
259static inline void ufshcd_disable_irq(struct ufs_hba *hba)
260{
261 if (hba->is_irq_enabled) {
262 free_irq(hba->irq, hba);
263 hba->is_irq_enabled = false;
264 }
265}
266
267
268static inline void ufshcd_remove_non_printable(char *val)
269{
270 if (!val)
271 return;
272
273 if (*val < 0x20 || *val > 0x7e)
274 *val = ' ';
275}
276
277static void ufshcd_add_command_trace(struct ufs_hba *hba,
278 unsigned int tag, const char *str)
279{
280 sector_t lba = -1;
281 u8 opcode = 0;
282 u32 intr, doorbell;
283 struct ufshcd_lrb *lrbp;
284 int transfer_len = -1;
285
286 if (!trace_ufshcd_command_enabled())
287 return;
288
289 lrbp = &hba->lrb[tag];
290
291 if (lrbp->cmd) {
292 opcode = (u8)(*lrbp->cmd->cmnd);
293 if ((opcode == READ_10) || (opcode == WRITE_10)) {
294
295
296
297
298 if (lrbp->cmd->request && lrbp->cmd->request->bio)
299 lba =
300 lrbp->cmd->request->bio->bi_iter.bi_sector;
301 transfer_len = be32_to_cpu(
302 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
303 }
304 }
305
306 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
307 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
308 trace_ufshcd_command(dev_name(hba->dev), str, tag,
309 doorbell, transfer_len, intr, lba, opcode);
310}
311
312static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
313{
314 struct ufs_clk_info *clki;
315 struct list_head *head = &hba->clk_list_head;
316
317 if (list_empty(head))
318 return;
319
320 list_for_each_entry(clki, head, list) {
321 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
322 clki->max_freq)
323 dev_err(hba->dev, "clk: %s, rate: %u\n",
324 clki->name, clki->curr_freq);
325 }
326}
327
328static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
329 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
330{
331 int i;
332
333 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
334 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
335
336 if (err_hist->reg[p] == 0)
337 continue;
338 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
339 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
340 }
341}
342
343static void ufshcd_print_host_regs(struct ufs_hba *hba)
344{
345
346
347
348
349
350
351
352
353 ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
354 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
355 hba->ufs_version, hba->capabilities);
356 dev_err(hba->dev,
357 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
358 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
359 dev_err(hba->dev,
360 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
361 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
362 hba->ufs_stats.hibern8_exit_cnt);
363
364 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
365 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
366 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
367 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
368 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
369
370 ufshcd_print_clk_freqs(hba);
371
372 if (hba->vops && hba->vops->dbg_register_dump)
373 hba->vops->dbg_register_dump(hba);
374}
375
376static
377void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
378{
379 struct ufshcd_lrb *lrbp;
380 int prdt_length;
381 int tag;
382
383 for_each_set_bit(tag, &bitmap, hba->nutrs) {
384 lrbp = &hba->lrb[tag];
385
386 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
387 tag, ktime_to_us(lrbp->issue_time_stamp));
388 dev_err(hba->dev,
389 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
390 tag, (u64)lrbp->utrd_dma_addr);
391
392 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
393 sizeof(struct utp_transfer_req_desc));
394 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
395 (u64)lrbp->ucd_req_dma_addr);
396 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
397 sizeof(struct utp_upiu_req));
398 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
399 (u64)lrbp->ucd_rsp_dma_addr);
400 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
401 sizeof(struct utp_upiu_rsp));
402
403 prdt_length = le16_to_cpu(
404 lrbp->utr_descriptor_ptr->prd_table_length);
405 dev_err(hba->dev,
406 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
407 tag, prdt_length,
408 (u64)lrbp->ucd_prdt_dma_addr);
409
410 if (pr_prdt)
411 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
412 sizeof(struct ufshcd_sg_entry) * prdt_length);
413 }
414}
415
416static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
417{
418 struct utp_task_req_desc *tmrdp;
419 int tag;
420
421 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
422 tmrdp = &hba->utmrdl_base_addr[tag];
423 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
424 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
425 sizeof(struct request_desc_header));
426 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
427 tag);
428 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
429 sizeof(struct utp_upiu_req));
430 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
431 tag);
432 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
433 sizeof(struct utp_task_req_desc));
434 }
435}
436
437static void ufshcd_print_host_state(struct ufs_hba *hba)
438{
439 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
440 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
441 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
442 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
443 hba->saved_err, hba->saved_uic_err);
444 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
445 hba->curr_dev_pwr_mode, hba->uic_link_state);
446 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
447 hba->pm_op_in_progress, hba->is_sys_suspended);
448 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
449 hba->auto_bkops_enabled, hba->host->host_self_blocked);
450 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
451 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
452 hba->eh_flags, hba->req_abort_count);
453 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
454 hba->capabilities, hba->caps);
455 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
456 hba->dev_quirks);
457}
458
459
460
461
462
463
464static void ufshcd_print_pwr_info(struct ufs_hba *hba)
465{
466 static const char * const names[] = {
467 "INVALID MODE",
468 "FAST MODE",
469 "SLOW_MODE",
470 "INVALID MODE",
471 "FASTAUTO_MODE",
472 "SLOWAUTO_MODE",
473 "INVALID MODE",
474 };
475
476 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
477 __func__,
478 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
479 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
480 names[hba->pwr_info.pwr_rx],
481 names[hba->pwr_info.pwr_tx],
482 hba->pwr_info.hs_rate);
483}
484
485
486
487
488
489
490
491
492
493
494
495
496
497int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
498 u32 val, unsigned long interval_us,
499 unsigned long timeout_ms, bool can_sleep)
500{
501 int err = 0;
502 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
503
504
505 val = val & mask;
506
507 while ((ufshcd_readl(hba, reg) & mask) != val) {
508 if (can_sleep)
509 usleep_range(interval_us, interval_us + 50);
510 else
511 udelay(interval_us);
512 if (time_after(jiffies, timeout)) {
513 if ((ufshcd_readl(hba, reg) & mask) != val)
514 err = -ETIMEDOUT;
515 break;
516 }
517 }
518
519 return err;
520}
521
522
523
524
525
526
527
528static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
529{
530 u32 intr_mask = 0;
531
532 switch (hba->ufs_version) {
533 case UFSHCI_VERSION_10:
534 intr_mask = INTERRUPT_MASK_ALL_VER_10;
535 break;
536 case UFSHCI_VERSION_11:
537 case UFSHCI_VERSION_20:
538 intr_mask = INTERRUPT_MASK_ALL_VER_11;
539 break;
540 case UFSHCI_VERSION_21:
541 default:
542 intr_mask = INTERRUPT_MASK_ALL_VER_21;
543 break;
544 }
545
546 return intr_mask;
547}
548
549
550
551
552
553
554
555static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
556{
557 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
558 return ufshcd_vops_get_ufs_hci_version(hba);
559
560 return ufshcd_readl(hba, REG_UFS_VERSION);
561}
562
563
564
565
566
567
568
569
570static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
571{
572 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
573 DEVICE_PRESENT) ? true : false;
574}
575
576
577
578
579
580
581
582
583static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
584{
585 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
586}
587
588
589
590
591
592
593
594
595static inline int
596ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
597{
598 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
599}
600
601
602
603
604
605
606
607
608
609
610static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
611{
612 int tag;
613 bool ret = false;
614
615 if (!free_slot)
616 goto out;
617
618 do {
619 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
620 if (tag >= hba->nutmrs)
621 goto out;
622 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
623
624 *free_slot = tag;
625 ret = true;
626out:
627 return ret;
628}
629
630static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
631{
632 clear_bit_unlock(slot, &hba->tm_slots_in_use);
633}
634
635
636
637
638
639
640static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
641{
642 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
643}
644
645
646
647
648
649
650static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
651{
652 __clear_bit(tag, &hba->outstanding_reqs);
653}
654
655
656
657
658
659
660
661static inline int ufshcd_get_lists_status(u32 reg)
662{
663 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
664}
665
666
667
668
669
670
671
672
673static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
674{
675 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
676 MASK_UIC_COMMAND_RESULT;
677}
678
679
680
681
682
683
684
685
686static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
687{
688 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
689}
690
691
692
693
694
695static inline int
696ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
697{
698 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
699}
700
701
702
703
704
705
706
707
708static inline int
709ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
710{
711 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
712}
713
714
715
716
717
718
719
720
721static inline unsigned int
722ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
723{
724 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
725 MASK_RSP_UPIU_DATA_SEG_LEN;
726}
727
728
729
730
731
732
733
734
735
736
737static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
738{
739 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
740 MASK_RSP_EXCEPTION_EVENT ? true : false;
741}
742
743
744
745
746
747static inline void
748ufshcd_reset_intr_aggr(struct ufs_hba *hba)
749{
750 ufshcd_writel(hba, INT_AGGR_ENABLE |
751 INT_AGGR_COUNTER_AND_TIMER_RESET,
752 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
753}
754
755
756
757
758
759
760
761static inline void
762ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
763{
764 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
765 INT_AGGR_COUNTER_THLD_VAL(cnt) |
766 INT_AGGR_TIMEOUT_VAL(tmout),
767 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
768}
769
770
771
772
773
774static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
775{
776 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
777}
778
779
780
781
782
783
784
785static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
786{
787 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
788 REG_UTP_TASK_REQ_LIST_RUN_STOP);
789 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
790 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
791}
792
793
794
795
796
797static inline void ufshcd_hba_start(struct ufs_hba *hba)
798{
799 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
800}
801
802
803
804
805
806
807
808static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
809{
810 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
811 ? false : true;
812}
813
814static const char *ufschd_uic_link_state_to_string(
815 enum uic_link_state state)
816{
817 switch (state) {
818 case UIC_LINK_OFF_STATE: return "OFF";
819 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
820 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
821 default: return "UNKNOWN";
822 }
823}
824
825static const char *ufschd_ufs_dev_pwr_mode_to_string(
826 enum ufs_dev_pwr_mode state)
827{
828 switch (state) {
829 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
830 case UFS_SLEEP_PWR_MODE: return "SLEEP";
831 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
832 default: return "UNKNOWN";
833 }
834}
835
836u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
837{
838
839 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
840 (hba->ufs_version == UFSHCI_VERSION_11))
841 return UFS_UNIPRO_VER_1_41;
842 else
843 return UFS_UNIPRO_VER_1_6;
844}
845EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
846
847static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
848{
849
850
851
852
853
854
855
856
857
858 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
859 return true;
860 else
861 return false;
862}
863
864static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
865{
866 int ret = 0;
867 struct ufs_clk_info *clki;
868 struct list_head *head = &hba->clk_list_head;
869 ktime_t start = ktime_get();
870 bool clk_state_changed = false;
871
872 if (list_empty(head))
873 goto out;
874
875 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
876 if (ret)
877 return ret;
878
879 list_for_each_entry(clki, head, list) {
880 if (!IS_ERR_OR_NULL(clki->clk)) {
881 if (scale_up && clki->max_freq) {
882 if (clki->curr_freq == clki->max_freq)
883 continue;
884
885 clk_state_changed = true;
886 ret = clk_set_rate(clki->clk, clki->max_freq);
887 if (ret) {
888 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
889 __func__, clki->name,
890 clki->max_freq, ret);
891 break;
892 }
893 trace_ufshcd_clk_scaling(dev_name(hba->dev),
894 "scaled up", clki->name,
895 clki->curr_freq,
896 clki->max_freq);
897
898 clki->curr_freq = clki->max_freq;
899
900 } else if (!scale_up && clki->min_freq) {
901 if (clki->curr_freq == clki->min_freq)
902 continue;
903
904 clk_state_changed = true;
905 ret = clk_set_rate(clki->clk, clki->min_freq);
906 if (ret) {
907 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
908 __func__, clki->name,
909 clki->min_freq, ret);
910 break;
911 }
912 trace_ufshcd_clk_scaling(dev_name(hba->dev),
913 "scaled down", clki->name,
914 clki->curr_freq,
915 clki->min_freq);
916 clki->curr_freq = clki->min_freq;
917 }
918 }
919 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
920 clki->name, clk_get_rate(clki->clk));
921 }
922
923 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
924
925out:
926 if (clk_state_changed)
927 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
928 (scale_up ? "up" : "down"),
929 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
930 return ret;
931}
932
933
934
935
936
937
938
939
940static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
941 bool scale_up)
942{
943 struct ufs_clk_info *clki;
944 struct list_head *head = &hba->clk_list_head;
945
946 if (list_empty(head))
947 return false;
948
949 list_for_each_entry(clki, head, list) {
950 if (!IS_ERR_OR_NULL(clki->clk)) {
951 if (scale_up && clki->max_freq) {
952 if (clki->curr_freq == clki->max_freq)
953 continue;
954 return true;
955 } else if (!scale_up && clki->min_freq) {
956 if (clki->curr_freq == clki->min_freq)
957 continue;
958 return true;
959 }
960 }
961 }
962
963 return false;
964}
965
966static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
967 u64 wait_timeout_us)
968{
969 unsigned long flags;
970 int ret = 0;
971 u32 tm_doorbell;
972 u32 tr_doorbell;
973 bool timeout = false, do_last_check = false;
974 ktime_t start;
975
976 ufshcd_hold(hba, false);
977 spin_lock_irqsave(hba->host->host_lock, flags);
978
979
980
981
982 start = ktime_get();
983 do {
984 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
985 ret = -EBUSY;
986 goto out;
987 }
988
989 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
990 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
991 if (!tm_doorbell && !tr_doorbell) {
992 timeout = false;
993 break;
994 } else if (do_last_check) {
995 break;
996 }
997
998 spin_unlock_irqrestore(hba->host->host_lock, flags);
999 schedule();
1000 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1001 wait_timeout_us) {
1002 timeout = true;
1003
1004
1005
1006
1007
1008 do_last_check = true;
1009 }
1010 spin_lock_irqsave(hba->host->host_lock, flags);
1011 } while (tm_doorbell || tr_doorbell);
1012
1013 if (timeout) {
1014 dev_err(hba->dev,
1015 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1016 __func__, tm_doorbell, tr_doorbell);
1017 ret = -EBUSY;
1018 }
1019out:
1020 spin_unlock_irqrestore(hba->host->host_lock, flags);
1021 ufshcd_release(hba);
1022 return ret;
1023}
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1035{
1036 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1037 int ret = 0;
1038 struct ufs_pa_layer_attr new_pwr_info;
1039
1040 if (scale_up) {
1041 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1042 sizeof(struct ufs_pa_layer_attr));
1043 } else {
1044 memcpy(&new_pwr_info, &hba->pwr_info,
1045 sizeof(struct ufs_pa_layer_attr));
1046
1047 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1048 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1049
1050 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1051 &hba->pwr_info,
1052 sizeof(struct ufs_pa_layer_attr));
1053
1054
1055 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1056 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1057 }
1058 }
1059
1060
1061 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1062
1063 if (ret)
1064 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1065 __func__, ret,
1066 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1067 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1068
1069 return ret;
1070}
1071
1072static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1073{
1074 #define DOORBELL_CLR_TOUT_US (1000 * 1000)
1075 int ret = 0;
1076
1077
1078
1079
1080 scsi_block_requests(hba->host);
1081 down_write(&hba->clk_scaling_lock);
1082 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1083 ret = -EBUSY;
1084 up_write(&hba->clk_scaling_lock);
1085 scsi_unblock_requests(hba->host);
1086 }
1087
1088 return ret;
1089}
1090
1091static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1092{
1093 up_write(&hba->clk_scaling_lock);
1094 scsi_unblock_requests(hba->host);
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1107{
1108 int ret = 0;
1109
1110
1111 ufshcd_hold(hba, false);
1112
1113 ret = ufshcd_clock_scaling_prepare(hba);
1114 if (ret)
1115 return ret;
1116
1117
1118 if (!scale_up) {
1119 ret = ufshcd_scale_gear(hba, false);
1120 if (ret)
1121 goto out;
1122 }
1123
1124 ret = ufshcd_scale_clks(hba, scale_up);
1125 if (ret) {
1126 if (!scale_up)
1127 ufshcd_scale_gear(hba, true);
1128 goto out;
1129 }
1130
1131
1132 if (scale_up) {
1133 ret = ufshcd_scale_gear(hba, true);
1134 if (ret) {
1135 ufshcd_scale_clks(hba, false);
1136 goto out;
1137 }
1138 }
1139
1140 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1141
1142out:
1143 ufshcd_clock_scaling_unprepare(hba);
1144 ufshcd_release(hba);
1145 return ret;
1146}
1147
1148static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1149{
1150 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1151 clk_scaling.suspend_work);
1152 unsigned long irq_flags;
1153
1154 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1155 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1156 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1157 return;
1158 }
1159 hba->clk_scaling.is_suspended = true;
1160 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1161
1162 __ufshcd_suspend_clkscaling(hba);
1163}
1164
1165static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1166{
1167 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1168 clk_scaling.resume_work);
1169 unsigned long irq_flags;
1170
1171 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1172 if (!hba->clk_scaling.is_suspended) {
1173 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1174 return;
1175 }
1176 hba->clk_scaling.is_suspended = false;
1177 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1178
1179 devfreq_resume_device(hba->devfreq);
1180}
1181
1182static int ufshcd_devfreq_target(struct device *dev,
1183 unsigned long *freq, u32 flags)
1184{
1185 int ret = 0;
1186 struct ufs_hba *hba = dev_get_drvdata(dev);
1187 ktime_t start;
1188 bool scale_up, sched_clk_scaling_suspend_work = false;
1189 unsigned long irq_flags;
1190
1191 if (!ufshcd_is_clkscaling_supported(hba))
1192 return -EINVAL;
1193
1194 if ((*freq > 0) && (*freq < UINT_MAX)) {
1195 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1196 return -EINVAL;
1197 }
1198
1199 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1200 if (ufshcd_eh_in_progress(hba)) {
1201 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1202 return 0;
1203 }
1204
1205 if (!hba->clk_scaling.active_reqs)
1206 sched_clk_scaling_suspend_work = true;
1207
1208 scale_up = (*freq == UINT_MAX) ? true : false;
1209 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1210 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1211 ret = 0;
1212 goto out;
1213 }
1214 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1215
1216 start = ktime_get();
1217 ret = ufshcd_devfreq_scale(hba, scale_up);
1218
1219 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1220 (scale_up ? "up" : "down"),
1221 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1222
1223out:
1224 if (sched_clk_scaling_suspend_work)
1225 queue_work(hba->clk_scaling.workq,
1226 &hba->clk_scaling.suspend_work);
1227
1228 return ret;
1229}
1230
1231
1232static int ufshcd_devfreq_get_dev_status(struct device *dev,
1233 struct devfreq_dev_status *stat)
1234{
1235 struct ufs_hba *hba = dev_get_drvdata(dev);
1236 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1237 unsigned long flags;
1238
1239 if (!ufshcd_is_clkscaling_supported(hba))
1240 return -EINVAL;
1241
1242 memset(stat, 0, sizeof(*stat));
1243
1244 spin_lock_irqsave(hba->host->host_lock, flags);
1245 if (!scaling->window_start_t)
1246 goto start_window;
1247
1248 if (scaling->is_busy_started)
1249 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1250 scaling->busy_start_t));
1251
1252 stat->total_time = jiffies_to_usecs((long)jiffies -
1253 (long)scaling->window_start_t);
1254 stat->busy_time = scaling->tot_busy_t;
1255start_window:
1256 scaling->window_start_t = jiffies;
1257 scaling->tot_busy_t = 0;
1258
1259 if (hba->outstanding_reqs) {
1260 scaling->busy_start_t = ktime_get();
1261 scaling->is_busy_started = true;
1262 } else {
1263 scaling->busy_start_t = 0;
1264 scaling->is_busy_started = false;
1265 }
1266 spin_unlock_irqrestore(hba->host->host_lock, flags);
1267 return 0;
1268}
1269
1270static struct devfreq_dev_profile ufs_devfreq_profile = {
1271 .polling_ms = 100,
1272 .target = ufshcd_devfreq_target,
1273 .get_dev_status = ufshcd_devfreq_get_dev_status,
1274};
1275
1276static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1277{
1278 unsigned long flags;
1279
1280 devfreq_suspend_device(hba->devfreq);
1281 spin_lock_irqsave(hba->host->host_lock, flags);
1282 hba->clk_scaling.window_start_t = 0;
1283 spin_unlock_irqrestore(hba->host->host_lock, flags);
1284}
1285
1286static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1287{
1288 unsigned long flags;
1289 bool suspend = false;
1290
1291 if (!ufshcd_is_clkscaling_supported(hba))
1292 return;
1293
1294 spin_lock_irqsave(hba->host->host_lock, flags);
1295 if (!hba->clk_scaling.is_suspended) {
1296 suspend = true;
1297 hba->clk_scaling.is_suspended = true;
1298 }
1299 spin_unlock_irqrestore(hba->host->host_lock, flags);
1300
1301 if (suspend)
1302 __ufshcd_suspend_clkscaling(hba);
1303}
1304
1305static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1306{
1307 unsigned long flags;
1308 bool resume = false;
1309
1310 if (!ufshcd_is_clkscaling_supported(hba))
1311 return;
1312
1313 spin_lock_irqsave(hba->host->host_lock, flags);
1314 if (hba->clk_scaling.is_suspended) {
1315 resume = true;
1316 hba->clk_scaling.is_suspended = false;
1317 }
1318 spin_unlock_irqrestore(hba->host->host_lock, flags);
1319
1320 if (resume)
1321 devfreq_resume_device(hba->devfreq);
1322}
1323
1324static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1325 struct device_attribute *attr, char *buf)
1326{
1327 struct ufs_hba *hba = dev_get_drvdata(dev);
1328
1329 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1330}
1331
1332static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1333 struct device_attribute *attr, const char *buf, size_t count)
1334{
1335 struct ufs_hba *hba = dev_get_drvdata(dev);
1336 u32 value;
1337 int err;
1338
1339 if (kstrtou32(buf, 0, &value))
1340 return -EINVAL;
1341
1342 value = !!value;
1343 if (value == hba->clk_scaling.is_allowed)
1344 goto out;
1345
1346 pm_runtime_get_sync(hba->dev);
1347 ufshcd_hold(hba, false);
1348
1349 cancel_work_sync(&hba->clk_scaling.suspend_work);
1350 cancel_work_sync(&hba->clk_scaling.resume_work);
1351
1352 hba->clk_scaling.is_allowed = value;
1353
1354 if (value) {
1355 ufshcd_resume_clkscaling(hba);
1356 } else {
1357 ufshcd_suspend_clkscaling(hba);
1358 err = ufshcd_devfreq_scale(hba, true);
1359 if (err)
1360 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1361 __func__, err);
1362 }
1363
1364 ufshcd_release(hba);
1365 pm_runtime_put_sync(hba->dev);
1366out:
1367 return count;
1368}
1369
1370static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1371{
1372 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1373 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1374 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1375 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1376 hba->clk_scaling.enable_attr.attr.mode = 0644;
1377 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1378 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1379}
1380
1381static void ufshcd_ungate_work(struct work_struct *work)
1382{
1383 int ret;
1384 unsigned long flags;
1385 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1386 clk_gating.ungate_work);
1387
1388 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1389
1390 spin_lock_irqsave(hba->host->host_lock, flags);
1391 if (hba->clk_gating.state == CLKS_ON) {
1392 spin_unlock_irqrestore(hba->host->host_lock, flags);
1393 goto unblock_reqs;
1394 }
1395
1396 spin_unlock_irqrestore(hba->host->host_lock, flags);
1397 ufshcd_setup_clocks(hba, true);
1398
1399
1400 if (ufshcd_can_hibern8_during_gating(hba)) {
1401
1402 hba->clk_gating.is_suspended = true;
1403 if (ufshcd_is_link_hibern8(hba)) {
1404 ret = ufshcd_uic_hibern8_exit(hba);
1405 if (ret)
1406 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1407 __func__, ret);
1408 else
1409 ufshcd_set_link_active(hba);
1410 }
1411 hba->clk_gating.is_suspended = false;
1412 }
1413unblock_reqs:
1414 scsi_unblock_requests(hba->host);
1415}
1416
1417
1418
1419
1420
1421
1422
1423int ufshcd_hold(struct ufs_hba *hba, bool async)
1424{
1425 int rc = 0;
1426 unsigned long flags;
1427
1428 if (!ufshcd_is_clkgating_allowed(hba))
1429 goto out;
1430 spin_lock_irqsave(hba->host->host_lock, flags);
1431 hba->clk_gating.active_reqs++;
1432
1433 if (ufshcd_eh_in_progress(hba)) {
1434 spin_unlock_irqrestore(hba->host->host_lock, flags);
1435 return 0;
1436 }
1437
1438start:
1439 switch (hba->clk_gating.state) {
1440 case CLKS_ON:
1441
1442
1443
1444
1445
1446
1447
1448
1449 if (ufshcd_can_hibern8_during_gating(hba) &&
1450 ufshcd_is_link_hibern8(hba)) {
1451 spin_unlock_irqrestore(hba->host->host_lock, flags);
1452 flush_work(&hba->clk_gating.ungate_work);
1453 spin_lock_irqsave(hba->host->host_lock, flags);
1454 goto start;
1455 }
1456 break;
1457 case REQ_CLKS_OFF:
1458 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1459 hba->clk_gating.state = CLKS_ON;
1460 trace_ufshcd_clk_gating(dev_name(hba->dev),
1461 hba->clk_gating.state);
1462 break;
1463 }
1464
1465
1466
1467
1468
1469 case CLKS_OFF:
1470 scsi_block_requests(hba->host);
1471 hba->clk_gating.state = REQ_CLKS_ON;
1472 trace_ufshcd_clk_gating(dev_name(hba->dev),
1473 hba->clk_gating.state);
1474 schedule_work(&hba->clk_gating.ungate_work);
1475
1476
1477
1478
1479 case REQ_CLKS_ON:
1480 if (async) {
1481 rc = -EAGAIN;
1482 hba->clk_gating.active_reqs--;
1483 break;
1484 }
1485
1486 spin_unlock_irqrestore(hba->host->host_lock, flags);
1487 flush_work(&hba->clk_gating.ungate_work);
1488
1489 spin_lock_irqsave(hba->host->host_lock, flags);
1490 goto start;
1491 default:
1492 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1493 __func__, hba->clk_gating.state);
1494 break;
1495 }
1496 spin_unlock_irqrestore(hba->host->host_lock, flags);
1497out:
1498 return rc;
1499}
1500EXPORT_SYMBOL_GPL(ufshcd_hold);
1501
1502static void ufshcd_gate_work(struct work_struct *work)
1503{
1504 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1505 clk_gating.gate_work.work);
1506 unsigned long flags;
1507
1508 spin_lock_irqsave(hba->host->host_lock, flags);
1509
1510
1511
1512
1513
1514
1515 if (hba->clk_gating.is_suspended ||
1516 (hba->clk_gating.state == REQ_CLKS_ON)) {
1517 hba->clk_gating.state = CLKS_ON;
1518 trace_ufshcd_clk_gating(dev_name(hba->dev),
1519 hba->clk_gating.state);
1520 goto rel_lock;
1521 }
1522
1523 if (hba->clk_gating.active_reqs
1524 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1525 || hba->lrb_in_use || hba->outstanding_tasks
1526 || hba->active_uic_cmd || hba->uic_async_done)
1527 goto rel_lock;
1528
1529 spin_unlock_irqrestore(hba->host->host_lock, flags);
1530
1531
1532 if (ufshcd_can_hibern8_during_gating(hba)) {
1533 if (ufshcd_uic_hibern8_enter(hba)) {
1534 hba->clk_gating.state = CLKS_ON;
1535 trace_ufshcd_clk_gating(dev_name(hba->dev),
1536 hba->clk_gating.state);
1537 goto out;
1538 }
1539 ufshcd_set_link_hibern8(hba);
1540 }
1541
1542 if (!ufshcd_is_link_active(hba))
1543 ufshcd_setup_clocks(hba, false);
1544 else
1545
1546 __ufshcd_setup_clocks(hba, false, true);
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557 spin_lock_irqsave(hba->host->host_lock, flags);
1558 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1559 hba->clk_gating.state = CLKS_OFF;
1560 trace_ufshcd_clk_gating(dev_name(hba->dev),
1561 hba->clk_gating.state);
1562 }
1563rel_lock:
1564 spin_unlock_irqrestore(hba->host->host_lock, flags);
1565out:
1566 return;
1567}
1568
1569
1570static void __ufshcd_release(struct ufs_hba *hba)
1571{
1572 if (!ufshcd_is_clkgating_allowed(hba))
1573 return;
1574
1575 hba->clk_gating.active_reqs--;
1576
1577 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1578 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1579 || hba->lrb_in_use || hba->outstanding_tasks
1580 || hba->active_uic_cmd || hba->uic_async_done
1581 || ufshcd_eh_in_progress(hba))
1582 return;
1583
1584 hba->clk_gating.state = REQ_CLKS_OFF;
1585 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1586 schedule_delayed_work(&hba->clk_gating.gate_work,
1587 msecs_to_jiffies(hba->clk_gating.delay_ms));
1588}
1589
1590void ufshcd_release(struct ufs_hba *hba)
1591{
1592 unsigned long flags;
1593
1594 spin_lock_irqsave(hba->host->host_lock, flags);
1595 __ufshcd_release(hba);
1596 spin_unlock_irqrestore(hba->host->host_lock, flags);
1597}
1598EXPORT_SYMBOL_GPL(ufshcd_release);
1599
1600static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1601 struct device_attribute *attr, char *buf)
1602{
1603 struct ufs_hba *hba = dev_get_drvdata(dev);
1604
1605 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1606}
1607
1608static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1609 struct device_attribute *attr, const char *buf, size_t count)
1610{
1611 struct ufs_hba *hba = dev_get_drvdata(dev);
1612 unsigned long flags, value;
1613
1614 if (kstrtoul(buf, 0, &value))
1615 return -EINVAL;
1616
1617 spin_lock_irqsave(hba->host->host_lock, flags);
1618 hba->clk_gating.delay_ms = value;
1619 spin_unlock_irqrestore(hba->host->host_lock, flags);
1620 return count;
1621}
1622
1623static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1624 struct device_attribute *attr, char *buf)
1625{
1626 struct ufs_hba *hba = dev_get_drvdata(dev);
1627
1628 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1629}
1630
1631static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1632 struct device_attribute *attr, const char *buf, size_t count)
1633{
1634 struct ufs_hba *hba = dev_get_drvdata(dev);
1635 unsigned long flags;
1636 u32 value;
1637
1638 if (kstrtou32(buf, 0, &value))
1639 return -EINVAL;
1640
1641 value = !!value;
1642 if (value == hba->clk_gating.is_enabled)
1643 goto out;
1644
1645 if (value) {
1646 ufshcd_release(hba);
1647 } else {
1648 spin_lock_irqsave(hba->host->host_lock, flags);
1649 hba->clk_gating.active_reqs++;
1650 spin_unlock_irqrestore(hba->host->host_lock, flags);
1651 }
1652
1653 hba->clk_gating.is_enabled = value;
1654out:
1655 return count;
1656}
1657
1658static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1659{
1660 if (!ufshcd_is_clkgating_allowed(hba))
1661 return;
1662
1663 hba->clk_gating.delay_ms = 150;
1664 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1665 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1666
1667 hba->clk_gating.is_enabled = true;
1668
1669 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1670 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1671 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1672 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1673 hba->clk_gating.delay_attr.attr.mode = 0644;
1674 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1675 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1676
1677 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1678 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1679 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1680 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1681 hba->clk_gating.enable_attr.attr.mode = 0644;
1682 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1683 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1684}
1685
1686static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1687{
1688 if (!ufshcd_is_clkgating_allowed(hba))
1689 return;
1690 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1691 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1692 cancel_work_sync(&hba->clk_gating.ungate_work);
1693 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1694}
1695
1696
1697static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1698{
1699 bool queue_resume_work = false;
1700
1701 if (!ufshcd_is_clkscaling_supported(hba))
1702 return;
1703
1704 if (!hba->clk_scaling.active_reqs++)
1705 queue_resume_work = true;
1706
1707 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1708 return;
1709
1710 if (queue_resume_work)
1711 queue_work(hba->clk_scaling.workq,
1712 &hba->clk_scaling.resume_work);
1713
1714 if (!hba->clk_scaling.window_start_t) {
1715 hba->clk_scaling.window_start_t = jiffies;
1716 hba->clk_scaling.tot_busy_t = 0;
1717 hba->clk_scaling.is_busy_started = false;
1718 }
1719
1720 if (!hba->clk_scaling.is_busy_started) {
1721 hba->clk_scaling.busy_start_t = ktime_get();
1722 hba->clk_scaling.is_busy_started = true;
1723 }
1724}
1725
1726static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1727{
1728 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1729
1730 if (!ufshcd_is_clkscaling_supported(hba))
1731 return;
1732
1733 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1734 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1735 scaling->busy_start_t));
1736 scaling->busy_start_t = 0;
1737 scaling->is_busy_started = false;
1738 }
1739}
1740
1741
1742
1743
1744
1745static inline
1746void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1747{
1748 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1749 ufshcd_clk_scaling_start_busy(hba);
1750 __set_bit(task_tag, &hba->outstanding_reqs);
1751 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1752
1753 wmb();
1754 ufshcd_add_command_trace(hba, task_tag, "send");
1755}
1756
1757
1758
1759
1760
1761static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1762{
1763 int len;
1764 if (lrbp->sense_buffer &&
1765 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1766 int len_to_copy;
1767
1768 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1769 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1770
1771 memcpy(lrbp->sense_buffer,
1772 lrbp->ucd_rsp_ptr->sr.sense_data,
1773 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1774 }
1775}
1776
1777
1778
1779
1780
1781
1782
1783static
1784int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1785{
1786 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1787
1788 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1789
1790
1791 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1792 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1793 GENERAL_UPIU_REQUEST_SIZE;
1794 u16 resp_len;
1795 u16 buf_len;
1796
1797
1798 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1799 MASK_QUERY_DATA_SEG_LEN;
1800 buf_len = be16_to_cpu(
1801 hba->dev_cmd.query.request.upiu_req.length);
1802 if (likely(buf_len >= resp_len)) {
1803 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1804 } else {
1805 dev_warn(hba->dev,
1806 "%s: Response size is bigger than buffer",
1807 __func__);
1808 return -EINVAL;
1809 }
1810 }
1811
1812 return 0;
1813}
1814
1815
1816
1817
1818
1819static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1820{
1821 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1822
1823
1824 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1825 hba->nutmrs =
1826 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1827}
1828
1829
1830
1831
1832
1833
1834
1835static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1836{
1837 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1838 return true;
1839 else
1840 return false;
1841}
1842
1843
1844
1845
1846
1847
1848
1849
1850static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1851{
1852 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1853}
1854
1855
1856
1857
1858
1859
1860
1861
1862static inline void
1863ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1864{
1865 WARN_ON(hba->active_uic_cmd);
1866
1867 hba->active_uic_cmd = uic_cmd;
1868
1869
1870 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1871 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1872 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1873
1874
1875 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1876 REG_UIC_COMMAND);
1877}
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887static int
1888ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1889{
1890 int ret;
1891 unsigned long flags;
1892
1893 if (wait_for_completion_timeout(&uic_cmd->done,
1894 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
1895 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
1896 else
1897 ret = -ETIMEDOUT;
1898
1899 spin_lock_irqsave(hba->host->host_lock, flags);
1900 hba->active_uic_cmd = NULL;
1901 spin_unlock_irqrestore(hba->host->host_lock, flags);
1902
1903 return ret;
1904}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916static int
1917__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
1918 bool completion)
1919{
1920 if (!ufshcd_ready_for_uic_cmd(hba)) {
1921 dev_err(hba->dev,
1922 "Controller not ready to accept UIC commands\n");
1923 return -EIO;
1924 }
1925
1926 if (completion)
1927 init_completion(&uic_cmd->done);
1928
1929 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
1930
1931 return 0;
1932}
1933
1934
1935
1936
1937
1938
1939
1940
1941static int
1942ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1943{
1944 int ret;
1945 unsigned long flags;
1946
1947 ufshcd_hold(hba, false);
1948 mutex_lock(&hba->uic_cmd_mutex);
1949 ufshcd_add_delay_before_dme_cmd(hba);
1950
1951 spin_lock_irqsave(hba->host->host_lock, flags);
1952 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
1953 spin_unlock_irqrestore(hba->host->host_lock, flags);
1954 if (!ret)
1955 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1956
1957 mutex_unlock(&hba->uic_cmd_mutex);
1958
1959 ufshcd_release(hba);
1960 return ret;
1961}
1962
1963
1964
1965
1966
1967
1968
1969static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1970{
1971 struct ufshcd_sg_entry *prd_table;
1972 struct scatterlist *sg;
1973 struct scsi_cmnd *cmd;
1974 int sg_segments;
1975 int i;
1976
1977 cmd = lrbp->cmd;
1978 sg_segments = scsi_dma_map(cmd);
1979 if (sg_segments < 0)
1980 return sg_segments;
1981
1982 if (sg_segments) {
1983 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1984 lrbp->utr_descriptor_ptr->prd_table_length =
1985 cpu_to_le16((u16)(sg_segments *
1986 sizeof(struct ufshcd_sg_entry)));
1987 else
1988 lrbp->utr_descriptor_ptr->prd_table_length =
1989 cpu_to_le16((u16) (sg_segments));
1990
1991 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1992
1993 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1994 prd_table[i].size =
1995 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1996 prd_table[i].base_addr =
1997 cpu_to_le32(lower_32_bits(sg->dma_address));
1998 prd_table[i].upper_addr =
1999 cpu_to_le32(upper_32_bits(sg->dma_address));
2000 prd_table[i].reserved = 0;
2001 }
2002 } else {
2003 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2004 }
2005
2006 return 0;
2007}
2008
2009
2010
2011
2012
2013
2014static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2015{
2016 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2017
2018 if (hba->ufs_version == UFSHCI_VERSION_10) {
2019 u32 rw;
2020 rw = set & INTERRUPT_MASK_RW_VER_10;
2021 set = rw | ((set ^ intrs) & intrs);
2022 } else {
2023 set |= intrs;
2024 }
2025
2026 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2027}
2028
2029
2030
2031
2032
2033
2034static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2035{
2036 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2037
2038 if (hba->ufs_version == UFSHCI_VERSION_10) {
2039 u32 rw;
2040 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2041 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2042 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2043
2044 } else {
2045 set &= ~intrs;
2046 }
2047
2048 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2049}
2050
2051
2052
2053
2054
2055
2056
2057
2058static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2059 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2060{
2061 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2062 u32 data_direction;
2063 u32 dword_0;
2064
2065 if (cmd_dir == DMA_FROM_DEVICE) {
2066 data_direction = UTP_DEVICE_TO_HOST;
2067 *upiu_flags = UPIU_CMD_FLAGS_READ;
2068 } else if (cmd_dir == DMA_TO_DEVICE) {
2069 data_direction = UTP_HOST_TO_DEVICE;
2070 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2071 } else {
2072 data_direction = UTP_NO_DATA_TRANSFER;
2073 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2074 }
2075
2076 dword_0 = data_direction | (lrbp->command_type
2077 << UPIU_COMMAND_TYPE_OFFSET);
2078 if (lrbp->intr_cmd)
2079 dword_0 |= UTP_REQ_DESC_INT_CMD;
2080
2081
2082 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2083
2084 req_desc->header.dword_1 = 0;
2085
2086
2087
2088
2089
2090 req_desc->header.dword_2 =
2091 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2092
2093 req_desc->header.dword_3 = 0;
2094
2095 req_desc->prd_table_length = 0;
2096}
2097
2098
2099
2100
2101
2102
2103
2104static
2105void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2106{
2107 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2108 unsigned short cdb_len;
2109
2110
2111 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2112 UPIU_TRANSACTION_COMMAND, upiu_flags,
2113 lrbp->lun, lrbp->task_tag);
2114 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2115 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2116
2117
2118 ucd_req_ptr->header.dword_2 = 0;
2119
2120 ucd_req_ptr->sc.exp_data_transfer_len =
2121 cpu_to_be32(lrbp->cmd->sdb.length);
2122
2123 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2124 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2125 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2126
2127 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2128}
2129
2130
2131
2132
2133
2134
2135
2136
2137static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2138 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2139{
2140 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2141 struct ufs_query *query = &hba->dev_cmd.query;
2142 u16 len = be16_to_cpu(query->request.upiu_req.length);
2143 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2144
2145
2146 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2147 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2148 lrbp->lun, lrbp->task_tag);
2149 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2150 0, query->request.query_func, 0, 0);
2151
2152
2153 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2154 ucd_req_ptr->header.dword_2 =
2155 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2156 else
2157 ucd_req_ptr->header.dword_2 = 0;
2158
2159
2160 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2161 QUERY_OSF_SIZE);
2162
2163
2164 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2165 memcpy(descp, query->descriptor, len);
2166
2167 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2168}
2169
2170static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2171{
2172 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2173
2174 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2175
2176
2177 ucd_req_ptr->header.dword_0 =
2178 UPIU_HEADER_DWORD(
2179 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2180
2181 ucd_req_ptr->header.dword_1 = 0;
2182 ucd_req_ptr->header.dword_2 = 0;
2183
2184 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2185}
2186
2187
2188
2189
2190
2191
2192
2193static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2194{
2195 u32 upiu_flags;
2196 int ret = 0;
2197
2198 if (hba->ufs_version == UFSHCI_VERSION_20)
2199 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2200 else
2201 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2202
2203 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2204 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2205 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2206 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2207 ufshcd_prepare_utp_nop_upiu(lrbp);
2208 else
2209 ret = -EINVAL;
2210
2211 return ret;
2212}
2213
2214
2215
2216
2217
2218
2219
2220static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2221{
2222 u32 upiu_flags;
2223 int ret = 0;
2224
2225 if (hba->ufs_version == UFSHCI_VERSION_20)
2226 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2227 else
2228 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2229
2230 if (likely(lrbp->cmd)) {
2231 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2232 lrbp->cmd->sc_data_direction);
2233 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2234 } else {
2235 ret = -EINVAL;
2236 }
2237
2238 return ret;
2239}
2240
2241
2242
2243
2244
2245
2246
2247static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2248{
2249 if (scsi_is_wlun(scsi_lun))
2250 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2251 | UFS_UPIU_WLUN_ID;
2252 else
2253 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2254}
2255
2256
2257
2258
2259
2260
2261
2262static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2263{
2264 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2265}
2266
2267
2268
2269
2270
2271
2272
2273
2274static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2275{
2276 struct ufshcd_lrb *lrbp;
2277 struct ufs_hba *hba;
2278 unsigned long flags;
2279 int tag;
2280 int err = 0;
2281
2282 hba = shost_priv(host);
2283
2284 tag = cmd->request->tag;
2285 if (!ufshcd_valid_tag(hba, tag)) {
2286 dev_err(hba->dev,
2287 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2288 __func__, tag, cmd, cmd->request);
2289 BUG();
2290 }
2291
2292 if (!down_read_trylock(&hba->clk_scaling_lock))
2293 return SCSI_MLQUEUE_HOST_BUSY;
2294
2295 spin_lock_irqsave(hba->host->host_lock, flags);
2296 switch (hba->ufshcd_state) {
2297 case UFSHCD_STATE_OPERATIONAL:
2298 break;
2299 case UFSHCD_STATE_EH_SCHEDULED:
2300 case UFSHCD_STATE_RESET:
2301 err = SCSI_MLQUEUE_HOST_BUSY;
2302 goto out_unlock;
2303 case UFSHCD_STATE_ERROR:
2304 set_host_byte(cmd, DID_ERROR);
2305 cmd->scsi_done(cmd);
2306 goto out_unlock;
2307 default:
2308 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2309 __func__, hba->ufshcd_state);
2310 set_host_byte(cmd, DID_BAD_TARGET);
2311 cmd->scsi_done(cmd);
2312 goto out_unlock;
2313 }
2314
2315
2316 if (ufshcd_eh_in_progress(hba)) {
2317 set_host_byte(cmd, DID_ERROR);
2318 cmd->scsi_done(cmd);
2319 goto out_unlock;
2320 }
2321 spin_unlock_irqrestore(hba->host->host_lock, flags);
2322
2323 hba->req_abort_count = 0;
2324
2325
2326 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2327
2328
2329
2330
2331
2332
2333 err = SCSI_MLQUEUE_HOST_BUSY;
2334 goto out;
2335 }
2336
2337 err = ufshcd_hold(hba, true);
2338 if (err) {
2339 err = SCSI_MLQUEUE_HOST_BUSY;
2340 clear_bit_unlock(tag, &hba->lrb_in_use);
2341 goto out;
2342 }
2343 WARN_ON(hba->clk_gating.state != CLKS_ON);
2344
2345 lrbp = &hba->lrb[tag];
2346
2347 WARN_ON(lrbp->cmd);
2348 lrbp->cmd = cmd;
2349 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2350 lrbp->sense_buffer = cmd->sense_buffer;
2351 lrbp->task_tag = tag;
2352 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2353 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2354 lrbp->req_abort_skip = false;
2355
2356 ufshcd_comp_scsi_upiu(hba, lrbp);
2357
2358 err = ufshcd_map_sg(hba, lrbp);
2359 if (err) {
2360 lrbp->cmd = NULL;
2361 clear_bit_unlock(tag, &hba->lrb_in_use);
2362 goto out;
2363 }
2364
2365 wmb();
2366
2367
2368 spin_lock_irqsave(hba->host->host_lock, flags);
2369 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2370 ufshcd_send_command(hba, tag);
2371out_unlock:
2372 spin_unlock_irqrestore(hba->host->host_lock, flags);
2373out:
2374 up_read(&hba->clk_scaling_lock);
2375 return err;
2376}
2377
2378static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2379 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2380{
2381 lrbp->cmd = NULL;
2382 lrbp->sense_bufflen = 0;
2383 lrbp->sense_buffer = NULL;
2384 lrbp->task_tag = tag;
2385 lrbp->lun = 0;
2386 lrbp->intr_cmd = true;
2387 hba->dev_cmd.type = cmd_type;
2388
2389 return ufshcd_comp_devman_upiu(hba, lrbp);
2390}
2391
2392static int
2393ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2394{
2395 int err = 0;
2396 unsigned long flags;
2397 u32 mask = 1 << tag;
2398
2399
2400 spin_lock_irqsave(hba->host->host_lock, flags);
2401 ufshcd_utrl_clear(hba, tag);
2402 spin_unlock_irqrestore(hba->host->host_lock, flags);
2403
2404
2405
2406
2407
2408 err = ufshcd_wait_for_register(hba,
2409 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2410 mask, ~mask, 1000, 1000, true);
2411
2412 return err;
2413}
2414
2415static int
2416ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2417{
2418 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2419
2420
2421 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2422 UPIU_RSP_CODE_OFFSET;
2423 return query_res->response;
2424}
2425
2426
2427
2428
2429
2430
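/**
 * ufshcd_dev_cmd_completion - handle the response of a device management
 * command (NOP IN or Query Response)
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of the completed command
 *
 * Returns 0 on success, negative errno on an unexpected or rejected response.
 */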
2431static int
2432ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2433{
2434 int resp;
2435 int err = 0;
2436
2437 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2438 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2439
2440 switch (resp) {
2441 case UPIU_TRANSACTION_NOP_IN:
2442 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2443 err = -EINVAL;
2444 dev_err(hba->dev, "%s: unexpected response %x\n",
2445 __func__, resp);
2446 }
2447 break;
2448 case UPIU_TRANSACTION_QUERY_RSP:
2449 err = ufshcd_check_query_response(hba, lrbp);
2450 if (!err)
2451 err = ufshcd_copy_query_response(hba, lrbp);
2452 break;
2453 case UPIU_TRANSACTION_REJECT_UPIU:
2454
2455 err = -EPERM;
2456 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2457 __func__);
2458 break;
2459 default:
2460 err = -EINVAL;
2461 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2462 __func__, resp);
2463 break;
2464 }
2465
2466 return err;
2467}
2468
2469static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2470 struct ufshcd_lrb *lrbp, int max_timeout)
2471{
2472 int err = 0;
2473 unsigned long time_left;
2474 unsigned long flags;
2475
2476 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2477 msecs_to_jiffies(max_timeout));
2478
2479
2480 wmb();
2481 spin_lock_irqsave(hba->host->host_lock, flags);
2482 hba->dev_cmd.complete = NULL;
2483 if (likely(time_left)) {
2484 err = ufshcd_get_tr_ocs(lrbp);
2485 if (!err)
2486 err = ufshcd_dev_cmd_completion(hba, lrbp);
2487 }
2488 spin_unlock_irqrestore(hba->host->host_lock, flags);
2489
2490 if (!time_left) {
2491 err = -ETIMEDOUT;
2492 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2493 __func__, lrbp->task_tag);
2494 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2495
2496 err = -EAGAIN;
2497
2498
2499
2500
2501
2502 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2503 }
2504
2505 return err;
2506}
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
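/**
 * ufshcd_get_dev_cmd_tag - try to reserve a free LRB tag for a device
 * management command
 * @hba: per adapter instance
 * @tag_out: where the reserved tag is stored on success
 *
 * Returns true and sets *tag_out if a tag was claimed, false if all tags
 * are currently in use (the caller waits on dev_cmd.tag_wq and retries).
 */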
2519static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2520{
2521 int tag;
2522 bool ret = false;
2523 unsigned long tmp;
2524
2525 if (!tag_out)
2526 goto out;
2527
2528 do {
2529 tmp = ~hba->lrb_in_use;
2530 tag = find_last_bit(&tmp, hba->nutrs);
2531 if (tag >= hba->nutrs)
2532 goto out;
2533 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2534
2535 *tag_out = tag;
2536 ret = true;
2537out:
2538 return ret;
2539}
2540
2541static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2542{
2543 clear_bit_unlock(tag, &hba->lrb_in_use);
2544}
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
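/**
 * ufshcd_exec_dev_cmd - issue a device management command and wait for
 * its completion
 * @hba: per adapter instance
 * @cmd_type: DEV_CMD_TYPE_NOP or DEV_CMD_TYPE_QUERY
 * @timeout: timeout in milliseconds
 *
 * NOTE: there is only one completion slot for device management commands
 * (hba->dev_cmd.complete), so callers serialize on hba->dev_cmd.lock.
 *
 * Returns 0 on success, negative errno on failure.
 */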
2555static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2556 enum dev_cmd_type cmd_type, int timeout)
2557{
2558 struct ufshcd_lrb *lrbp;
2559 int err;
2560 int tag;
2561 struct completion wait;
2562 unsigned long flags;
2563
2564 down_read(&hba->clk_scaling_lock);
2565
2566
2567
2568
2569
2570
2571 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2572
2573 init_completion(&wait);
2574 lrbp = &hba->lrb[tag];
2575 WARN_ON(lrbp->cmd);
2576 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2577 if (unlikely(err))
2578 goto out_put_tag;
2579
2580 hba->dev_cmd.complete = &wait;
2581
2582
2583 wmb();
2584 spin_lock_irqsave(hba->host->host_lock, flags);
2585 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2586 ufshcd_send_command(hba, tag);
2587 spin_unlock_irqrestore(hba->host->host_lock, flags);
2588
2589 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2590
2591out_put_tag:
2592 ufshcd_put_dev_cmd_tag(hba, tag);
2593 wake_up(&hba->dev_cmd.tag_wq);
2594 up_read(&hba->clk_scaling_lock);
2595 return err;
2596}
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
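/**
 * ufshcd_init_query - initialize the query request and response buffers
 * @hba: per adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: query operation to perform
 * @idn: flag/attribute/descriptor idn to access
 * @index: index field
 * @selector: selector field
 */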
2608static inline void ufshcd_init_query(struct ufs_hba *hba,
2609 struct ufs_query_req **request, struct ufs_query_res **response,
2610 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2611{
2612 *request = &hba->dev_cmd.query.request;
2613 *response = &hba->dev_cmd.query.response;
2614 memset(*request, 0, sizeof(struct ufs_query_req));
2615 memset(*response, 0, sizeof(struct ufs_query_res));
2616 (*request)->upiu_req.opcode = opcode;
2617 (*request)->upiu_req.idn = idn;
2618 (*request)->upiu_req.index = index;
2619 (*request)->upiu_req.selector = selector;
2620}
2621
2622static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2623 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2624{
2625 int ret;
2626 int retries;
2627
2628 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2629 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2630 if (ret)
2631 dev_dbg(hba->dev,
2632 "%s: failed with error %d, retries %d\n",
2633 __func__, ret, retries);
2634 else
2635 break;
2636 }
2637
2638 if (ret)
2639 dev_err(hba->dev,
2640 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2641 __func__, opcode, idn, ret, retries);
2642 return ret;
2643}
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
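/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform (read/set/clear/toggle)
 * @idn: flag idn to access
 * @flag_res: flag value after the query completes, valid for read requests
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * A typical read, e.g. polling fDeviceInit (a sketch only, error handling
 * omitted; ufshcd_complete_dev_init() below does this via the retry wrapper):
 *	bool res;
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *			QUERY_FLAG_IDN_FDEVICEINIT, &res);
 */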
2654int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2655 enum flag_idn idn, bool *flag_res)
2656{
2657 struct ufs_query_req *request = NULL;
2658 struct ufs_query_res *response = NULL;
2659 int err, index = 0, selector = 0;
2660 int timeout = QUERY_REQ_TIMEOUT;
2661
2662 BUG_ON(!hba);
2663
2664 ufshcd_hold(hba, false);
2665 mutex_lock(&hba->dev_cmd.lock);
2666 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2667 selector);
2668
2669 switch (opcode) {
2670 case UPIU_QUERY_OPCODE_SET_FLAG:
2671 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2672 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2673 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2674 break;
2675 case UPIU_QUERY_OPCODE_READ_FLAG:
2676 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2677 if (!flag_res) {
2678
2679 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2680 __func__);
2681 err = -EINVAL;
2682 goto out_unlock;
2683 }
2684 break;
2685 default:
2686 dev_err(hba->dev,
2687 "%s: Expected query flag opcode but got = %d\n",
2688 __func__, opcode);
2689 err = -EINVAL;
2690 goto out_unlock;
2691 }
2692
2693 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2694
2695 if (err) {
2696 dev_err(hba->dev,
2697 "%s: Sending flag query for idn %d failed, err = %d\n",
2698 __func__, idn, err);
2699 goto out_unlock;
2700 }
2701
2702 if (flag_res)
2703 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2704 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2705
2706out_unlock:
2707 mutex_unlock(&hba->dev_cmd.lock);
2708 ufshcd_release(hba);
2709 return err;
2710}
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
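/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode (read or write)
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: attribute value to write, or the value read back on success
 *
 * Returns 0 for success, non-zero in case of failure.
 */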
2723static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2724 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2725{
2726 struct ufs_query_req *request = NULL;
2727 struct ufs_query_res *response = NULL;
2728 int err;
2729
2730 BUG_ON(!hba);
2731
2732 ufshcd_hold(hba, false);
2733 if (!attr_val) {
2734 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2735 __func__, opcode);
2736 err = -EINVAL;
2737 goto out;
2738 }
2739
2740 mutex_lock(&hba->dev_cmd.lock);
2741 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2742 selector);
2743
2744 switch (opcode) {
2745 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2746 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2747 request->upiu_req.value = cpu_to_be32(*attr_val);
2748 break;
2749 case UPIU_QUERY_OPCODE_READ_ATTR:
2750 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2751 break;
2752 default:
2753 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2754 __func__, opcode);
2755 err = -EINVAL;
2756 goto out_unlock;
2757 }
2758
2759 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2760
2761 if (err) {
2762 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2763 __func__, opcode, idn, index, err);
2764 goto out_unlock;
2765 }
2766
2767 *attr_val = be32_to_cpu(response->upiu_res.value);
2768
2769out_unlock:
2770 mutex_unlock(&hba->dev_cmd.lock);
2771out:
2772 ufshcd_release(hba);
2773 return err;
2774}
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
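/**
 * ufshcd_query_attr_retry() - wrapper around ufshcd_query_attr() that
 * retries the request up to QUERY_REQ_RETRIES times on failure
 * @hba: per-adapter instance
 * @opcode: attribute opcode (read or write)
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: attribute value to write, or the value read back on success
 *
 * A typical read, e.g. fetching the BKOPS status attribute (sketch only;
 * ufshcd_get_bkops_status() below is the real caller):
 *	u32 status = 0;
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &status);
 */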
2789static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2790 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2791 u32 *attr_val)
2792{
2793 int ret = 0;
2794 u32 retries;
2795
2796 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2797 ret = ufshcd_query_attr(hba, opcode, idn, index,
2798 selector, attr_val);
2799 if (ret)
2800 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2801 __func__, ret, retries);
2802 else
2803 break;
2804 }
2805
2806 if (ret)
2807 dev_err(hba->dev,
2808 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2809 __func__, idn, ret, QUERY_REQ_RETRIES);
2810 return ret;
2811}
2812
2813static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2814 enum query_opcode opcode, enum desc_idn idn, u8 index,
2815 u8 selector, u8 *desc_buf, int *buf_len)
2816{
2817 struct ufs_query_req *request = NULL;
2818 struct ufs_query_res *response = NULL;
2819 int err;
2820
2821 BUG_ON(!hba);
2822
2823 ufshcd_hold(hba, false);
2824 if (!desc_buf) {
2825 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2826 __func__, opcode);
2827 err = -EINVAL;
2828 goto out;
2829 }
2830
2831 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2832 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2833 __func__, *buf_len);
2834 err = -EINVAL;
2835 goto out;
2836 }
2837
2838 mutex_lock(&hba->dev_cmd.lock);
2839 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2840 selector);
2841 hba->dev_cmd.query.descriptor = desc_buf;
2842 request->upiu_req.length = cpu_to_be16(*buf_len);
2843
2844 switch (opcode) {
2845 case UPIU_QUERY_OPCODE_WRITE_DESC:
2846 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2847 break;
2848 case UPIU_QUERY_OPCODE_READ_DESC:
2849 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2850 break;
2851 default:
2852 dev_err(hba->dev,
2853 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2854 __func__, opcode);
2855 err = -EINVAL;
2856 goto out_unlock;
2857 }
2858
2859 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2860
2861 if (err) {
2862 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2863 __func__, opcode, idn, index, err);
2864 goto out_unlock;
2865 }
2866
2867 hba->dev_cmd.query.descriptor = NULL;
2868 *buf_len = be16_to_cpu(response->upiu_res.length);
2869
2870out_unlock:
2871 mutex_unlock(&hba->dev_cmd.lock);
2872out:
2873 ufshcd_release(hba);
2874 return err;
2875}
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
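/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor
 * requests, retrying transient failures
 * @hba: per-adapter instance
 * @opcode: descriptor opcode (read or write)
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: buffer that contains or receives the descriptor
 * @buf_len: length passed to the device; updated with the returned length
 *
 * Returns 0 for success, non-zero in case of failure.
 * -EINVAL errors are not retried since they indicate a bad request.
 */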
2892static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2893 enum query_opcode opcode,
2894 enum desc_idn idn, u8 index,
2895 u8 selector,
2896 u8 *desc_buf, int *buf_len)
2897{
2898 int err;
2899 int retries;
2900
2901 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2902 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2903 selector, desc_buf, buf_len);
2904 if (!err || err == -EINVAL)
2905 break;
2906 }
2907
2908 return err;
2909}
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
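/**
 * ufshcd_read_desc_length - read only the header of a descriptor to obtain
 * its length as reported by the device
 * @hba: per-adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: where the bLength field is stored
 *
 * Returns 0 on success, non-zero on failure or on a descriptor type mismatch.
 */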
2920static int ufshcd_read_desc_length(struct ufs_hba *hba,
2921 enum desc_idn desc_id,
2922 int desc_index,
2923 int *desc_length)
2924{
2925 int ret;
2926 u8 header[QUERY_DESC_HDR_SIZE];
2927 int header_len = QUERY_DESC_HDR_SIZE;
2928
2929 if (desc_id >= QUERY_DESC_IDN_MAX)
2930 return -EINVAL;
2931
2932 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2933 desc_id, desc_index, 0, header,
2934 &header_len);
2935
2936 if (ret) {
2937 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2938 __func__, desc_id);
2939 return ret;
2940 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2941 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2942 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
2943 desc_id);
2944 ret = -EINVAL;
2945 }
2946
2947 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
2948 return ret;
2949
2950}
2951
2952
2953
2954
2955
2956
2957
2958
2959
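/**
 * ufshcd_map_desc_id_to_length - map a descriptor IDN to the descriptor
 * length cached in hba->desc_size
 * @hba: pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped descriptor length (out)
 *
 * Returns 0 in case of success, -EINVAL for an unknown descriptor id.
 */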
2960int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
2961 enum desc_idn desc_id, int *desc_len)
2962{
2963 switch (desc_id) {
2964 case QUERY_DESC_IDN_DEVICE:
2965 *desc_len = hba->desc_size.dev_desc;
2966 break;
2967 case QUERY_DESC_IDN_POWER:
2968 *desc_len = hba->desc_size.pwr_desc;
2969 break;
2970 case QUERY_DESC_IDN_GEOMETRY:
2971 *desc_len = hba->desc_size.geom_desc;
2972 break;
2973 case QUERY_DESC_IDN_CONFIGURATION:
2974 *desc_len = hba->desc_size.conf_desc;
2975 break;
2976 case QUERY_DESC_IDN_UNIT:
2977 *desc_len = hba->desc_size.unit_desc;
2978 break;
2979 case QUERY_DESC_IDN_INTERCONNECT:
2980 *desc_len = hba->desc_size.interc_desc;
2981 break;
2982 case QUERY_DESC_IDN_STRING:
2983 *desc_len = QUERY_DESC_MAX_SIZE;
2984 break;
2985 case QUERY_DESC_IDN_RFU_0:
2986 case QUERY_DESC_IDN_RFU_1:
2987 *desc_len = 0;
2988 break;
2989 default:
2990 *desc_len = 0;
2991 return -EINVAL;
2992 }
2993 return 0;
2994}
2995EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
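/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where the parameter is returned
 * @param_size: size of @param_read_buf
 *
 * A bounce buffer is allocated whenever only part of the descriptor is
 * requested; otherwise the caller's buffer is used directly.
 *
 * Returns 0 in case of success, non-zero otherwise.
 */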
3008static int ufshcd_read_desc_param(struct ufs_hba *hba,
3009 enum desc_idn desc_id,
3010 int desc_index,
3011 u8 param_offset,
3012 u8 *param_read_buf,
3013 u8 param_size)
3014{
3015 int ret;
3016 u8 *desc_buf;
3017 int buff_len;
3018 bool is_kmalloc = true;
3019
3020
3021 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3022 return -EINVAL;
3023
3024
3025
3026
3027 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3028
3029
3030 if (ret || !buff_len) {
3031 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3032 __func__);
3033 return ret;
3034 }
3035
3036
3037 if (param_offset != 0 || param_size < buff_len) {
3038 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3039 if (!desc_buf)
3040 return -ENOMEM;
3041 } else {
3042 desc_buf = param_read_buf;
3043 is_kmalloc = false;
3044 }
3045
3046
3047 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3048 desc_id, desc_index, 0,
3049 desc_buf, &buff_len);
3050
3051 if (ret) {
3052 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3053 __func__, desc_id, desc_index, param_offset, ret);
3054 goto out;
3055 }
3056
3057
3058 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3059 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3060 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3061 ret = -EINVAL;
3062 goto out;
3063 }
3064
3065
3066 if (is_kmalloc && param_size > buff_len)
3067 param_size = buff_len;
3068
3069 if (is_kmalloc)
3070 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3071out:
3072 if (is_kmalloc)
3073 kfree(desc_buf);
3074 return ret;
3075}
3076
3077static inline int ufshcd_read_desc(struct ufs_hba *hba,
3078 enum desc_idn desc_id,
3079 int desc_index,
3080 u8 *buf,
3081 u32 size)
3082{
3083 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3084}
3085
3086static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3087 u8 *buf,
3088 u32 size)
3089{
3090 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3091}
3092
3093static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3094{
3095 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3096}
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
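/**
 * ufshcd_read_string_desc - read a string descriptor, optionally converting
 * it from UTF-16 to printable ASCII in place
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where the descriptor is read
 * @size: size of @buf
 * @ascii: if true, convert the UTF-16 payload to ASCII
 *
 * Returns 0 in case of success, non-zero otherwise.
 */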
3108#define ASCII_STD true
3109static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3110 u8 *buf, u32 size, bool ascii)
3111{
3112 int err = 0;
3113
3114 err = ufshcd_read_desc(hba,
3115 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3116
3117 if (err) {
3118 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3119 __func__, QUERY_REQ_RETRIES, err);
3120 goto out;
3121 }
3122
3123 if (ascii) {
3124 int desc_len;
3125 int ascii_len;
3126 int i;
3127 char *buff_ascii;
3128
3129 desc_len = buf[0];
3130
3131 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3132 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3133 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3134 __func__);
3135 err = -ENOMEM;
3136 goto out;
3137 }
3138
3139 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3140 if (!buff_ascii) {
3141 err = -ENOMEM;
3142 goto out;
3143 }
3144
3145
3146
3147
3148
3149 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3150 desc_len - QUERY_DESC_HDR_SIZE,
3151 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3152
3153
3154 for (i = 0; i < ascii_len; i++)
3155 ufshcd_remove_non_printable(&buff_ascii[i]);
3156
3157 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3158 size - QUERY_DESC_HDR_SIZE);
3159 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3160 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3161 kfree(buff_ascii);
3162 }
3163out:
3164 return err;
3165}
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
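/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * for a given LUN
 * @hba: pointer to adapter instance
 * @lun: unit descriptor index (LUN)
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where the parameter is returned
 * @param_size: size of @param_read_buf
 *
 * Unit descriptors are only available for general purpose LUNs and the RPMB
 * well known LUN; -EOPNOTSUPP is returned otherwise.
 */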
3177static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3178 int lun,
3179 enum unit_desc_param param_offset,
3180 u8 *param_read_buf,
3181 u32 param_size)
3182{
3183
3184
3185
3186
3187 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
3188 return -EOPNOTSUPP;
3189
3190 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3191 param_offset, param_read_buf, param_size);
3192}
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
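/**
 * ufshcd_memory_alloc - allocate the DMA-coherent memory required by the
 * host: UTP command descriptors, UTP transfer request descriptor list, UTP
 * task management request descriptor list, and the local reference blocks
 * @hba: per adapter instance
 *
 * Returns 0 for success, -ENOMEM on allocation failure. The managed
 * (dmam_/devm_) allocators release the memory automatically on detach.
 */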
3207static int ufshcd_memory_alloc(struct ufs_hba *hba)
3208{
3209 size_t utmrdl_size, utrdl_size, ucdl_size;
3210
3211
3212 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3213 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3214 ucdl_size,
3215 &hba->ucdl_dma_addr,
3216 GFP_KERNEL);
3217
3218
3219
3220
3221
3222
3223
3224 if (!hba->ucdl_base_addr ||
3225 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3226 dev_err(hba->dev,
3227 "Command Descriptor Memory allocation failed\n");
3228 goto out;
3229 }
3230
3231
3232
3233
3234
3235 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3236 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3237 utrdl_size,
3238 &hba->utrdl_dma_addr,
3239 GFP_KERNEL);
3240 if (!hba->utrdl_base_addr ||
3241 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3242 dev_err(hba->dev,
3243 "Transfer Descriptor Memory allocation failed\n");
3244 goto out;
3245 }
3246
3247
3248
3249
3250
3251 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3252 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3253 utmrdl_size,
3254 &hba->utmrdl_dma_addr,
3255 GFP_KERNEL);
3256 if (!hba->utmrdl_base_addr ||
3257 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3258 dev_err(hba->dev,
3259 "Task Management Descriptor Memory allocation failed\n");
3260 goto out;
3261 }
3262
3263
3264 hba->lrb = devm_kzalloc(hba->dev,
3265 hba->nutrs * sizeof(struct ufshcd_lrb),
3266 GFP_KERNEL);
3267 if (!hba->lrb) {
3268 dev_err(hba->dev, "LRB Memory allocation failed\n");
3269 goto out;
3270 }
3271 return 0;
3272out:
3273 return -ENOMEM;
3274}
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
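/**
 * ufshcd_host_memory_configure - configure the host memory space: point each
 * UTP transfer request descriptor at its command descriptor (command UPIU,
 * response UPIU and PRDT) and cache the CPU/DMA addresses in the LRB array
 * @hba: per adapter instance
 */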
3289static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3290{
3291 struct utp_transfer_cmd_desc *cmd_descp;
3292 struct utp_transfer_req_desc *utrdlp;
3293 dma_addr_t cmd_desc_dma_addr;
3294 dma_addr_t cmd_desc_element_addr;
3295 u16 response_offset;
3296 u16 prdt_offset;
3297 int cmd_desc_size;
3298 int i;
3299
3300 utrdlp = hba->utrdl_base_addr;
3301 cmd_descp = hba->ucdl_base_addr;
3302
3303 response_offset =
3304 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3305 prdt_offset =
3306 offsetof(struct utp_transfer_cmd_desc, prd_table);
3307
3308 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3309 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3310
3311 for (i = 0; i < hba->nutrs; i++) {
3312
3313 cmd_desc_element_addr =
3314 (cmd_desc_dma_addr + (cmd_desc_size * i));
3315 utrdlp[i].command_desc_base_addr_lo =
3316 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3317 utrdlp[i].command_desc_base_addr_hi =
3318 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3319
3320
3321 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3322 utrdlp[i].response_upiu_offset =
3323 cpu_to_le16(response_offset);
3324 utrdlp[i].prd_table_offset =
3325 cpu_to_le16(prdt_offset);
3326 utrdlp[i].response_upiu_length =
3327 cpu_to_le16(ALIGNED_UPIU_SIZE);
3328 } else {
3329 utrdlp[i].response_upiu_offset =
3330 cpu_to_le16((response_offset >> 2));
3331 utrdlp[i].prd_table_offset =
3332 cpu_to_le16((prdt_offset >> 2));
3333 utrdlp[i].response_upiu_length =
3334 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3335 }
3336
3337 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3338 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3339 (i * sizeof(struct utp_transfer_req_desc));
3340 hba->lrb[i].ucd_req_ptr =
3341 (struct utp_upiu_req *)(cmd_descp + i);
3342 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3343 hba->lrb[i].ucd_rsp_ptr =
3344 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3345 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3346 response_offset;
3347 hba->lrb[i].ucd_prdt_ptr =
3348 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3349 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3350 prdt_offset;
3351 }
3352}
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
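/**
 * ufshcd_dme_link_startup - issue the DME_LINKSTARTUP UIC command to bring
 * up the UniPro link
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure.
 */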
3365static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3366{
3367 struct uic_command uic_cmd = {0};
3368 int ret;
3369
3370 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3371
3372 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3373 if (ret)
3374 dev_dbg(hba->dev,
3375 "dme-link-startup: error code %d\n", ret);
3376 return ret;
3377}
3378
3379static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3380{
3381 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3382 unsigned long min_sleep_time_us;
3383
3384 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3385 return;
3386
3387
3388
3389
3390
3391 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3392 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3393 } else {
3394 unsigned long delta =
3395 (unsigned long) ktime_to_us(
3396 ktime_sub(ktime_get(),
3397 hba->last_dme_cmd_tstamp));
3398
3399 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3400 min_sleep_time_us =
3401 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3402 else
3403 return;
3404 }
3405
3406
3407 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3408}
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
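/**
 * ufshcd_dme_set_attr - set a UIC (DME) attribute, locally or on the peer
 * @hba: per adapter instance
 * @attr_sel: UIC command argument1 (attribute id and GenSelectorIndex)
 * @attr_set: attribute set type
 * @mib_val: value to set
 * @peer: use DME_PEER_SET if non-zero, DME_SET otherwise
 *
 * Peer accesses are retried up to UFS_UIC_COMMAND_RETRIES times. Callers in
 * this file normally go through the ufshcd_dme_set()/ufshcd_dme_peer_set()
 * convenience wrappers.
 *
 * Returns 0 on success, non-zero value on failure.
 */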
3420int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3421 u8 attr_set, u32 mib_val, u8 peer)
3422{
3423 struct uic_command uic_cmd = {0};
3424 static const char *const action[] = {
3425 "dme-set",
3426 "dme-peer-set"
3427 };
3428 const char *set = action[!!peer];
3429 int ret;
3430 int retries = UFS_UIC_COMMAND_RETRIES;
3431
3432 uic_cmd.command = peer ?
3433 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3434 uic_cmd.argument1 = attr_sel;
3435 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3436 uic_cmd.argument3 = mib_val;
3437
3438 do {
3439
3440 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3441 if (ret)
3442 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3443 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3444 } while (ret && peer && --retries);
3445
3446 if (ret)
3447 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3448 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3449 UFS_UIC_COMMAND_RETRIES - retries);
3450
3451 return ret;
3452}
3453EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
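/**
 * ufshcd_dme_get_attr - get a UIC (DME) attribute, locally or from the peer
 * @hba: per adapter instance
 * @attr_sel: UIC command argument1 (attribute id and GenSelectorIndex)
 * @mib_val: where the attribute value is stored on success
 * @peer: use DME_PEER_GET if non-zero, DME_GET otherwise
 *
 * For hosts with UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE the link power mode
 * is temporarily switched to its AUTO variant around the peer access.
 *
 * Returns 0 on success, non-zero value on failure.
 */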
3464int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3465 u32 *mib_val, u8 peer)
3466{
3467 struct uic_command uic_cmd = {0};
3468 static const char *const action[] = {
3469 "dme-get",
3470 "dme-peer-get"
3471 };
3472 const char *get = action[!!peer];
3473 int ret;
3474 int retries = UFS_UIC_COMMAND_RETRIES;
3475 struct ufs_pa_layer_attr orig_pwr_info;
3476 struct ufs_pa_layer_attr temp_pwr_info;
3477 bool pwr_mode_change = false;
3478
3479 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3480 orig_pwr_info = hba->pwr_info;
3481 temp_pwr_info = orig_pwr_info;
3482
3483 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3484 orig_pwr_info.pwr_rx == FAST_MODE) {
3485 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3486 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3487 pwr_mode_change = true;
3488 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3489 orig_pwr_info.pwr_rx == SLOW_MODE) {
3490 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3491 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3492 pwr_mode_change = true;
3493 }
3494 if (pwr_mode_change) {
3495 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3496 if (ret)
3497 goto out;
3498 }
3499 }
3500
3501 uic_cmd.command = peer ?
3502 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3503 uic_cmd.argument1 = attr_sel;
3504
3505 do {
3506
3507 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3508 if (ret)
3509 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3510 get, UIC_GET_ATTR_ID(attr_sel), ret);
3511 } while (ret && peer && --retries);
3512
3513 if (ret)
3514 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3515 get, UIC_GET_ATTR_ID(attr_sel),
3516 UFS_UIC_COMMAND_RETRIES - retries);
3517
3518 if (mib_val && !ret)
3519 *mib_val = uic_cmd.argument3;
3520
3521 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3522 && pwr_mode_change)
3523 ufshcd_change_power_mode(hba, &orig_pwr_info);
3524out:
3525 return ret;
3526}
3527EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
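/**
 * ufshcd_uic_pwr_ctrl - execute a UIC command that changes the link power
 * mode (hibern8 enter/exit or PA_PWRMODE set) and wait for the power mode
 * change indication signalled through hba->uic_async_done
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * The function serializes on hba->uic_cmd_mutex itself.
 * Returns 0 on success, non-zero value on failure.
 */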
3545static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3546{
3547 struct completion uic_async_done;
3548 unsigned long flags;
3549 u8 status;
3550 int ret;
3551 bool reenable_intr = false;
3552
3553 mutex_lock(&hba->uic_cmd_mutex);
3554 init_completion(&uic_async_done);
3555 ufshcd_add_delay_before_dme_cmd(hba);
3556
3557 spin_lock_irqsave(hba->host->host_lock, flags);
3558 hba->uic_async_done = &uic_async_done;
3559 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3560 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3561
3562
3563
3564
3565 wmb();
3566 reenable_intr = true;
3567 }
3568 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3569 spin_unlock_irqrestore(hba->host->host_lock, flags);
3570 if (ret) {
3571 dev_err(hba->dev,
3572 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3573 cmd->command, cmd->argument3, ret);
3574 goto out;
3575 }
3576
3577 if (!wait_for_completion_timeout(hba->uic_async_done,
3578 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3579 dev_err(hba->dev,
3580 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3581 cmd->command, cmd->argument3);
3582 ret = -ETIMEDOUT;
3583 goto out;
3584 }
3585
3586 status = ufshcd_get_upmcrs(hba);
3587 if (status != PWR_LOCAL) {
3588 dev_err(hba->dev,
3589 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
3590 cmd->command, status);
3591 ret = (status != PWR_OK) ? status : -1;
3592 }
3593out:
3594 if (ret) {
3595 ufshcd_print_host_state(hba);
3596 ufshcd_print_pwr_info(hba);
3597 ufshcd_print_host_regs(hba);
3598 }
3599
3600 spin_lock_irqsave(hba->host->host_lock, flags);
3601 hba->active_uic_cmd = NULL;
3602 hba->uic_async_done = NULL;
3603 if (reenable_intr)
3604 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3605 spin_unlock_irqrestore(hba->host->host_lock, flags);
3606 mutex_unlock(&hba->uic_cmd_mutex);
3607
3608 return ret;
3609}
3610
3611
3612
3613
3614
3615
3616
3617
3618
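/**
 * ufshcd_uic_change_pwr_mode - change the link power mode by setting
 * PA_PWRMODE through a DME_SET UIC command
 * @hba: per adapter instance
 * @mode: power mode value (rx mode in the upper nibble, tx in the lower)
 *
 * Returns 0 on success, non-zero value on failure.
 */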
3619static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3620{
3621 struct uic_command uic_cmd = {0};
3622 int ret;
3623
3624 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3625 ret = ufshcd_dme_set(hba,
3626 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3627 if (ret) {
3628 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3629 __func__, ret);
3630 goto out;
3631 }
3632 }
3633
3634 uic_cmd.command = UIC_CMD_DME_SET;
3635 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3636 uic_cmd.argument3 = mode;
3637 ufshcd_hold(hba, false);
3638 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3639 ufshcd_release(hba);
3640
3641out:
3642 return ret;
3643}
3644
3645static int ufshcd_link_recovery(struct ufs_hba *hba)
3646{
3647 int ret;
3648 unsigned long flags;
3649
3650 spin_lock_irqsave(hba->host->host_lock, flags);
3651 hba->ufshcd_state = UFSHCD_STATE_RESET;
3652 ufshcd_set_eh_in_progress(hba);
3653 spin_unlock_irqrestore(hba->host->host_lock, flags);
3654
3655 ret = ufshcd_host_reset_and_restore(hba);
3656
3657 spin_lock_irqsave(hba->host->host_lock, flags);
3658 if (ret)
3659 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3660 ufshcd_clear_eh_in_progress(hba);
3661 spin_unlock_irqrestore(hba->host->host_lock, flags);
3662
3663 if (ret)
3664 dev_err(hba->dev, "%s: link recovery failed, err %d",
3665 __func__, ret);
3666
3667 return ret;
3668}
3669
3670static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3671{
3672 int ret;
3673 struct uic_command uic_cmd = {0};
3674 ktime_t start = ktime_get();
3675
3676 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3677
3678 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3679 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3680 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3681 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3682
3683 if (ret) {
3684 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3685 __func__, ret);
3686
3687
3688
3689
3690
3691 if (ufshcd_link_recovery(hba))
3692 ret = -ENOLINK;
3693 } else
3694 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3695 POST_CHANGE);
3696
3697 return ret;
3698}
3699
3700static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3701{
3702 int ret = 0, retries;
3703
3704 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3705 ret = __ufshcd_uic_hibern8_enter(hba);
3706 if (!ret || ret == -ENOLINK)
3707 goto out;
3708 }
3709out:
3710 return ret;
3711}
3712
3713static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3714{
3715 struct uic_command uic_cmd = {0};
3716 int ret;
3717 ktime_t start = ktime_get();
3718
3719 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3720
3721 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3722 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3723 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3724 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3725
3726 if (ret) {
3727 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3728 __func__, ret);
3729 ret = ufshcd_link_recovery(hba);
3730 } else {
3731 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3732 POST_CHANGE);
3733 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3734 hba->ufs_stats.hibern8_exit_cnt++;
3735 }
3736
3737 return ret;
3738}
3739
3740
3741
3742
3743
3744
3745static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3746{
3747 hba->pwr_info.gear_rx = UFS_PWM_G1;
3748 hba->pwr_info.gear_tx = UFS_PWM_G1;
3749 hba->pwr_info.lane_rx = 1;
3750 hba->pwr_info.lane_tx = 1;
3751 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3752 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3753 hba->pwr_info.hs_rate = 0;
3754}
3755
3756
3757
3758
3759
3760static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3761{
3762 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3763
3764 if (hba->max_pwr_info.is_valid)
3765 return 0;
3766
3767 pwr_info->pwr_tx = FAST_MODE;
3768 pwr_info->pwr_rx = FAST_MODE;
3769 pwr_info->hs_rate = PA_HS_MODE_B;
3770
3771
3772 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3773 &pwr_info->lane_rx);
3774 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3775 &pwr_info->lane_tx);
3776
3777 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3778 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3779 __func__,
3780 pwr_info->lane_rx,
3781 pwr_info->lane_tx);
3782 return -EINVAL;
3783 }
3784
3785
3786
3787
3788
3789
3790 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3791 if (!pwr_info->gear_rx) {
3792 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3793 &pwr_info->gear_rx);
3794 if (!pwr_info->gear_rx) {
3795 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3796 __func__, pwr_info->gear_rx);
3797 return -EINVAL;
3798 }
3799 pwr_info->pwr_rx = SLOW_MODE;
3800 }
3801
3802 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3803 &pwr_info->gear_tx);
3804 if (!pwr_info->gear_tx) {
3805 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3806 &pwr_info->gear_tx);
3807 if (!pwr_info->gear_tx) {
3808 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3809 __func__, pwr_info->gear_tx);
3810 return -EINVAL;
3811 }
3812 pwr_info->pwr_tx = SLOW_MODE;
3813 }
3814
3815 hba->max_pwr_info.is_valid = true;
3816 return 0;
3817}
3818
3819static int ufshcd_change_power_mode(struct ufs_hba *hba,
3820 struct ufs_pa_layer_attr *pwr_mode)
3821{
3822 int ret;
3823
3824
3825 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3826 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3827 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3828 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3829 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3830 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3831 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3832 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3833 return 0;
3834 }
3835
3836
3837
3838
3839
3840
3841
3842 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3843 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3844 pwr_mode->lane_rx);
3845 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3846 pwr_mode->pwr_rx == FAST_MODE)
3847 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
3848 else
3849 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
3850
3851 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3852 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3853 pwr_mode->lane_tx);
3854 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3855 pwr_mode->pwr_tx == FAST_MODE)
3856 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
3857 else
3858 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
3859
3860 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3861 pwr_mode->pwr_tx == FASTAUTO_MODE ||
3862 pwr_mode->pwr_rx == FAST_MODE ||
3863 pwr_mode->pwr_tx == FAST_MODE)
3864 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3865 pwr_mode->hs_rate);
3866
3867 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3868 | pwr_mode->pwr_tx);
3869
3870 if (ret) {
3871 dev_err(hba->dev,
3872 "%s: power mode change failed %d\n", __func__, ret);
3873 } else {
3874 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3875 pwr_mode);
3876
3877 memcpy(&hba->pwr_info, pwr_mode,
3878 sizeof(struct ufs_pa_layer_attr));
3879 }
3880
3881 return ret;
3882}
3883
3884
3885
3886
3887
3888
3889static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3890 struct ufs_pa_layer_attr *desired_pwr_mode)
3891{
3892 struct ufs_pa_layer_attr final_params = { 0 };
3893 int ret;
3894
3895 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3896 desired_pwr_mode, &final_params);
3897
3898 if (ret)
3899 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3900
3901 ret = ufshcd_change_power_mode(hba, &final_params);
3902 if (!ret)
3903 ufshcd_print_pwr_info(hba);
3904
3905 return ret;
3906}
3907
3908
3909
3910
3911
3912
3913
3914static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3915{
3916 int i;
3917 int err;
3918 bool flag_res = true;
3919
3920 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3921 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
3922 if (err) {
3923 dev_err(hba->dev,
3924 "%s setting fDeviceInit flag failed with error %d\n",
3925 __func__, err);
3926 goto out;
3927 }
3928
3929
3930 for (i = 0; i < 1000 && !err && flag_res; i++)
3931 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3932 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3933
3934 if (err)
3935 dev_err(hba->dev,
3936 "%s reading fDeviceInit flag failed with error %d\n",
3937 __func__, err);
3938 else if (flag_res)
3939 dev_err(hba->dev,
3940 "%s fDeviceInit was not cleared by the device\n",
3941 __func__);
3942
3943out:
3944 return err;
3945}
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
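/**
 * ufshcd_make_hba_operational - enable interrupts, program the request list
 * base addresses and set the run-stop registers so the controller can start
 * processing requests
 * @hba: per adapter instance
 *
 * Returns 0 on success, -EIO if the controller reports that the request
 * lists are not ready.
 */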
3959static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3960{
3961 int err = 0;
3962 u32 reg;
3963
3964
3965 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3966
3967
3968 if (ufshcd_is_intr_aggr_allowed(hba))
3969 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3970 else
3971 ufshcd_disable_intr_aggr(hba);
3972
3973
3974 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3975 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3976 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3977 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3978 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3979 REG_UTP_TASK_REQ_LIST_BASE_L);
3980 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3981 REG_UTP_TASK_REQ_LIST_BASE_H);
3982
3983
3984
3985
3986
3987 wmb();
3988
3989
3990
3991
3992 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
3993 if (!(ufshcd_get_lists_status(reg))) {
3994 ufshcd_enable_run_stop_reg(hba);
3995 } else {
3996 dev_err(hba->dev,
3997 "Host controller not ready to process requests");
3998 err = -EIO;
3999 goto out;
4000 }
4001
4002out:
4003 return err;
4004}
4005
4006
4007
4008
4009
4010
4011static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4012{
4013 int err;
4014
4015 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4016 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4017 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4018 10, 1, can_sleep);
4019 if (err)
4020 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4021}
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
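/**
 * ufshcd_hba_enable - reset and re-enable the host controller, then poll
 * until the controller reports itself active again
 * @hba: per adapter instance
 *
 * Returns 0 on success, -EIO if the controller does not become active
 * within the retry budget.
 */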
4033static int ufshcd_hba_enable(struct ufs_hba *hba)
4034{
4035 int retry;
4036
4037
4038
4039
4040
4041
4042
4043 if (!ufshcd_is_hba_active(hba))
4044
4045 ufshcd_hba_stop(hba, true);
4046
4047
4048 ufshcd_set_link_off(hba);
4049
4050 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4051
4052
4053 ufshcd_hba_start(hba);
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065 msleep(1);
4066
4067
4068 retry = 10;
4069 while (ufshcd_is_hba_active(hba)) {
4070 if (retry) {
4071 retry--;
4072 } else {
4073 dev_err(hba->dev,
4074 "Controller enable failed\n");
4075 return -EIO;
4076 }
4077 msleep(5);
4078 }
4079
4080
4081 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4082
4083 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4084
4085 return 0;
4086}
4087
4088static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4089{
4090 int tx_lanes, i, err = 0;
4091
4092 if (!peer)
4093 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4094 &tx_lanes);
4095 else
4096 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4097 &tx_lanes);
4098 for (i = 0; i < tx_lanes; i++) {
4099 if (!peer)
4100 err = ufshcd_dme_set(hba,
4101 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4102 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4103 0);
4104 else
4105 err = ufshcd_dme_peer_set(hba,
4106 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4107 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4108 0);
4109 if (err) {
4110 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4111 __func__, peer, i, err);
4112 break;
4113 }
4114 }
4115
4116 return err;
4117}
4118
4119static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4120{
4121 return ufshcd_disable_tx_lcc(hba, true);
4122}
4123
4124
4125
4126
4127
4128
4129
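/**
 * ufshcd_link_startup - initialize the UniPro link: issue DME_LINKSTARTUP
 * (with retries), apply the broken-LCC quirk handling, notify the variant
 * driver and finally make the controller operational
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure.
 */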
4130static int ufshcd_link_startup(struct ufs_hba *hba)
4131{
4132 int ret;
4133 int retries = DME_LINKSTARTUP_RETRIES;
4134 bool link_startup_again = false;
4135
4136
4137
4138
4139
4140 if (!ufshcd_is_ufs_dev_active(hba))
4141 link_startup_again = true;
4142
4143link_startup:
4144 do {
4145 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4146
4147 ret = ufshcd_dme_link_startup(hba);
4148
4149
4150 if (!ret && !ufshcd_is_device_present(hba)) {
4151 dev_err(hba->dev, "%s: Device not present\n", __func__);
4152 ret = -ENXIO;
4153 goto out;
4154 }
4155
4156
4157
4158
4159
4160
4161 if (ret && ufshcd_hba_enable(hba))
4162 goto out;
4163 } while (ret && retries--);
4164
4165 if (ret)
4166
4167 goto out;
4168
4169 if (link_startup_again) {
4170 link_startup_again = false;
4171 retries = DME_LINKSTARTUP_RETRIES;
4172 goto link_startup;
4173 }
4174
4175
4176 ufshcd_init_pwr_info(hba);
4177 ufshcd_print_pwr_info(hba);
4178
4179 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4180 ret = ufshcd_disable_device_tx_lcc(hba);
4181 if (ret)
4182 goto out;
4183 }
4184
4185
4186 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4187 if (ret)
4188 goto out;
4189
4190 ret = ufshcd_make_hba_operational(hba);
4191out:
4192 if (ret) {
4193 dev_err(hba->dev, "link startup failed %d\n", ret);
4194 ufshcd_print_host_state(hba);
4195 ufshcd_print_pwr_info(hba);
4196 ufshcd_print_host_regs(hba);
4197 }
4198 return ret;
4199}
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
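/**
 * ufshcd_verify_dev_init - verify device initialization by sending NOP OUT
 * UPIUs and expecting NOP IN responses
 * @hba: per adapter instance
 *
 * The NOP OUT is retried up to NOP_OUT_RETRIES times; a timeout is not
 * retried. Returns 0 on success, non-zero value on failure.
 */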
4211static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4212{
4213 int err = 0;
4214 int retries;
4215
4216 ufshcd_hold(hba, false);
4217 mutex_lock(&hba->dev_cmd.lock);
4218 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4219 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4220 NOP_OUT_TIMEOUT);
4221
4222 if (!err || err == -ETIMEDOUT)
4223 break;
4224
4225 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4226 }
4227 mutex_unlock(&hba->dev_cmd.lock);
4228 ufshcd_release(hba);
4229
4230 if (err)
4231 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4232 return err;
4233}
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
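/**
 * ufshcd_set_queue_depth - set the LUN queue depth from the unit
 * descriptor's queue depth parameter, clamped to the number of transfer
 * request slots; falls back to sensible defaults if the parameter cannot
 * be read
 * @sdev: pointer to SCSI device
 */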
4244static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4245{
4246 int ret = 0;
4247 u8 lun_qdepth;
4248 struct ufs_hba *hba;
4249
4250 hba = shost_priv(sdev->host);
4251
4252 lun_qdepth = hba->nutrs;
4253 ret = ufshcd_read_unit_desc_param(hba,
4254 ufshcd_scsi_to_upiu_lun(sdev->lun),
4255 UNIT_DESC_PARAM_LU_Q_DEPTH,
4256 &lun_qdepth,
4257 sizeof(lun_qdepth));
4258
4259
4260 if (ret == -EOPNOTSUPP)
4261 lun_qdepth = 1;
4262 else if (!lun_qdepth)
4263
4264 lun_qdepth = hba->nutrs;
4265 else
4266 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4267
4268 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4269 __func__, lun_qdepth);
4270 scsi_change_queue_depth(sdev, lun_qdepth);
4271}
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4285 u8 lun,
4286 u8 *b_lu_write_protect)
4287{
4288 int ret;
4289
4290 if (!b_lu_write_protect)
4291 ret = -EINVAL;
4292
4293
4294
4295
4296
4297 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4298 ret = -ENOTSUPP;
4299 else
4300 ret = ufshcd_read_unit_desc_param(hba,
4301 lun,
4302 UNIT_DESC_PARAM_LU_WR_PROTECT,
4303 b_lu_write_protect,
4304 sizeof(*b_lu_write_protect));
4305 return ret;
4306}
4307
4308
4309
4310
4311
4312
4313
4314
4315static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4316 struct scsi_device *sdev)
4317{
4318 if (hba->dev_info.f_power_on_wp_en &&
4319 !hba->dev_info.is_lu_power_on_wp) {
4320 u8 b_lu_write_protect;
4321
4322 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4323 &b_lu_write_protect) &&
4324 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4325 hba->dev_info.is_lu_power_on_wp = true;
4326 }
4327}
4328
4329
4330
4331
4332
4333
4334
4335static int ufshcd_slave_alloc(struct scsi_device *sdev)
4336{
4337 struct ufs_hba *hba;
4338
4339 hba = shost_priv(sdev->host);
4340
4341
4342 sdev->use_10_for_ms = 1;
4343
4344
4345 sdev->allow_restart = 1;
4346
4347
4348 sdev->no_report_opcodes = 1;
4349
4350
4351 ufshcd_set_queue_depth(sdev);
4352
4353 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4354
4355 return 0;
4356}
4357
4358
4359
4360
4361
4362
4363
4364
4365static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4366{
4367 struct ufs_hba *hba = shost_priv(sdev->host);
4368
4369 if (depth > hba->nutrs)
4370 depth = hba->nutrs;
4371 return scsi_change_queue_depth(sdev, depth);
4372}
4373
4374
4375
4376
4377
4378static int ufshcd_slave_configure(struct scsi_device *sdev)
4379{
4380 struct request_queue *q = sdev->request_queue;
4381
4382 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4383 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4384
4385 return 0;
4386}
4387
4388
4389
4390
4391
4392static void ufshcd_slave_destroy(struct scsi_device *sdev)
4393{
4394 struct ufs_hba *hba;
4395
4396 hba = shost_priv(sdev->host);
4397
4398 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4399 unsigned long flags;
4400
4401 spin_lock_irqsave(hba->host->host_lock, flags);
4402 hba->sdev_ufs_device = NULL;
4403 spin_unlock_irqrestore(hba->host->host_lock, flags);
4404 }
4405}
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4416{
4417 struct utp_task_req_desc *task_req_descp;
4418 struct utp_upiu_task_rsp *task_rsp_upiup;
4419 unsigned long flags;
4420 int ocs_value;
4421 int task_result;
4422
4423 spin_lock_irqsave(hba->host->host_lock, flags);
4424
4425
4426 __clear_bit(index, &hba->outstanding_tasks);
4427
4428 task_req_descp = hba->utmrdl_base_addr;
4429 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4430
4431 if (ocs_value == OCS_SUCCESS) {
4432 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4433 task_req_descp[index].task_rsp_upiu;
4434 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4435 task_result = task_result & MASK_TM_SERVICE_RESP;
4436 if (resp)
4437 *resp = (u8)task_result;
4438 } else {
4439 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4440 __func__, ocs_value);
4441 }
4442 spin_unlock_irqrestore(hba->host->host_lock, flags);
4443
4444 return ocs_value;
4445}
4446
4447
4448
4449
4450
4451
4452
4453
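/**
 * ufshcd_scsi_cmd_status - map the SCSI status from the response UPIU to a
 * SCSI midlayer result and copy sense data where applicable
 * @lrbp: pointer to local reference block of the completed command
 * @scsi_status: SCSI command status
 *
 * Returns the SCSI result value derived from the command status.
 */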
4454static inline int
4455ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4456{
4457 int result = 0;
4458
4459 switch (scsi_status) {
4460 case SAM_STAT_CHECK_CONDITION:
4461 ufshcd_copy_sense_data(lrbp);
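		/* fallthrough: CHECK CONDITION is reported as DID_OK with the sense data copied */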
4462 case SAM_STAT_GOOD:
4463 result |= DID_OK << 16 |
4464 COMMAND_COMPLETE << 8 |
4465 scsi_status;
4466 break;
4467 case SAM_STAT_TASK_SET_FULL:
4468 case SAM_STAT_BUSY:
4469 case SAM_STAT_TASK_ABORTED:
4470 ufshcd_copy_sense_data(lrbp);
4471 result |= scsi_status;
4472 break;
4473 default:
4474 result |= DID_ERROR << 16;
4475 break;
4476 }
4477
4478 return result;
4479}
4480
4481
4482
4483
4484
4485
4486
4487
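/**
 * ufshcd_transfer_rsp_status - decode the overall command status (OCS) and,
 * for successful transfers, the response UPIU, into a SCSI result
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of the completed command
 *
 * Returns the result of the command to notify the SCSI midlayer.
 */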
4488static inline int
4489ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4490{
4491 int result = 0;
4492 int scsi_status;
4493 int ocs;
4494
4495
4496 ocs = ufshcd_get_tr_ocs(lrbp);
4497
4498 switch (ocs) {
4499 case OCS_SUCCESS:
4500 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4501 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4502 switch (result) {
4503 case UPIU_TRANSACTION_RESPONSE:
4504
4505
4506
4507
4508 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4509
4510
4511
4512
4513
4514 scsi_status = result & MASK_SCSI_STATUS;
4515 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529 if (!hba->pm_op_in_progress &&
4530 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4531 schedule_work(&hba->eeh_work);
4532 break;
4533 case UPIU_TRANSACTION_REJECT_UPIU:
4534
4535 result = DID_ERROR << 16;
4536 dev_err(hba->dev,
4537 "Reject UPIU not fully implemented\n");
4538 break;
4539 default:
4540 dev_err(hba->dev,
4541 "Unexpected request response code = %x\n",
4542 result);
4543 result = DID_ERROR << 16;
4544 break;
4545 }
4546 break;
4547 case OCS_ABORTED:
4548 result |= DID_ABORT << 16;
4549 break;
4550 case OCS_INVALID_COMMAND_STATUS:
4551 result |= DID_REQUEUE << 16;
4552 break;
4553 case OCS_INVALID_CMD_TABLE_ATTR:
4554 case OCS_INVALID_PRDT_ATTR:
4555 case OCS_MISMATCH_DATA_BUF_SIZE:
4556 case OCS_MISMATCH_RESP_UPIU_SIZE:
4557 case OCS_PEER_COMM_FAILURE:
4558 case OCS_FATAL_ERROR:
4559 default:
4560 result |= DID_ERROR << 16;
4561 dev_err(hba->dev,
4562 "OCS error from controller = %x for tag %d\n",
4563 ocs, lrbp->task_tag);
4564 ufshcd_print_host_regs(hba);
4565 ufshcd_print_host_state(hba);
4566 break;
4567 }
4568
4569 if (host_byte(result) != DID_OK)
4570 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4571 return result;
4572}
4573
4574
4575
4576
4577
4578
4579static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4580{
4581 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4582 hba->active_uic_cmd->argument2 |=
4583 ufshcd_get_uic_cmd_result(hba);
4584 hba->active_uic_cmd->argument3 =
4585 ufshcd_get_dme_attr_val(hba);
4586 complete(&hba->active_uic_cmd->done);
4587 }
4588
4589 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4590 complete(hba->uic_async_done);
4591}
4592
4593
4594
4595
4596
4597
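/**
 * __ufshcd_transfer_req_compl - complete the requests whose bits are set in
 * @completed_reqs: finish SCSI commands, signal device management
 * completions and release the per-tag resources
 * @hba: per adapter instance
 * @completed_reqs: bitmask of completed request slots
 */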
4598static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4599 unsigned long completed_reqs)
4600{
4601 struct ufshcd_lrb *lrbp;
4602 struct scsi_cmnd *cmd;
4603 int result;
4604 int index;
4605
4606 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4607 lrbp = &hba->lrb[index];
4608 cmd = lrbp->cmd;
4609 if (cmd) {
4610 ufshcd_add_command_trace(hba, index, "complete");
4611 result = ufshcd_transfer_rsp_status(hba, lrbp);
4612 scsi_dma_unmap(cmd);
4613 cmd->result = result;
4614
4615 lrbp->cmd = NULL;
4616 clear_bit_unlock(index, &hba->lrb_in_use);
4617
4618 cmd->scsi_done(cmd);
4619 __ufshcd_release(hba);
4620 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4621 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4622 if (hba->dev_cmd.complete) {
4623 ufshcd_add_command_trace(hba, index,
4624 "dev_complete");
4625 complete(hba->dev_cmd.complete);
4626 }
4627 }
4628 if (ufshcd_is_clkscaling_supported(hba))
4629 hba->clk_scaling.active_reqs--;
4630 }
4631
4632
4633 hba->outstanding_reqs ^= completed_reqs;
4634
4635 ufshcd_clk_scaling_update_busy(hba);
4636
4637
4638 wake_up(&hba->dev_cmd.tag_wq);
4639}
4640
4641
4642
4643
4644
4645static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4646{
4647 unsigned long completed_reqs;
4648 u32 tr_doorbell;
4649
4650
4651
4652
4653
4654
4655
4656
4657 if (ufshcd_is_intr_aggr_allowed(hba))
4658 ufshcd_reset_intr_aggr(hba);
4659
4660 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4661 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4662
4663 __ufshcd_transfer_req_compl(hba, completed_reqs);
4664}
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4677{
4678 int err = 0;
4679 u32 val;
4680
4681 if (!(hba->ee_ctrl_mask & mask))
4682 goto out;
4683
4684 val = hba->ee_ctrl_mask & ~mask;
4685 val &= MASK_EE_STATUS;
4686 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4687 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4688 if (!err)
4689 hba->ee_ctrl_mask &= ~mask;
4690out:
4691 return err;
4692}
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4705{
4706 int err = 0;
4707 u32 val;
4708
4709 if (hba->ee_ctrl_mask & mask)
4710 goto out;
4711
4712 val = hba->ee_ctrl_mask | mask;
4713 val &= MASK_EE_STATUS;
4714 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4715 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4716 if (!err)
4717 hba->ee_ctrl_mask |= mask;
4718out:
4719 return err;
4720}
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
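/**
 * ufshcd_enable_auto_bkops - allow the device to manage background
 * operations on its own by setting the BKOPS enable flag; the urgent BKOPS
 * exception event is disabled while auto-BKOPS is on
 * @hba: per-adapter instance
 *
 * Returns zero on success, non-zero on failure.
 */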
4733static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4734{
4735 int err = 0;
4736
4737 if (hba->auto_bkops_enabled)
4738 goto out;
4739
4740 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4741 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4742 if (err) {
4743 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4744 __func__, err);
4745 goto out;
4746 }
4747
4748 hba->auto_bkops_enabled = true;
4749 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4750
4751
4752 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4753 if (err)
4754 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4755 __func__, err);
4756out:
4757 return err;
4758}
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4773{
4774 int err = 0;
4775
4776 if (!hba->auto_bkops_enabled)
4777 goto out;
4778
4779
4780
4781
4782
4783 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4784 if (err) {
4785 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4786 __func__, err);
4787 goto out;
4788 }
4789
4790 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4791 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4792 if (err) {
4793 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4794 __func__, err);
4795 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4796 goto out;
4797 }
4798
4799 hba->auto_bkops_enabled = false;
4800 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
4801out:
4802 return err;
4803}
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
4815{
4816 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
4817 hba->auto_bkops_enabled = false;
4818 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4819 ufshcd_enable_auto_bkops(hba);
4820 } else {
4821 hba->auto_bkops_enabled = true;
4822 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4823 ufshcd_disable_auto_bkops(hba);
4824 }
4825}
4826
4827static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4828{
4829 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4830 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
4831}
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
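/**
 * ufshcd_bkops_ctrl - enable or disable auto-BKOPS based on the current
 * BKOPS status attribute compared against @status
 * @hba: per-adapter instance
 * @status: bkops_status threshold
 *
 * If the device reports a status at or above the threshold, auto-BKOPS is
 * enabled; otherwise it is disabled so the host keeps control of when
 * background operations may run.
 *
 * Returns zero on success, non-zero on failure.
 */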
4849static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
4850 enum bkops_status status)
4851{
4852 int err;
4853 u32 curr_status = 0;
4854
4855 err = ufshcd_get_bkops_status(hba, &curr_status);
4856 if (err) {
4857 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4858 __func__, err);
4859 goto out;
4860 } else if (curr_status > BKOPS_STATUS_MAX) {
4861 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
4862 __func__, curr_status);
4863 err = -EINVAL;
4864 goto out;
4865 }
4866
4867 if (curr_status >= status)
4868 err = ufshcd_enable_auto_bkops(hba);
4869 else
4870 err = ufshcd_disable_auto_bkops(hba);
4871out:
4872 return err;
4873}
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
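/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable or disable auto bkops based on the urgent bkops level latched
 * when the device raised the exception event.
 */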
4885static int ufshcd_urgent_bkops(struct ufs_hba *hba)
4886{
4887 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
4888}
4889
4890static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
4891{
4892 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4893 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
4894}
4895
4896static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
4897{
4898 int err;
4899 u32 curr_status = 0;
4900
4901 if (hba->is_urgent_bkops_lvl_checked)
4902 goto enable_auto_bkops;
4903
4904 err = ufshcd_get_bkops_status(hba, &curr_status);
4905 if (err) {
4906 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4907 __func__, err);
4908 goto out;
4909 }
4910
4911
4912
4913
4914
4915
4916
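 /*
  * Some devices raise the urgent bkops exception even when the reported
  * bkops status is below BKOPS_STATUS_PERF_IMPACT. Handle such devices
  * by latching the reported status as the urgent bkops level used for
  * subsequent runtime control.
  */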
4917 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4918 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4919 __func__, curr_status);
4920
4921 hba->urgent_bkops_lvl = curr_status;
4922 hba->is_urgent_bkops_lvl_checked = true;
4923 }
4924
4925enable_auto_bkops:
4926 err = ufshcd_enable_auto_bkops(hba);
4927out:
4928 if (err < 0)
4929 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4930 __func__, err);
4931}
4932
4933
4934
4935
4936
4937
4938
4939
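/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read the exception event status from the device and handle the events
 * the host has subscribed to (currently only urgent bkops).
 */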
4940static void ufshcd_exception_event_handler(struct work_struct *work)
4941{
4942 struct ufs_hba *hba;
4943 int err;
4944 u32 status = 0;
4945 hba = container_of(work, struct ufs_hba, eeh_work);
4946
4947 pm_runtime_get_sync(hba->dev);
4948 err = ufshcd_get_ee_status(hba, &status);
4949 if (err) {
4950 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4951 __func__, err);
4952 goto out;
4953 }
4954
4955 status &= hba->ee_ctrl_mask;
4956
4957 if (status & MASK_EE_URGENT_BKOPS)
4958 ufshcd_bkops_exception_event_handler(hba);
4959
4960out:
4961 pm_runtime_put_sync(hba->dev);
4962 return;
4963}
4964
4965
4966static void ufshcd_complete_requests(struct ufs_hba *hba)
4967{
4968 ufshcd_transfer_req_compl(hba);
4969 ufshcd_tmc_handler(hba);
4970}
4971
4972
4973
4974
4975
4976
4977
4978
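/**
 * ufshcd_quirk_dl_nac_errors - handle DL NAC errors for quirky devices
 * @hba: per-adapter instance
 *
 * For devices with UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS, a lone
 * DL NAC_RECEIVED error may be recoverable: wait briefly, re-check the
 * saved error state and verify the device with a NOP. If only the NAC
 * error was seen and the device responds, full error handling is skipped.
 *
 * Returns true if the full error handler still needs to run, false if
 * the error was handled here.
 */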
4979static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
4980{
4981 unsigned long flags;
4982 bool err_handling = true;
4983
4984 spin_lock_irqsave(hba->host->host_lock, flags);
4985
4986
4987
4988
4989 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
4990 goto out;
4991
4992 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
4993 ((hba->saved_err & UIC_ERROR) &&
4994 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
4995 goto out;
4996
4997 if ((hba->saved_err & UIC_ERROR) &&
4998 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
4999 int err;
5000
5001
5002
5003 spin_unlock_irqrestore(hba->host->host_lock, flags);
5004 msleep(50);
5005 spin_lock_irqsave(hba->host->host_lock, flags);
5006
5007
5008
5009
5010
5011 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5012 ((hba->saved_err & UIC_ERROR) &&
5013 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5014 goto out;
5015
5016
5017
5018
5019
5020
5021
5022
5023 spin_unlock_irqrestore(hba->host->host_lock, flags);
5024 err = ufshcd_verify_dev_init(hba);
5025 spin_lock_irqsave(hba->host->host_lock, flags);
5026
5027 if (err)
5028 goto out;
5029
5030
5031 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5032 hba->saved_err &= ~UIC_ERROR;
5033
5034 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5035 if (!hba->saved_uic_err) {
5036 err_handling = false;
5037 goto out;
5038 }
5039 }
5040out:
5041 spin_unlock_irqrestore(hba->host->host_lock, flags);
5042 return err_handling;
5043}
5044
5045
5046
5047
5048
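/**
 * ufshcd_err_handler - handle UFS errors that require host reset
 * @work: pointer to work structure
 */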
5049static void ufshcd_err_handler(struct work_struct *work)
5050{
5051 struct ufs_hba *hba;
5052 unsigned long flags;
5053 u32 err_xfer = 0;
5054 u32 err_tm = 0;
5055 int err = 0;
5056 int tag;
5057 bool needs_reset = false;
5058
5059 hba = container_of(work, struct ufs_hba, eh_work);
5060
5061 pm_runtime_get_sync(hba->dev);
5062 ufshcd_hold(hba, false);
5063
5064 spin_lock_irqsave(hba->host->host_lock, flags);
5065 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5066 goto out;
5067
5068 hba->ufshcd_state = UFSHCD_STATE_RESET;
5069 ufshcd_set_eh_in_progress(hba);
5070
5071
5072 ufshcd_complete_requests(hba);
5073
5074 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5075 bool ret;
5076
5077 spin_unlock_irqrestore(hba->host->host_lock, flags);
5078
5079 ret = ufshcd_quirk_dl_nac_errors(hba);
5080 spin_lock_irqsave(hba->host->host_lock, flags);
5081 if (!ret)
5082 goto skip_err_handling;
5083 }
5084 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5085 ((hba->saved_err & UIC_ERROR) &&
5086 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5087 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5088 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5089 needs_reset = true;
5090
5091
5092
5093
5094
5095
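 /*
  * If a full host reset is required, skip clearing the individual
  * pending transfer and task management requests and go straight to
  * the reset path.
  */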
5096 if (needs_reset)
5097 goto skip_pending_xfer_clear;
5098
5099
5100 spin_unlock_irqrestore(hba->host->host_lock, flags);
5101
5102 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5103 if (ufshcd_clear_cmd(hba, tag)) {
5104 err_xfer = true;
5105 goto lock_skip_pending_xfer_clear;
5106 }
5107 }
5108
5109
5110 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5111 if (ufshcd_clear_tm_cmd(hba, tag)) {
5112 err_tm = true;
5113 goto lock_skip_pending_xfer_clear;
5114 }
5115 }
5116
5117lock_skip_pending_xfer_clear:
5118 spin_lock_irqsave(hba->host->host_lock, flags);
5119
5120
5121 ufshcd_complete_requests(hba);
5122
5123 if (err_xfer || err_tm)
5124 needs_reset = true;
5125
5126skip_pending_xfer_clear:
5127
5128 if (needs_reset) {
5129 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5130
5131
5132
5133
5134
5135
5136
5137
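 /*
  * ufshcd_reset_and_restore() needs at least one empty doorbell slot to
  * send the device management commands (NOP OUT and query). If every
  * slot is occupied, forcefully complete the command in the last slot
  * to free it up.
  */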
5138 if (hba->outstanding_reqs == max_doorbells)
5139 __ufshcd_transfer_req_compl(hba,
5140 (1UL << (hba->nutrs - 1)));
5141
5142 spin_unlock_irqrestore(hba->host->host_lock, flags);
5143 err = ufshcd_reset_and_restore(hba);
5144 spin_lock_irqsave(hba->host->host_lock, flags);
5145 if (err) {
5146 dev_err(hba->dev, "%s: reset and restore failed\n",
5147 __func__);
5148 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5149 }
5150
5151
5152
5153
5154 scsi_report_bus_reset(hba->host, 0);
5155 hba->saved_err = 0;
5156 hba->saved_uic_err = 0;
5157 }
5158
5159skip_err_handling:
5160 if (!needs_reset) {
5161 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5162 if (hba->saved_err || hba->saved_uic_err)
5163 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5164 __func__, hba->saved_err, hba->saved_uic_err);
5165 }
5166
5167 ufshcd_clear_eh_in_progress(hba);
5168
5169out:
5170 spin_unlock_irqrestore(hba->host->host_lock, flags);
5171 scsi_unblock_requests(hba->host);
5172 ufshcd_release(hba);
5173 pm_runtime_put_sync(hba->dev);
5174}
5175
5176static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5177 u32 reg)
5178{
5179 reg_hist->reg[reg_hist->pos] = reg;
5180 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5181 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5182}
5183
5184
5185
5186
5187
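/**
 * ufshcd_update_uic_error - read the UIC layer error registers, record
 *	them in the error history and update hba->uic_error
 * @hba: per-adapter instance
 */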
5188static void ufshcd_update_uic_error(struct ufs_hba *hba)
5189{
5190 u32 reg;
5191
5192
5193 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5194
5195 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5196 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5197
5198
5199
5200
5201 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5202 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5203 }
5204
5205
5206 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5207 if (reg)
5208 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5209
5210 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5211 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5212 else if (hba->dev_quirks &
5213 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5214 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5215 hba->uic_error |=
5216 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5217 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5218 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5219 }
5220
5221
5222 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5223 if (reg) {
5224 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5225 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5226 }
5227
5228 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5229 if (reg) {
5230 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5231 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5232 }
5233
5234 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5235 if (reg) {
5236 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5237 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5238 }
5239
5240 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5241 __func__, hba->uic_error);
5242}
5243
5244
5245
5246
5247
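/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */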
5248static void ufshcd_check_errors(struct ufs_hba *hba)
5249{
5250 bool queue_eh_work = false;
5251
5252 if (hba->errors & INT_FATAL_ERRORS)
5253 queue_eh_work = true;
5254
5255 if (hba->errors & UIC_ERROR) {
5256 hba->uic_error = 0;
5257 ufshcd_update_uic_error(hba);
5258 if (hba->uic_error)
5259 queue_eh_work = true;
5260 }
5261
5262 if (queue_eh_work) {
5263
5264
5265
5266
5267 hba->saved_err |= hba->errors;
5268 hba->saved_uic_err |= hba->uic_error;
5269
5270
5271 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5272
5273 scsi_block_requests(hba->host);
5274
5275 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5276
5277
5278 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5279 bool pr_prdt = !!(hba->saved_err &
5280 SYSTEM_BUS_FATAL_ERROR);
5281
5282 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5283 __func__, hba->saved_err,
5284 hba->saved_uic_err);
5285
5286 ufshcd_print_host_regs(hba);
5287 ufshcd_print_pwr_info(hba);
5288 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5289 ufshcd_print_trs(hba, hba->outstanding_reqs,
5290 pr_prdt);
5291 }
5292 schedule_work(&hba->eh_work);
5293 }
5294 }
5295
5296
5297
5298
5299
5300
5301}
5302
5303
5304
5305
5306
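/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per-adapter instance
 */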
5307static void ufshcd_tmc_handler(struct ufs_hba *hba)
5308{
5309 u32 tm_doorbell;
5310
5311 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5312 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5313 wake_up(&hba->tm_wq);
5314}
5315
5316
5317
5318
5319
5320
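/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per-adapter instance
 * @intr_status: contains interrupts generated by the controller
 */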
5321static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5322{
5323 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5324 if (hba->errors)
5325 ufshcd_check_errors(hba);
5326
5327 if (intr_status & UFSHCD_UIC_MASK)
5328 ufshcd_uic_cmd_compl(hba, intr_status);
5329
5330 if (intr_status & UTP_TASK_REQ_COMPL)
5331 ufshcd_tmc_handler(hba);
5332
5333 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5334 ufshcd_transfer_req_compl(hba);
5335}
5336
5337
5338
5339
5340
5341
5342
5343
5344
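/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED if at least one enabled interrupt source was
 * serviced, IRQ_NONE otherwise.
 */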
5345static irqreturn_t ufshcd_intr(int irq, void *__hba)
5346{
5347 u32 intr_status, enabled_intr_status;
5348 irqreturn_t retval = IRQ_NONE;
5349 struct ufs_hba *hba = __hba;
5350
5351 spin_lock(hba->host->host_lock);
5352 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5353 enabled_intr_status =
5354 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5355
5356 if (intr_status)
5357 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5358
5359 if (enabled_intr_status) {
5360 ufshcd_sl_intr(hba, enabled_intr_status);
5361 retval = IRQ_HANDLED;
5362 }
5363 spin_unlock(hba->host->host_lock);
5364 return retval;
5365}
5366
5367static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5368{
5369 int err = 0;
5370 u32 mask = 1 << tag;
5371 unsigned long flags;
5372
5373 if (!test_bit(tag, &hba->outstanding_tasks))
5374 goto out;
5375
5376 spin_lock_irqsave(hba->host->host_lock, flags);
5377 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
5378 spin_unlock_irqrestore(hba->host->host_lock, flags);
5379
5380
5381 err = ufshcd_wait_for_register(hba,
5382 REG_UTP_TASK_REQ_DOOR_BELL,
5383 mask, 0, 1000, 1000, true);
5384out:
5385 return err;
5386}
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
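/**
 * ufshcd_issue_tm_cmd - issues task management commands to the controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which the TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */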
5398static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5399 u8 tm_function, u8 *tm_response)
5400{
5401 struct utp_task_req_desc *task_req_descp;
5402 struct utp_upiu_task_req *task_req_upiup;
5403 struct Scsi_Host *host;
5404 unsigned long flags;
5405 int free_slot;
5406 int err;
5407 int task_tag;
5408
5409 host = hba->host;
5410
5411
5412
5413
5414
5415
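 /*
  * Get a free slot for the task management request; sleep if none is
  * available. Although wait_event() can sleep indefinitely, outstanding
  * TM commands complete (or time out) within TM_CMD_TIMEOUT, so a slot
  * frees up in bounded time.
  */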
5416 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5417 ufshcd_hold(hba, false);
5418
5419 spin_lock_irqsave(host->host_lock, flags);
5420 task_req_descp = hba->utmrdl_base_addr;
5421 task_req_descp += free_slot;
5422
5423
5424 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5425 task_req_descp->header.dword_2 =
5426 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5427
5428
5429 task_req_upiup =
5430 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5431 task_tag = hba->nutrs + free_slot;
5432 task_req_upiup->header.dword_0 =
5433 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5434 lun_id, task_tag);
5435 task_req_upiup->header.dword_1 =
5436 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5437
5438
5439
5440
5441 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5442 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5443
5444 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5445
5446
5447 __set_bit(free_slot, &hba->outstanding_tasks);
5448
5449
5450 wmb();
5451
5452 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5453
5454 wmb();
5455
5456 spin_unlock_irqrestore(host->host_lock, flags);
5457
5458
5459 err = wait_event_timeout(hba->tm_wq,
5460 test_bit(free_slot, &hba->tm_condition),
5461 msecs_to_jiffies(TM_CMD_TIMEOUT));
5462 if (!err) {
5463 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5464 __func__, tm_function);
5465 if (ufshcd_clear_tm_cmd(hba, free_slot))
5466 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5467 __func__, free_slot);
5468 err = -ETIMEDOUT;
5469 } else {
5470 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5471 }
5472
5473 clear_bit(free_slot, &hba->tm_condition);
5474 ufshcd_put_tm_slot(hba, free_slot);
5475 wake_up(&hba->tm_tag_wq);
5476
5477 ufshcd_release(hba);
5478 return err;
5479}
5480
5481
5482
5483
5484
5485
5486
5487
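/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *	the SCSI layer
 * @cmd: SCSI command pointer
 *
 * Issues a logical unit reset to the LUN of @cmd and clears all transfer
 * requests pending for that LUN.
 *
 * Returns SUCCESS/FAILED
 */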
5488static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5489{
5490 struct Scsi_Host *host;
5491 struct ufs_hba *hba;
5492 unsigned int tag;
5493 u32 pos;
5494 int err;
5495 u8 resp = 0xF;
5496 struct ufshcd_lrb *lrbp;
5497 unsigned long flags;
5498
5499 host = cmd->device->host;
5500 hba = shost_priv(host);
5501 tag = cmd->request->tag;
5502
5503 lrbp = &hba->lrb[tag];
5504 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5505 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5506 if (!err)
5507 err = resp;
5508 goto out;
5509 }
5510
5511
5512 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5513 if (hba->lrb[pos].lun == lrbp->lun) {
5514 err = ufshcd_clear_cmd(hba, pos);
5515 if (err)
5516 break;
5517 }
5518 }
5519 spin_lock_irqsave(host->host_lock, flags);
5520 ufshcd_transfer_req_compl(hba);
5521 spin_unlock_irqrestore(host->host_lock, flags);
5522
5523out:
5524 hba->req_abort_count = 0;
5525 if (!err) {
5526 err = SUCCESS;
5527 } else {
5528 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5529 err = FAILED;
5530 }
5531 return err;
5532}
5533
5534static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5535{
5536 struct ufshcd_lrb *lrbp;
5537 int tag;
5538
5539 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5540 lrbp = &hba->lrb[tag];
5541 lrbp->req_abort_skip = true;
5542 }
5543}
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
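/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Issue UFS_QUERY_TASK to check whether the command is still pending in
 * the device; if it is, issue UFS_ABORT_TASK and clear the corresponding
 * transfer request in the host controller. If the command has already
 * completed on the device side, only wait for its doorbell bit to clear.
 *
 * Returns SUCCESS/FAILED
 */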
5557static int ufshcd_abort(struct scsi_cmnd *cmd)
5558{
5559 struct Scsi_Host *host;
5560 struct ufs_hba *hba;
5561 unsigned long flags;
5562 unsigned int tag;
5563 int err = 0;
5564 int poll_cnt;
5565 u8 resp = 0xF;
5566 struct ufshcd_lrb *lrbp;
5567 u32 reg;
5568
5569 host = cmd->device->host;
5570 hba = shost_priv(host);
5571 tag = cmd->request->tag;
5572 lrbp = &hba->lrb[tag];
5573 if (!ufshcd_valid_tag(hba, tag)) {
5574 dev_err(hba->dev,
5575 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5576 __func__, tag, cmd, cmd->request);
5577 BUG();
5578 }
5579
5580
5581
5582
5583
5584
5585
5586
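 /*
  * Task aborts to the UFS device W-LUN are not supported, so escalate
  * straight to a host reset and let the SCSI error handling layer
  * recover.
  */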
5587 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5588 return ufshcd_eh_host_reset_handler(cmd);
5589
5590 ufshcd_hold(hba, false);
5591 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5592
5593 if (!(test_bit(tag, &hba->outstanding_reqs))) {
5594 dev_err(hba->dev,
5595 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5596 __func__, tag, hba->outstanding_reqs, reg);
5597 goto out;
5598 }
5599
5600 if (!(reg & (1 << tag))) {
5601 dev_err(hba->dev,
5602 "%s: cmd was completed, but without a notifying intr, tag = %d",
5603 __func__, tag);
5604 }
5605
5606
5607 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5608
5609
5610
5611
5612
5613
5614
5615
5616 scsi_print_command(hba->lrb[tag].cmd);
5617 if (!hba->req_abort_count) {
5618 ufshcd_print_host_regs(hba);
5619 ufshcd_print_host_state(hba);
5620 ufshcd_print_pwr_info(hba);
5621 ufshcd_print_trs(hba, 1 << tag, true);
5622 } else {
5623 ufshcd_print_trs(hba, 1 << tag, false);
5624 }
5625 hba->req_abort_count++;
5626
5627
5628 if (lrbp->req_abort_skip) {
5629 err = -EIO;
5630 goto out;
5631 }
5632
5633 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5634 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5635 UFS_QUERY_TASK, &resp);
5636 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5637
5638 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5639 __func__, tag);
5640 break;
5641 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5642
5643
5644
5645
5646 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5647 __func__, tag);
5648 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5649 if (reg & (1 << tag)) {
5650
5651 usleep_range(100, 200);
5652 continue;
5653 }
5654
5655 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5656 __func__, tag);
5657 goto out;
5658 } else {
5659 dev_err(hba->dev,
5660 "%s: no response from device. tag = %d, err %d\n",
5661 __func__, tag, err);
5662 if (!err)
5663 err = resp;
5664 goto out;
5665 }
5666 }
5667
5668 if (!poll_cnt) {
5669 err = -EBUSY;
5670 goto out;
5671 }
5672
5673 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5674 UFS_ABORT_TASK, &resp);
5675 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5676 if (!err) {
5677 err = resp;
5678 dev_err(hba->dev, "%s: failed to abort task. tag = %d, err %d\n",
5679 __func__, tag, err);
5680 }
5681 goto out;
5682 }
5683
5684 err = ufshcd_clear_cmd(hba, tag);
5685 if (err) {
5686 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5687 __func__, tag, err);
5688 goto out;
5689 }
5690
5691 scsi_dma_unmap(cmd);
5692
5693 spin_lock_irqsave(host->host_lock, flags);
5694 ufshcd_outstanding_req_clear(hba, tag);
5695 hba->lrb[tag].cmd = NULL;
5696 spin_unlock_irqrestore(host->host_lock, flags);
5697
5698 clear_bit_unlock(tag, &hba->lrb_in_use);
5699 wake_up(&hba->dev_cmd.tag_wq);
5700
5701out:
5702 if (!err) {
5703 err = SUCCESS;
5704 } else {
5705 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5706 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
5707 err = FAILED;
5708 }
5709
5710
5711
5712
5713
5714 ufshcd_release(hba);
5715 return err;
5716}
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
5727
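/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Stop the controller, scale clocks back to the maximum frequency,
 * re-enable the controller and run the full probe sequence again. Note
 * that the controller reset may issue DME_RESET to the local and remote
 * (device) UniPro stacks, so previously set attributes may be lost.
 *
 * Returns zero on success, non-zero on failure.
 */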
5728static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5729{
5730 int err;
5731 unsigned long flags;
5732
5733
5734 spin_lock_irqsave(hba->host->host_lock, flags);
5735 ufshcd_hba_stop(hba, false);
5736 spin_unlock_irqrestore(hba->host->host_lock, flags);
5737
5738
5739 ufshcd_scale_clks(hba, true);
5740
5741 err = ufshcd_hba_enable(hba);
5742 if (err)
5743 goto out;
5744
5745
5746 err = ufshcd_probe_hba(hba);
5747
5748 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
5749 err = -EIO;
5750out:
5751 if (err)
5752 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5753
5754 return err;
5755}
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
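/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover the device and host controller, retrying up to
 * MAX_HOST_RESET_RETRIES times, then flush any requests that may have
 * completed in the meantime.
 *
 * Returns zero on success, non-zero on failure.
 */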
5766static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5767{
5768 int err = 0;
5769 unsigned long flags;
5770 int retries = MAX_HOST_RESET_RETRIES;
5771
5772 do {
5773 err = ufshcd_host_reset_and_restore(hba);
5774 } while (err && --retries);
5775
5776
5777
5778
5779
5780 spin_lock_irqsave(hba->host->host_lock, flags);
5781 ufshcd_transfer_req_compl(hba);
5782 ufshcd_tmc_handler(hba);
5783 spin_unlock_irqrestore(hba->host->host_lock, flags);
5784
5785 return err;
5786}
5787
5788
5789
5790
5791
5792
5793
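/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to the
 *	SCSI layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */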
5794static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
5795{
5796 int err;
5797 unsigned long flags;
5798 struct ufs_hba *hba;
5799
5800 hba = shost_priv(cmd->device->host);
5801
5802 ufshcd_hold(hba, false);
5803
5804
5805
5806
5807
5808
5809 do {
5810 spin_lock_irqsave(hba->host->host_lock, flags);
5811 if (!(work_pending(&hba->eh_work) ||
5812 hba->ufshcd_state == UFSHCD_STATE_RESET ||
5813 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
5814 break;
5815 spin_unlock_irqrestore(hba->host->host_lock, flags);
5816 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
5817 flush_work(&hba->eh_work);
5818 } while (1);
5819
5820 hba->ufshcd_state = UFSHCD_STATE_RESET;
5821 ufshcd_set_eh_in_progress(hba);
5822 spin_unlock_irqrestore(hba->host->host_lock, flags);
5823
5824 err = ufshcd_reset_and_restore(hba);
5825
5826 spin_lock_irqsave(hba->host->host_lock, flags);
5827 if (!err) {
5828 err = SUCCESS;
5829 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5830 } else {
5831 err = FAILED;
5832 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5833 }
5834 ufshcd_clear_eh_in_progress(hba);
5835 spin_unlock_irqrestore(hba->host->host_lock, flags);
5836
5837 ufshcd_release(hba);
5838 return err;
5839}
5840
5841
5842
5843
5844
5845
5846
5847
5848
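/**
 * ufshcd_get_max_icc_level - calculate the max ICC level for a regulator
 * @sup_curr_uA: maximum current supported by the regulator
 * @start_scan: row of the power descriptor table to start scanning from
 * @buff: power descriptor buffer
 *
 * Scan the table backwards from @start_scan and return the highest ICC
 * level whose current requirement fits within @sup_curr_uA.
 */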
5849static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
5850{
5851 int i;
5852 int curr_uA;
5853 u16 data;
5854 u16 unit;
5855
5856 for (i = start_scan; i >= 0; i--) {
5857 data = be16_to_cpup((__be16 *)&buff[2 * i]);
5858 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
5859 ATTR_ICC_LVL_UNIT_OFFSET;
5860 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
5861 switch (unit) {
5862 case UFSHCD_NANO_AMP:
5863 curr_uA = curr_uA / 1000;
5864 break;
5865 case UFSHCD_MILI_AMP:
5866 curr_uA = curr_uA * 1000;
5867 break;
5868 case UFSHCD_AMP:
5869 curr_uA = curr_uA * 1000 * 1000;
5870 break;
5871 case UFSHCD_MICRO_AMP:
5872 default:
5873 break;
5874 }
5875 if (sup_curr_uA >= curr_uA)
5876 break;
5877 }
5878 if (i < 0) {
5879 i = 0;
5880 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
5881 }
5882
5883 return (u32)i;
5884}
5885
5886
5887
5888
5889
5890
5891
5892
5893
5894
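/**
 * ufshcd_find_max_sup_active_icc_level - find the max supported active
 *	ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer
 * @len: length of desc_buf
 *
 * Calculate the maximum active ICC level supported by all three rails
 * (VCC, VCCQ, VCCQ2) given their regulator current limits.
 *
 * Returns the calculated ICC level.
 */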
5895static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
5896 u8 *desc_buf, int len)
5897{
5898 u32 icc_level = 0;
5899
5900 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
5901 !hba->vreg_info.vccq2) {
5902 dev_err(hba->dev,
5903 "%s: Regulator capability was not set, actvIccLevel=%d",
5904 __func__, icc_level);
5905 goto out;
5906 }
5907
5908 if (hba->vreg_info.vcc)
5909 icc_level = ufshcd_get_max_icc_level(
5910 hba->vreg_info.vcc->max_uA,
5911 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
5912 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
5913
5914 if (hba->vreg_info.vccq)
5915 icc_level = ufshcd_get_max_icc_level(
5916 hba->vreg_info.vccq->max_uA,
5917 icc_level,
5918 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
5919
5920 if (hba->vreg_info.vccq2)
5921 icc_level = ufshcd_get_max_icc_level(
5922 hba->vreg_info.vccq2->max_uA,
5923 icc_level,
5924 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
5925out:
5926 return icc_level;
5927}
5928
5929static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5930{
5931 int ret;
5932 int buff_len = hba->desc_size.pwr_desc;
5933 u8 desc_buf[hba->desc_size.pwr_desc];
5934
5935 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5936 if (ret) {
5937 dev_err(hba->dev,
5938 "%s: Failed reading power descriptor.len = %d ret = %d",
5939 __func__, buff_len, ret);
5940 return;
5941 }
5942
5943 hba->init_prefetch_data.icc_level =
5944 ufshcd_find_max_sup_active_icc_level(hba,
5945 desc_buf, buff_len);
5946 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
5947 __func__, hba->init_prefetch_data.icc_level);
5948
5949 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5950 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
5951 &hba->init_prefetch_data.icc_level);
5952
5953 if (ret)
5954 dev_err(hba->dev,
5955 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
5956 __func__, hba->init_prefetch_data.icc_level, ret);
5957
5958}
5959
5960
5961
5962
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982
5983
5984
5985
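/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS devices expose several well known logical units (W-LUs). The
 * "UFS Device" W-LU is used to send power management commands
 * (START STOP UNIT, REQUEST SENSE), so it is added first and a reference
 * to it is kept in hba->sdev_ufs_device; the BOOT and RPMB W-LUs are
 * added next.
 *
 * Returns zero on success (all required W-LUs were added successfully),
 * non-zero error value on failure.
 */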
5986static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
5987{
5988 int ret = 0;
5989 struct scsi_device *sdev_rpmb;
5990 struct scsi_device *sdev_boot;
5991
5992 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
5993 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
5994 if (IS_ERR(hba->sdev_ufs_device)) {
5995 ret = PTR_ERR(hba->sdev_ufs_device);
5996 hba->sdev_ufs_device = NULL;
5997 goto out;
5998 }
5999 scsi_device_put(hba->sdev_ufs_device);
6000
6001 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6002 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6003 if (IS_ERR(sdev_boot)) {
6004 ret = PTR_ERR(sdev_boot);
6005 goto remove_sdev_ufs_device;
6006 }
6007 scsi_device_put(sdev_boot);
6008
6009 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6010 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6011 if (IS_ERR(sdev_rpmb)) {
6012 ret = PTR_ERR(sdev_rpmb);
6013 goto remove_sdev_boot;
6014 }
6015 scsi_device_put(sdev_rpmb);
6016 goto out;
6017
6018remove_sdev_boot:
6019 scsi_remove_device(sdev_boot);
6020remove_sdev_ufs_device:
6021 scsi_remove_device(hba->sdev_ufs_device);
6022out:
6023 return ret;
6024}
6025
6026static int ufs_get_device_desc(struct ufs_hba *hba,
6027 struct ufs_dev_desc *dev_desc)
6028{
6029 int err;
6030 u8 model_index;
6031 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6032 u8 desc_buf[hba->desc_size.dev_desc];
6033
6034 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6035 if (err) {
6036 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6037 __func__, err);
6038 goto out;
6039 }
6040
6041
6042
6043
6044
6045 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6046 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6047
6048 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6049
6050 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6051 QUERY_DESC_MAX_SIZE, ASCII_STD);
6052 if (err) {
6053 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6054 __func__, err);
6055 goto out;
6056 }
6057
6058 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6059 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6060 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6061 MAX_MODEL_LEN));
6062
6063
6064 dev_desc->model[MAX_MODEL_LEN] = '\0';
6065
6066out:
6067 return err;
6068}
6069
6070static void ufs_fixup_device_setup(struct ufs_hba *hba,
6071 struct ufs_dev_desc *dev_desc)
6072{
6073 struct ufs_dev_fix *f;
6074
6075 for (f = ufs_fixups; f->quirk; f++) {
6076 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6077 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6078 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6079 !strcmp(f->card.model, UFS_ANY_MODEL)))
6080 hba->dev_quirks |= f->quirk;
6081 }
6082}
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
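/**
 * ufshcd_tune_pa_tactivate - tune the local PA_TActivate
 * @hba: per-adapter instance
 *
 * Read the peer's RX_MIN_ACTIVATETIME_CAPABILITY, convert it to
 * PA_TACTIVATE time units and program the local PA_TACTIVATE with the
 * result.
 *
 * Returns zero on success, non-zero error value on failure.
 */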
6095static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6096{
6097 int ret = 0;
6098 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6099
6100 ret = ufshcd_dme_peer_get(hba,
6101 UIC_ARG_MIB_SEL(
6102 RX_MIN_ACTIVATETIME_CAPABILITY,
6103 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6104 &peer_rx_min_activatetime);
6105 if (ret)
6106 goto out;
6107
6108
6109 tuned_pa_tactivate =
6110 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6111 / PA_TACTIVATE_TIME_UNIT_US);
6112 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6113 tuned_pa_tactivate);
6114
6115out:
6116 return ret;
6117}
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
6129
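/**
 * ufshcd_tune_pa_hibern8time - tune PA_Hibern8Time
 * @hba: per-adapter instance
 *
 * Program PA_HIBERN8TIME with the larger of the local TX and peer RX
 * HIBERN8TIME capabilities, converted to PA_HIBERN8TIME units.
 *
 * Returns zero on success, non-zero error value on failure.
 */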
6130static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6131{
6132 int ret = 0;
6133 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6134 u32 max_hibern8_time, tuned_pa_hibern8time;
6135
6136 ret = ufshcd_dme_get(hba,
6137 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6138 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6139 &local_tx_hibern8_time_cap);
6140 if (ret)
6141 goto out;
6142
6143 ret = ufshcd_dme_peer_get(hba,
6144 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6145 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6146 &peer_rx_hibern8_time_cap);
6147 if (ret)
6148 goto out;
6149
6150 max_hibern8_time = max(local_tx_hibern8_time_cap,
6151 peer_rx_hibern8_time_cap);
6152
6153 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6154 / PA_HIBERN8_TIME_UNIT_US);
6155 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6156 tuned_pa_hibern8time);
6157out:
6158 return ret;
6159}
6160
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171
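/**
 * ufshcd_quirk_tune_host_pa_tactivate - tune PA_TACTIVATE for quirky devices
 * @hba: per-adapter instance
 *
 * For devices with UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE, compare host and
 * device PA_TACTIVATE in microseconds (accounting for each side's
 * PA_GRANULARITY) and, if the host value is larger, raise the device
 * PA_TACTIVATE so it is not smaller than the host's.
 *
 * Returns zero on success, non-zero error value on failure.
 */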
6172static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6173{
6174 int ret = 0;
6175 u32 granularity, peer_granularity;
6176 u32 pa_tactivate, peer_pa_tactivate;
6177 u32 pa_tactivate_us, peer_pa_tactivate_us;
6178 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6179
6180 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6181 &granularity);
6182 if (ret)
6183 goto out;
6184
6185 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6186 &peer_granularity);
6187 if (ret)
6188 goto out;
6189
6190 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6191 (granularity > PA_GRANULARITY_MAX_VAL)) {
6192 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6193 __func__, granularity);
6194 return -EINVAL;
6195 }
6196
6197 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6198 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6199 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6200 __func__, peer_granularity);
6201 return -EINVAL;
6202 }
6203
6204 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6205 if (ret)
6206 goto out;
6207
6208 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6209 &peer_pa_tactivate);
6210 if (ret)
6211 goto out;
6212
6213 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6214 peer_pa_tactivate_us = peer_pa_tactivate *
6215 gran_to_us_table[peer_granularity - 1];
6216
6217 if (pa_tactivate_us > peer_pa_tactivate_us) {
6218 u32 new_peer_pa_tactivate;
6219
6220 new_peer_pa_tactivate = pa_tactivate_us /
6221 gran_to_us_table[peer_granularity - 1];
6222 new_peer_pa_tactivate++;
6223 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6224 new_peer_pa_tactivate);
6225 }
6226
6227out:
6228 return ret;
6229}
6230
6231static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6232{
6233 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6234 ufshcd_tune_pa_tactivate(hba);
6235 ufshcd_tune_pa_hibern8time(hba);
6236 }
6237
6238 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6239
6240 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6241
6242 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6243 ufshcd_quirk_tune_host_pa_tactivate(hba);
6244
6245 ufshcd_vops_apply_dev_quirks(hba);
6246}
6247
6248static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6249{
6250 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6251
6252 hba->ufs_stats.hibern8_exit_cnt = 0;
6253 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6254
6255 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6256 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6257 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6258 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6259 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6260
6261 hba->req_abort_count = 0;
6262}
6263
6264static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6265{
6266 int err;
6267
6268 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6269 &hba->desc_size.dev_desc);
6270 if (err)
6271 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6272
6273 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6274 &hba->desc_size.pwr_desc);
6275 if (err)
6276 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6277
6278 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6279 &hba->desc_size.interc_desc);
6280 if (err)
6281 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6282
6283 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6284 &hba->desc_size.conf_desc);
6285 if (err)
6286 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6287
6288 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6289 &hba->desc_size.unit_desc);
6290 if (err)
6291 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6292
6293 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6294 &hba->desc_size.geom_desc);
6295 if (err)
6296 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6297}
6298
6299static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6300{
6301 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6302 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6303 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6304 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6305 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6306 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6307}
6308
6309
6310
6311
6312
6313
6314
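/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization.
 *
 * Returns zero on success, non-zero value on failure.
 */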
6315static int ufshcd_probe_hba(struct ufs_hba *hba)
6316{
6317 struct ufs_dev_desc card = {0};
6318 int ret;
6319 ktime_t start = ktime_get();
6320
6321 ret = ufshcd_link_startup(hba);
6322 if (ret)
6323 goto out;
6324
6325
6326 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6327 hba->is_urgent_bkops_lvl_checked = false;
6328
6329
6330 ufshcd_clear_dbg_ufs_stats(hba);
6331
6332
6333 ufshcd_set_link_active(hba);
6334
6335 ret = ufshcd_verify_dev_init(hba);
6336 if (ret)
6337 goto out;
6338
6339 ret = ufshcd_complete_dev_init(hba);
6340 if (ret)
6341 goto out;
6342
6343
6344 ufshcd_init_desc_sizes(hba);
6345
6346 ret = ufs_get_device_desc(hba, &card);
6347 if (ret) {
6348 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6349 __func__, ret);
6350 goto out;
6351 }
6352
6353 ufs_fixup_device_setup(hba, &card);
6354 ufshcd_tune_unipro_params(hba);
6355
6356 ret = ufshcd_set_vccq_rail_unused(hba,
6357 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6358 if (ret)
6359 goto out;
6360
6361
6362 ufshcd_set_ufs_dev_active(hba);
6363 ufshcd_force_reset_auto_bkops(hba);
6364 hba->wlun_dev_clr_ua = true;
6365
6366 if (ufshcd_get_max_pwr_mode(hba)) {
6367 dev_err(hba->dev,
6368 "%s: Failed getting max supported power mode\n",
6369 __func__);
6370 } else {
6371 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6372 if (ret) {
6373 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6374 __func__, ret);
6375 goto out;
6376 }
6377 }
6378
6379
6380 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6381
6382
6383
6384
6385
6386 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6387 bool flag;
6388
6389
6390 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6391 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6392 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6393 hba->dev_info.f_power_on_wp_en = flag;
6394
6395 if (!hba->is_init_prefetch)
6396 ufshcd_init_icc_levels(hba);
6397
6398
6399 if (ufshcd_scsi_add_wlus(hba))
6400 goto out;
6401
6402
6403 if (ufshcd_is_clkscaling_supported(hba)) {
6404 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6405 &hba->pwr_info,
6406 sizeof(struct ufs_pa_layer_attr));
6407 hba->clk_scaling.saved_pwr_info.is_valid = true;
6408 if (!hba->devfreq) {
6409 hba->devfreq = devm_devfreq_add_device(hba->dev,
6410 &ufs_devfreq_profile,
6411 "simple_ondemand",
6412 NULL);
6413 if (IS_ERR(hba->devfreq)) {
6414 ret = PTR_ERR(hba->devfreq);
6415 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6416 ret);
6417 goto out;
6418 }
6419 }
6420 hba->clk_scaling.is_allowed = true;
6421 }
6422
6423 scsi_scan_host(hba->host);
6424 pm_runtime_put_sync(hba->dev);
6425 }
6426
6427 if (!hba->is_init_prefetch)
6428 hba->is_init_prefetch = true;
6429
6430out:
6431
6432
6433
6434
6435 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6436 pm_runtime_put_sync(hba->dev);
6437 ufshcd_hba_exit(hba);
6438 }
6439
6440 trace_ufshcd_init(dev_name(hba->dev), ret,
6441 ktime_to_us(ktime_sub(ktime_get(), start)),
6442 hba->curr_dev_pwr_mode, hba->uic_link_state);
6443 return ret;
6444}
6445
6446
6447
6448
6449
6450
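/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: pointer to the adapter instance
 * @cookie: cookie data
 */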
6451static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6452{
6453 struct ufs_hba *hba = (struct ufs_hba *)data;
6454
6455 ufshcd_probe_hba(hba);
6456}
6457
6458static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6459{
6460 unsigned long flags;
6461 struct Scsi_Host *host;
6462 struct ufs_hba *hba;
6463 int index;
6464 bool found = false;
6465
6466 if (!scmd || !scmd->device || !scmd->device->host)
6467 return BLK_EH_NOT_HANDLED;
6468
6469 host = scmd->device->host;
6470 hba = shost_priv(host);
6471 if (!hba)
6472 return BLK_EH_NOT_HANDLED;
6473
6474 spin_lock_irqsave(host->host_lock, flags);
6475
6476 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6477 if (hba->lrb[index].cmd == scmd) {
6478 found = true;
6479 break;
6480 }
6481 }
6482
6483 spin_unlock_irqrestore(host->host_lock, flags);
6484
6485
6486
6487
6488
6489
6490 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
6491}
6492
6493static struct scsi_host_template ufshcd_driver_template = {
6494 .module = THIS_MODULE,
6495 .name = UFSHCD,
6496 .proc_name = UFSHCD,
6497 .queuecommand = ufshcd_queuecommand,
6498 .slave_alloc = ufshcd_slave_alloc,
6499 .slave_configure = ufshcd_slave_configure,
6500 .slave_destroy = ufshcd_slave_destroy,
6501 .change_queue_depth = ufshcd_change_queue_depth,
6502 .eh_abort_handler = ufshcd_abort,
6503 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6504 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6505 .eh_timed_out = ufshcd_eh_timed_out,
6506 .this_id = -1,
6507 .sg_tablesize = SG_ALL,
6508 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
6509 .can_queue = UFSHCD_CAN_QUEUE,
6510 .max_host_blocked = 1,
6511 .track_queue_depth = 1,
6512};
6513
6514static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6515 int ua)
6516{
6517 int ret;
6518
6519 if (!vreg)
6520 return 0;
6521
6522 ret = regulator_set_load(vreg->reg, ua);
6523 if (ret < 0) {
6524 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6525 __func__, vreg->name, ua, ret);
6526 }
6527
6528 return ret;
6529}
6530
6531static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6532 struct ufs_vreg *vreg)
6533{
6534 if (!vreg)
6535 return 0;
6536 else if (vreg->unused)
6537 return 0;
6538 else
6539 return ufshcd_config_vreg_load(hba->dev, vreg,
6540 UFS_VREG_LPM_LOAD_UA);
6541}
6542
6543static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6544 struct ufs_vreg *vreg)
6545{
6546 if (!vreg)
6547 return 0;
6548 else if (vreg->unused)
6549 return 0;
6550 else
6551 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
6552}
6553
6554static int ufshcd_config_vreg(struct device *dev,
6555 struct ufs_vreg *vreg, bool on)
6556{
6557 int ret = 0;
6558 struct regulator *reg = vreg->reg;
6559 const char *name = vreg->name;
6560 int min_uV, uA_load;
6561
6562 BUG_ON(!vreg);
6563
6564 if (regulator_count_voltages(reg) > 0) {
6565 min_uV = on ? vreg->min_uV : 0;
6566 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6567 if (ret) {
6568 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6569 __func__, name, ret);
6570 goto out;
6571 }
6572
6573 uA_load = on ? vreg->max_uA : 0;
6574 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6575 if (ret)
6576 goto out;
6577 }
6578out:
6579 return ret;
6580}
6581
6582static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6583{
6584 int ret = 0;
6585
6586 if (!vreg)
6587 goto out;
6588 else if (vreg->enabled || vreg->unused)
6589 goto out;
6590
6591 ret = ufshcd_config_vreg(dev, vreg, true);
6592 if (!ret)
6593 ret = regulator_enable(vreg->reg);
6594
6595 if (!ret)
6596 vreg->enabled = true;
6597 else
6598 dev_err(dev, "%s: %s enable failed, err=%d\n",
6599 __func__, vreg->name, ret);
6600out:
6601 return ret;
6602}
6603
6604static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6605{
6606 int ret = 0;
6607
6608 if (!vreg)
6609 goto out;
6610 else if (!vreg->enabled || vreg->unused)
6611 goto out;
6612
6613 ret = regulator_disable(vreg->reg);
6614
6615 if (!ret) {
6616
6617 ufshcd_config_vreg(dev, vreg, false);
6618 vreg->enabled = false;
6619 } else {
6620 dev_err(dev, "%s: %s disable failed, err=%d\n",
6621 __func__, vreg->name, ret);
6622 }
6623out:
6624 return ret;
6625}
6626
6627static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6628{
6629 int ret = 0;
6630 struct device *dev = hba->dev;
6631 struct ufs_vreg_info *info = &hba->vreg_info;
6632
6633 if (!info)
6634 goto out;
6635
6636 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6637 if (ret)
6638 goto out;
6639
6640 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6641 if (ret)
6642 goto out;
6643
6644 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6645 if (ret)
6646 goto out;
6647
6648out:
6649 if (ret) {
6650 ufshcd_toggle_vreg(dev, info->vccq2, false);
6651 ufshcd_toggle_vreg(dev, info->vccq, false);
6652 ufshcd_toggle_vreg(dev, info->vcc, false);
6653 }
6654 return ret;
6655}
6656
6657static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6658{
6659 struct ufs_vreg_info *info = &hba->vreg_info;
6660
6661 if (info)
6662 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6663
6664 return 0;
6665}
6666
6667static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6668{
6669 int ret = 0;
6670
6671 if (!vreg)
6672 goto out;
6673
6674 vreg->reg = devm_regulator_get(dev, vreg->name);
6675 if (IS_ERR(vreg->reg)) {
6676 ret = PTR_ERR(vreg->reg);
6677 dev_err(dev, "%s: %s get failed, err=%d\n",
6678 __func__, vreg->name, ret);
6679 }
6680out:
6681 return ret;
6682}
6683
6684static int ufshcd_init_vreg(struct ufs_hba *hba)
6685{
6686 int ret = 0;
6687 struct device *dev = hba->dev;
6688 struct ufs_vreg_info *info = &hba->vreg_info;
6689
6690 if (!info)
6691 goto out;
6692
6693 ret = ufshcd_get_vreg(dev, info->vcc);
6694 if (ret)
6695 goto out;
6696
6697 ret = ufshcd_get_vreg(dev, info->vccq);
6698 if (ret)
6699 goto out;
6700
6701 ret = ufshcd_get_vreg(dev, info->vccq2);
6702out:
6703 return ret;
6704}
6705
6706static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6707{
6708 struct ufs_vreg_info *info = &hba->vreg_info;
6709
6710 if (info)
6711 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6712
6713 return 0;
6714}
6715
6716static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6717{
6718 int ret = 0;
6719 struct ufs_vreg_info *info = &hba->vreg_info;
6720
6721 if (!info)
6722 goto out;
6723 else if (!info->vccq)
6724 goto out;
6725
6726 if (unused) {
6727
6728 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
6729
6730
6731
6732
6733 if (!ret)
6734 info->vccq->unused = true;
6735 } else {
6736
6737
6738
6739
6740 info->vccq->unused = false;
6741 }
6742out:
6743 return ret;
6744}
6745
6746static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6747 bool skip_ref_clk)
6748{
6749 int ret = 0;
6750 struct ufs_clk_info *clki;
6751 struct list_head *head = &hba->clk_list_head;
6752 unsigned long flags;
6753 ktime_t start = ktime_get();
6754 bool clk_state_changed = false;
6755
6756 if (list_empty(head))
6757 goto out;
6758
6759 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6760 if (ret)
6761 return ret;
6762
6763 list_for_each_entry(clki, head, list) {
6764 if (!IS_ERR_OR_NULL(clki->clk)) {
6765 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
6766 continue;
6767
6768 clk_state_changed = on ^ clki->enabled;
6769 if (on && !clki->enabled) {
6770 ret = clk_prepare_enable(clki->clk);
6771 if (ret) {
6772 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
6773 __func__, clki->name, ret);
6774 goto out;
6775 }
6776 } else if (!on && clki->enabled) {
6777 clk_disable_unprepare(clki->clk);
6778 }
6779 clki->enabled = on;
6780 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
6781 clki->name, on ? "en" : "dis");
6782 }
6783 }
6784
6785 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
6786 if (ret)
6787 return ret;
6788
6789out:
6790 if (ret) {
6791 list_for_each_entry(clki, head, list) {
6792 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
6793 clk_disable_unprepare(clki->clk);
6794 }
6795 } else if (!ret && on) {
6796 spin_lock_irqsave(hba->host->host_lock, flags);
6797 hba->clk_gating.state = CLKS_ON;
6798 trace_ufshcd_clk_gating(dev_name(hba->dev),
6799 hba->clk_gating.state);
6800 spin_unlock_irqrestore(hba->host->host_lock, flags);
6801 }
6802
6803 if (clk_state_changed)
6804 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
6805 (on ? "on" : "off"),
6806 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
6807 return ret;
6808}
6809
6810static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
6811{
6812 return __ufshcd_setup_clocks(hba, on, false);
6813}
6814
6815static int ufshcd_init_clocks(struct ufs_hba *hba)
6816{
6817 int ret = 0;
6818 struct ufs_clk_info *clki;
6819 struct device *dev = hba->dev;
6820 struct list_head *head = &hba->clk_list_head;
6821
6822 if (list_empty(head))
6823 goto out;
6824
6825 list_for_each_entry(clki, head, list) {
6826 if (!clki->name)
6827 continue;
6828
6829 clki->clk = devm_clk_get(dev, clki->name);
6830 if (IS_ERR(clki->clk)) {
6831 ret = PTR_ERR(clki->clk);
6832 dev_err(dev, "%s: %s clk get failed, %d\n",
6833 __func__, clki->name, ret);
6834 goto out;
6835 }
6836
6837 if (clki->max_freq) {
6838 ret = clk_set_rate(clki->clk, clki->max_freq);
6839 if (ret) {
6840 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6841 __func__, clki->name,
6842 clki->max_freq, ret);
6843 goto out;
6844 }
6845 clki->curr_freq = clki->max_freq;
6846 }
6847 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
6848 clki->name, clk_get_rate(clki->clk));
6849 }
6850out:
6851 return ret;
6852}
6853
6854static int ufshcd_variant_hba_init(struct ufs_hba *hba)
6855{
6856 int err = 0;
6857
6858 if (!hba->vops)
6859 goto out;
6860
6861 err = ufshcd_vops_init(hba);
6862 if (err)
6863 goto out;
6864
6865 err = ufshcd_vops_setup_regulators(hba, true);
6866 if (err)
6867 goto out_exit;
6868
6869 goto out;
6870
6871out_exit:
6872 ufshcd_vops_exit(hba);
6873out:
6874 if (err)
6875 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
6876 __func__, ufshcd_get_var_name(hba), err);
6877 return err;
6878}
6879
6880static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
6881{
6882 if (!hba->vops)
6883 return;
6884
6885 ufshcd_vops_setup_regulators(hba, false);
6886
6887 ufshcd_vops_exit(hba);
6888}
6889
6890static int ufshcd_hba_init(struct ufs_hba *hba)
6891{
6892 int err;
6893
6894
6895
6896
6897
6898
6899
6900
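 /*
  * Handle the host controller power rail separately from the UFS device
  * rails: bring up the controller supply first, then clocks, then the
  * device rails and the variant (vendor) initialization.
  */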
6901 err = ufshcd_init_hba_vreg(hba);
6902 if (err)
6903 goto out;
6904
6905 err = ufshcd_setup_hba_vreg(hba, true);
6906 if (err)
6907 goto out;
6908
6909 err = ufshcd_init_clocks(hba);
6910 if (err)
6911 goto out_disable_hba_vreg;
6912
6913 err = ufshcd_setup_clocks(hba, true);
6914 if (err)
6915 goto out_disable_hba_vreg;
6916
6917 err = ufshcd_init_vreg(hba);
6918 if (err)
6919 goto out_disable_clks;
6920
6921 err = ufshcd_setup_vreg(hba, true);
6922 if (err)
6923 goto out_disable_clks;
6924
6925 err = ufshcd_variant_hba_init(hba);
6926 if (err)
6927 goto out_disable_vreg;
6928
6929 hba->is_powered = true;
6930 goto out;
6931
6932out_disable_vreg:
6933 ufshcd_setup_vreg(hba, false);
6934out_disable_clks:
6935 ufshcd_setup_clocks(hba, false);
6936out_disable_hba_vreg:
6937 ufshcd_setup_hba_vreg(hba, false);
6938out:
6939 return err;
6940}
6941
6942static void ufshcd_hba_exit(struct ufs_hba *hba)
6943{
6944 if (hba->is_powered) {
6945 ufshcd_variant_hba_exit(hba);
6946 ufshcd_setup_vreg(hba, false);
6947 ufshcd_suspend_clkscaling(hba);
6948 if (ufshcd_is_clkscaling_supported(hba)) {
6949 if (hba->devfreq)
6950 ufshcd_suspend_clkscaling(hba);
6951 destroy_workqueue(hba->clk_scaling.workq);
6952 }
6953 ufshcd_setup_clocks(hba, false);
6954 ufshcd_setup_hba_vreg(hba, false);
6955 hba->is_powered = false;
6956 }
6957}
6958
6959static int
6960ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
6961{
6962 unsigned char cmd[6] = {REQUEST_SENSE,
6963 0,
6964 0,
6965 0,
6966 UFSHCD_REQ_SENSE_SIZE,
6967 0};
6968 char *buffer;
6969 int ret;
6970
6971 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
6972 if (!buffer) {
6973 ret = -ENOMEM;
6974 goto out;
6975 }
6976
6977 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
6978 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
6979 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
6980 if (ret)
6981 pr_err("%s: failed with err %d\n", __func__, ret);
6982
6983 kfree(buffer);
6984out:
6985 return ret;
6986}
6987
6988
6989
6990
6991
6992
6993
6994
6995
6996
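/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *	power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode was set successfully,
 * non-zero otherwise.
 */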
6997static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
6998 enum ufs_dev_pwr_mode pwr_mode)
6999{
7000 unsigned char cmd[6] = { START_STOP };
7001 struct scsi_sense_hdr sshdr;
7002 struct scsi_device *sdp;
7003 unsigned long flags;
7004 int ret;
7005
7006 spin_lock_irqsave(hba->host->host_lock, flags);
7007 sdp = hba->sdev_ufs_device;
7008 if (sdp) {
7009 ret = scsi_device_get(sdp);
7010 if (!ret && !scsi_device_online(sdp)) {
7011 ret = -ENODEV;
7012 scsi_device_put(sdp);
7013 }
7014 } else {
7015 ret = -ENODEV;
7016 }
7017 spin_unlock_irqrestore(hba->host->host_lock, flags);
7018
7019 if (ret)
7020 return ret;
7021
7022
7023
7024
7025
7026
7027
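 /*
  * If SCSI commands fail, the SCSI mid-layer schedules its error
  * handling, which waits for the host to be resumed. Since we know we
  * are functional while in this context, skip host resume in the error
  * handling path.
  */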
7028 hba->host->eh_noresume = 1;
7029 if (hba->wlun_dev_clr_ua) {
7030 ret = ufshcd_send_request_sense(hba, sdp);
7031 if (ret)
7032 goto out;
7033
7034 hba->wlun_dev_clr_ua = false;
7035 }
7036
7037 cmd[4] = pwr_mode << 4;
7038
7039
7040
7041
7042
7043
7044 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7045 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7046 if (ret) {
7047 sdev_printk(KERN_WARNING, sdp,
7048 "START_STOP failed for power mode: %d, result %x\n",
7049 pwr_mode, ret);
7050 if (driver_byte(ret) & DRIVER_SENSE)
7051 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7052 }
7053
7054 if (!ret)
7055 hba->curr_dev_pwr_mode = pwr_mode;
7056out:
7057 scsi_device_put(sdp);
7058 hba->host->eh_noresume = 0;
7059 return ret;
7060}
7061
7062static int ufshcd_link_state_transition(struct ufs_hba *hba,
7063 enum uic_link_state req_link_state,
7064 int check_for_bkops)
7065{
7066 int ret = 0;
7067
7068 if (req_link_state == hba->uic_link_state)
7069 return 0;
7070
7071 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7072 ret = ufshcd_uic_hibern8_enter(hba);
7073 if (!ret)
7074 ufshcd_set_link_hibern8(hba);
7075 else
7076 goto out;
7077 }
7078
7079
7080
7081
7082 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7083 (!check_for_bkops || (check_for_bkops &&
7084 !hba->auto_bkops_enabled))) {
7085
7086
7087
7088
7089
7090
7091
7092 ret = ufshcd_uic_hibern8_enter(hba);
7093 if (ret)
7094 goto out;
7095
7096
7097
7098
7099 ufshcd_hba_stop(hba, true);
7100
7101
7102
7103
7104 ufshcd_set_link_off(hba);
7105 }
7106
7107out:
7108 return ret;
7109}
7110
7111static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7112{
7113
7114
7115
7116
7117
7118
7119 if (!ufshcd_is_link_active(hba) &&
7120 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7121 usleep_range(2000, 2100);
7122
7123
7124
7125
7126
7127
7128
7129
7130
7131
7132
7133
7134
7135 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7136 !hba->dev_info.is_lu_power_on_wp) {
7137 ufshcd_setup_vreg(hba, false);
7138 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7139 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7140 if (!ufshcd_is_link_active(hba)) {
7141 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7142 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7143 }
7144 }
7145}
7146
7147static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7148{
7149 int ret = 0;
7150
7151 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7152 !hba->dev_info.is_lu_power_on_wp) {
7153 ret = ufshcd_setup_vreg(hba, true);
7154 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7155 if (!ret && !ufshcd_is_link_active(hba)) {
7156 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7157 if (ret)
7158 goto vcc_disable;
7159 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7160 if (ret)
7161 goto vccq_lpm;
7162 }
7163 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7164 }
7165 goto out;
7166
7167vccq_lpm:
7168 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7169vcc_disable:
7170 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7171out:
7172 return ret;
7173}
7174
7175static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7176{
7177 if (ufshcd_is_link_off(hba))
7178 ufshcd_setup_hba_vreg(hba, false);
7179}
7180
7181static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7182{
7183 if (ufshcd_is_link_off(hba))
7184 ufshcd_setup_hba_vreg(hba, true);
7185}
7186
7187
7188
7189
7190
7191
7192
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
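/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * Try to put the UFS device and link into low power mode based on the
 * "rpm_lvl" (Runtime PM level) or "spm_lvl" (System PM level). During
 * shutdown both the device and the link are powered off.
 *
 * NOTE: UFS device and link must be active before this function is called.
 *
 * Returns 0 for success and non-zero for failure.
 */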
7203static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7204{
7205 int ret = 0;
7206 enum ufs_pm_level pm_lvl;
7207 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7208 enum uic_link_state req_link_state;
7209
7210 hba->pm_op_in_progress = 1;
7211 if (!ufshcd_is_shutdown_pm(pm_op)) {
7212 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7213 hba->rpm_lvl : hba->spm_lvl;
7214 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7215 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7216 } else {
7217 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7218 req_link_state = UIC_LINK_OFF_STATE;
7219 }
7220
7221
7222
7223
7224
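 /*
  * Hold the clocks for the duration of the suspend sequence; if no low
  * power mode transition is possible the clocks are at least gated at
  * the end.
  */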
7225 ufshcd_hold(hba, false);
7226 hba->clk_gating.is_suspended = true;
7227
7228 if (hba->clk_scaling.is_allowed) {
7229 cancel_work_sync(&hba->clk_scaling.suspend_work);
7230 cancel_work_sync(&hba->clk_scaling.resume_work);
7231 ufshcd_suspend_clkscaling(hba);
7232 }
7233
7234 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7235 req_link_state == UIC_LINK_ACTIVE_STATE) {
7236 goto disable_clks;
7237 }
7238
7239 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7240 (req_link_state == hba->uic_link_state))
7241 goto enable_gating;
7242
7243
7244 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7245 ret = -EINVAL;
7246 goto enable_gating;
7247 }
7248
7249 if (ufshcd_is_runtime_pm(pm_op)) {
7250 if (ufshcd_can_autobkops_during_suspend(hba)) {
7251
7252
7253
7254
7255
7256 ret = ufshcd_urgent_bkops(hba);
7257 if (ret)
7258 goto enable_gating;
7259 } else {
7260
7261 ufshcd_disable_auto_bkops(hba);
7262 }
7263 }
7264
7265 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7266 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7267 !ufshcd_is_runtime_pm(pm_op))) {
7268
7269 ufshcd_disable_auto_bkops(hba);
7270 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7271 if (ret)
7272 goto enable_gating;
7273 }
7274
7275 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7276 if (ret)
7277 goto set_dev_active;
7278
7279 ufshcd_vreg_set_lpm(hba);
7280
7281disable_clks:
7282
7283
7284
7285
7286
7287 ret = ufshcd_vops_suspend(hba, pm_op);
7288 if (ret)
7289 goto set_link_active;
7290
7291 if (!ufshcd_is_link_active(hba))
7292 ufshcd_setup_clocks(hba, false);
7293 else
7294
7295 __ufshcd_setup_clocks(hba, false, true);
7296
7297 hba->clk_gating.state = CLKS_OFF;
7298 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7299
7300
7301
7302
7303 ufshcd_disable_irq(hba);
7304
7305 ufshcd_hba_vreg_set_lpm(hba);
7306 goto out;
7307
7308set_link_active:
7309 if (hba->clk_scaling.is_allowed)
7310 ufshcd_resume_clkscaling(hba);
7311 ufshcd_vreg_set_hpm(hba);
7312 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7313 ufshcd_set_link_active(hba);
7314 else if (ufshcd_is_link_off(hba))
7315 ufshcd_host_reset_and_restore(hba);
7316set_dev_active:
7317 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7318 ufshcd_disable_auto_bkops(hba);
7319enable_gating:
7320 if (hba->clk_scaling.is_allowed)
7321 ufshcd_resume_clkscaling(hba);
7322 hba->clk_gating.is_suspended = false;
7323 ufshcd_release(hba);
7324out:
7325 hba->pm_op_in_progress = 0;
7326 return ret;
7327}
7328
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function brings the UFS device, UniPro link and host controller back
 * to the active state.
 *
 * Returns 0 for success and non-zero for failure.
 */
7339static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7340{
7341 int ret;
7342 enum uic_link_state old_link_state;
7343
7344 hba->pm_op_in_progress = 1;
7345 old_link_state = hba->uic_link_state;
7346
7347 ufshcd_hba_vreg_set_hpm(hba);
7348
7349 ret = ufshcd_setup_clocks(hba, true);
7350 if (ret)
7351 goto out;
7352
	/* Enable the host interrupt as the host controller will be active soon */
7354 ret = ufshcd_enable_irq(hba);
7355 if (ret)
7356 goto disable_irq_and_vops_clks;
7357
7358 ret = ufshcd_vreg_set_hpm(hba);
7359 if (ret)
7360 goto disable_irq_and_vops_clks;
7361
	/*
	 * Call the vendor specific resume callback after the host clocks are
	 * turned on; the callback may access vendor specific host controller
	 * register space.
	 */
7367 ret = ufshcd_vops_resume(hba, pm_op);
7368 if (ret)
7369 goto disable_vreg;
7370
7371 if (ufshcd_is_link_hibern8(hba)) {
7372 ret = ufshcd_uic_hibern8_exit(hba);
7373 if (!ret)
7374 ufshcd_set_link_active(hba);
7375 else
7376 goto vendor_suspend;
7377 } else if (ufshcd_is_link_off(hba)) {
7378 ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * A successful ufshcd_host_reset_and_restore() should have
		 * already set the link state to active.
		 */
7383 if (ret || !ufshcd_is_link_active(hba))
7384 goto vendor_suspend;
7385 }
7386
7387 if (!ufshcd_is_ufs_dev_active(hba)) {
7388 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7389 if (ret)
7390 goto set_old_link_state;
7391 }
7392
7393 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7394 ufshcd_enable_auto_bkops(hba);
7395 else
		/*
		 * If BKOPs are urgently needed at this moment, keep auto-bkops
		 * enabled; otherwise disable them.
		 */
7400 ufshcd_urgent_bkops(hba);
7401
7402 hba->clk_gating.is_suspended = false;
7403
7404 if (hba->clk_scaling.is_allowed)
7405 ufshcd_resume_clkscaling(hba);
7406
	/* Schedule clock gating in case there is no further access to the UFS device */
7408 ufshcd_release(hba);
7409 goto out;
7410
7411set_old_link_state:
7412 ufshcd_link_state_transition(hba, old_link_state, 0);
7413vendor_suspend:
7414 ufshcd_vops_suspend(hba, pm_op);
7415disable_vreg:
7416 ufshcd_vreg_set_lpm(hba);
7417disable_irq_and_vops_clks:
7418 ufshcd_disable_irq(hba);
7419 if (hba->clk_scaling.is_allowed)
7420 ufshcd_suspend_clkscaling(hba);
7421 ufshcd_setup_clocks(hba, false);
7422out:
7423 hba->pm_op_in_progress = 0;
7424 return ret;
7425}
7426
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * See the description of ufshcd_suspend() for details.
 *
 * Returns 0 for success and non-zero for failure.
 */
7436int ufshcd_system_suspend(struct ufs_hba *hba)
7437{
7438 int ret = 0;
7439 ktime_t start = ktime_get();
7440
7441 if (!hba || !hba->is_powered)
7442 return 0;
7443
7444 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7445 hba->curr_dev_pwr_mode) &&
7446 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7447 hba->uic_link_state))
7448 goto out;
7449
7450 if (pm_runtime_suspended(hba->dev)) {
		/*
		 * The UFS device and/or link low power states reached during
		 * runtime suspend may differ from those requested for system
		 * suspend, so runtime resume the device and link here and let
		 * the system suspend low power levels take effect below.
		 */
7459 ret = ufshcd_runtime_resume(hba);
7460 if (ret)
7461 goto out;
7462 }
7463
7464 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7465out:
7466 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7467 ktime_to_us(ktime_sub(ktime_get(), start)),
7468 hba->curr_dev_pwr_mode, hba->uic_link_state);
7469 if (!ret)
7470 hba->is_sys_suspended = true;
7471 return ret;
7472}
7473EXPORT_SYMBOL(ufshcd_system_suspend);
7474
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure.
 */
7482int ufshcd_system_resume(struct ufs_hba *hba)
7483{
7484 int ret = 0;
7485 ktime_t start = ktime_get();
7486
7487 if (!hba)
7488 return -EINVAL;
7489
7490 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let runtime resume take care of resuming if the device is
		 * runtime suspended.
		 */
7495 goto out;
7496 else
7497 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7498out:
7499 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7500 ktime_to_us(ktime_sub(ktime_get(), start)),
7501 hba->curr_dev_pwr_mode, hba->uic_link_state);
7502 return ret;
7503}
7504EXPORT_SYMBOL(ufshcd_system_resume);
7505
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * See the description of ufshcd_suspend() for details.
 *
 * Returns 0 for success and non-zero for failure.
 */
7514int ufshcd_runtime_suspend(struct ufs_hba *hba)
7515{
7516 int ret = 0;
7517 ktime_t start = ktime_get();
7518
7519 if (!hba)
7520 return -EINVAL;
7521
7522 if (!hba->is_powered)
7523 goto out;
7524 else
7525 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7526out:
7527 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7528 ktime_to_us(ktime_sub(ktime_get(), start)),
7529 hba->curr_dev_pwr_mode, hba->uic_link_state);
7530 return ret;
7531}
7532EXPORT_SYMBOL(ufshcd_runtime_suspend);
7533

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function brings the UFS device, UniPro link and host controller back
 * to the active state:
 *
 * 1. Turn on the controller related clocks.
 * 2. Bring the UniPro link out of the Hibernate state, or re-initialize the
 *    host if the link was powered off.
 * 3. If the UFS device is not active, bring it to the active power mode.
 * 4. Restore the auto-bkops setting on the device.
 *
 * Returns 0 for success and non-zero for failure.
 */
7555int ufshcd_runtime_resume(struct ufs_hba *hba)
7556{
7557 int ret = 0;
7558 ktime_t start = ktime_get();
7559
7560 if (!hba)
7561 return -EINVAL;
7562
7563 if (!hba->is_powered)
7564 goto out;
7565 else
7566 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7567out:
7568 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7569 ktime_to_us(ktime_sub(ktime_get(), start)),
7570 hba->curr_dev_pwr_mode, hba->uic_link_state);
7571 return ret;
7572}
7573EXPORT_SYMBOL(ufshcd_runtime_resume);
7574
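/**
 * ufshcd_runtime_idle - runtime idle callback
 * @hba: per adapter instance
 *
 * No device specific idle handling is needed; returning 0 lets runtime
 * suspend proceed.
 */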
7575int ufshcd_runtime_idle(struct ufs_hba *hba)
7576{
7577 return 0;
7578}
7579EXPORT_SYMBOL(ufshcd_runtime_idle);
7580
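/*
 * Common store handler for the rpm_lvl and spm_lvl sysfs attributes: validate
 * the requested power management level and update it under the host lock.
 */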
7581static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
7582 struct device_attribute *attr,
7583 const char *buf, size_t count,
7584 bool rpm)
7585{
7586 struct ufs_hba *hba = dev_get_drvdata(dev);
7587 unsigned long flags, value;
7588
7589 if (kstrtoul(buf, 0, &value))
7590 return -EINVAL;
7591
7592 if (value >= UFS_PM_LVL_MAX)
7593 return -EINVAL;
7594
7595 spin_lock_irqsave(hba->host->host_lock, flags);
7596 if (rpm)
7597 hba->rpm_lvl = value;
7598 else
7599 hba->spm_lvl = value;
7600 spin_unlock_irqrestore(hba->host->host_lock, flags);
7601 return count;
7602}
7603
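/* Show the current runtime PM level along with all available levels */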
7604static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
7605 struct device_attribute *attr, char *buf)
7606{
7607 struct ufs_hba *hba = dev_get_drvdata(dev);
7608 int curr_len;
7609 u8 lvl;
7610
7611 curr_len = snprintf(buf, PAGE_SIZE,
7612 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
7613 hba->rpm_lvl,
7614 ufschd_ufs_dev_pwr_mode_to_string(
7615 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
7616 ufschd_uic_link_state_to_string(
7617 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
7618
7619 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7620 "\nAll available Runtime PM levels info:\n");
7621 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7622 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7623 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
7624 lvl,
7625 ufschd_ufs_dev_pwr_mode_to_string(
7626 ufs_pm_lvl_states[lvl].dev_state),
7627 ufschd_uic_link_state_to_string(
7628 ufs_pm_lvl_states[lvl].link_state));
7629
7630 return curr_len;
7631}
7632
7633static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
7634 struct device_attribute *attr, const char *buf, size_t count)
7635{
7636 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
7637}
7638
7639static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
7640{
7641 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
7642 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
7643 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
7644 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
7645 hba->rpm_lvl_attr.attr.mode = 0644;
7646 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
7647 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
7648}
7649
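/* Show the current system PM level along with all available levels */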
7650static ssize_t ufshcd_spm_lvl_show(struct device *dev,
7651 struct device_attribute *attr, char *buf)
7652{
7653 struct ufs_hba *hba = dev_get_drvdata(dev);
7654 int curr_len;
7655 u8 lvl;
7656
7657 curr_len = snprintf(buf, PAGE_SIZE,
7658 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
7659 hba->spm_lvl,
7660 ufschd_ufs_dev_pwr_mode_to_string(
7661 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
7662 ufschd_uic_link_state_to_string(
7663 ufs_pm_lvl_states[hba->spm_lvl].link_state));
7664
7665 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7666 "\nAll available System PM levels info:\n");
7667 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7668 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7669 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
7670 lvl,
7671 ufschd_ufs_dev_pwr_mode_to_string(
7672 ufs_pm_lvl_states[lvl].dev_state),
7673 ufschd_uic_link_state_to_string(
7674 ufs_pm_lvl_states[lvl].link_state));
7675
7676 return curr_len;
7677}
7678
7679static ssize_t ufshcd_spm_lvl_store(struct device *dev,
7680 struct device_attribute *attr, const char *buf, size_t count)
7681{
7682 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
7683}
7684
7685static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
7686{
7687 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
7688 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
7689 sysfs_attr_init(&hba->spm_lvl_attr.attr);
7690 hba->spm_lvl_attr.attr.name = "spm_lvl";
7691 hba->spm_lvl_attr.attr.mode = 0644;
7692 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
7693 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
7694}
7695
7696static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7697{
7698 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
7699 ufshcd_add_spm_lvl_sysfs_nodes(hba);
7700}
7701
7702static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
7703{
7704 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
7705 device_remove_file(hba->dev, &hba->spm_lvl_attr);
7706}
7707
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * Powers off both the UFS device and the UFS link.
 *
 * Always returns 0 so that shutdown is not blocked even on errors.
 */
7716int ufshcd_shutdown(struct ufs_hba *hba)
7717{
7718 int ret = 0;
7719
7720 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7721 goto out;
7722
7723 if (pm_runtime_suspended(hba->dev)) {
7724 ret = ufshcd_runtime_resume(hba);
7725 if (ret)
7726 goto out;
7727 }
7728
7729 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7730out:
7731 if (ret)
7732 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7733
7734 return 0;
7735}
7736EXPORT_SYMBOL(ufshcd_shutdown);
7737
/**
 * ufshcd_remove - de-allocate the SCSI host and host memory space data structures
 * @hba: per adapter instance
 */
7743void ufshcd_remove(struct ufs_hba *hba)
7744{
7745 ufshcd_remove_sysfs_nodes(hba);
7746 scsi_remove_host(hba->host);
	/* Disable interrupts */
7748 ufshcd_disable_intr(hba, hba->intr_mask);
7749 ufshcd_hba_stop(hba, true);
7750
7751 ufshcd_exit_clk_gating(hba);
7752 if (ufshcd_is_clkscaling_supported(hba))
7753 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
7754 ufshcd_hba_exit(hba);
7755}
7756EXPORT_SYMBOL_GPL(ufshcd_remove);
7757
/**
 * ufshcd_dealloc_host - release the SCSI host allocated by ufshcd_alloc_host()
 * @hba: pointer to Host Bus Adapter (HBA)
 */
7762void ufshcd_dealloc_host(struct ufs_hba *hba)
7763{
7764 scsi_host_put(hba->host);
7765}
7766EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7767
/**
 * ufshcd_set_dma_mask - Set the DMA mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure.
 */
7775static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7776{
7777 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7778 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7779 return 0;
7780 }
7781 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7782}
7783
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Returns 0 on success, non-zero value on failure.
 */
7790int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7791{
7792 struct Scsi_Host *host;
7793 struct ufs_hba *hba;
7794 int err = 0;
7795
7796 if (!dev) {
		dev_err(dev,
		"Invalid device reference: dev is NULL\n");
7799 err = -ENODEV;
7800 goto out_error;
7801 }
7802
7803 host = scsi_host_alloc(&ufshcd_driver_template,
7804 sizeof(struct ufs_hba));
7805 if (!host) {
7806 dev_err(dev, "scsi_host_alloc failed\n");
7807 err = -ENOMEM;
7808 goto out_error;
7809 }
7810 hba = shost_priv(host);
7811 hba->host = host;
7812 hba->dev = dev;
7813 *hba_handle = hba;
7814
7815 INIT_LIST_HEAD(&hba->clk_list_head);
7816
7817out_error:
7818 return err;
7819}
7820EXPORT_SYMBOL(ufshcd_alloc_host);
7821
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure.
 */
7829int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7830{
7831 int err;
7832 struct Scsi_Host *host = hba->host;
7833 struct device *dev = hba->dev;
7834
7835 if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference: mmio_base is NULL\n");
7838 err = -ENODEV;
7839 goto out_error;
7840 }
7841
7842 hba->mmio_base = mmio_base;
7843 hba->irq = irq;
7844
	/* Set descriptor lengths to specification defaults */
7846 ufshcd_def_desc_sizes(hba);
7847
7848 err = ufshcd_hba_init(hba);
7849 if (err)
7850 goto out_error;
7851
	/* Read controller capabilities */
7853 ufshcd_hba_capabilities(hba);
7854
	/* Get the UFS version supported by the controller */
7856 hba->ufs_version = ufshcd_get_ufs_version(hba);
7857
7858 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7859 (hba->ufs_version != UFSHCI_VERSION_11) &&
7860 (hba->ufs_version != UFSHCI_VERSION_20) &&
7861 (hba->ufs_version != UFSHCI_VERSION_21))
7862 dev_err(hba->dev, "invalid UFS version 0x%x\n",
7863 hba->ufs_version);
7864
	/* Get the interrupt bit mask for this UFS version */
7866 hba->intr_mask = ufshcd_get_intr_mask(hba);
7867
7868 err = ufshcd_set_dma_mask(hba);
7869 if (err) {
7870 dev_err(hba->dev, "set dma mask failed\n");
7871 goto out_disable;
7872 }
7873
	/* Allocate memory for host memory space */
7875 err = ufshcd_memory_alloc(hba);
7876 if (err) {
7877 dev_err(hba->dev, "Memory allocation failed\n");
7878 goto out_disable;
7879 }
7880
	/* Configure the local reference block (LRB) entries */
7882 ufshcd_host_memory_configure(hba);
7883
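	/* Expose the controller's queueing and addressing limits to the SCSI midlayer */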
7884 host->can_queue = hba->nutrs;
7885 host->cmd_per_lun = hba->nutrs;
7886 host->max_id = UFSHCD_MAX_ID;
7887 host->max_lun = UFS_MAX_LUNS;
7888 host->max_channel = UFSHCD_MAX_CHANNEL;
7889 host->unique_id = host->host_no;
7890 host->max_cmd_len = MAX_CDB_SIZE;
7891
7892 hba->max_pwr_info.is_valid = false;
7893
	/* Initialize wait queues for task management */
7895 init_waitqueue_head(&hba->tm_wq);
7896 init_waitqueue_head(&hba->tm_tag_wq);
7897
	/* Initialize work items for error and exception event handling */
7899 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
7900 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7901
	/* Initialize UIC command mutex */
7903 mutex_init(&hba->uic_cmd_mutex);
7904
	/* Initialize mutex for device management commands */
7906 mutex_init(&hba->dev_cmd.lock);
7907
7908 init_rwsem(&hba->clk_scaling_lock);
7909
	/* Initialize device management tag acquire wait queue */
7911 init_waitqueue_head(&hba->dev_cmd.tag_wq);
7912
7913 ufshcd_init_clk_gating(hba);
7914
	/*
	 * To avoid any spurious interrupt immediately after registering the
	 * UFS controller interrupt handler, clear any pending UFS interrupt
	 * status and disable all UFS interrupts first.
	 */
7920 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
7921 REG_INTERRUPT_STATUS);
7922 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
7923
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering the UFS interrupt handler.
	 */
7927 mb();
7928
	/* IRQ registration */
7930 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7931 if (err) {
7932 dev_err(hba->dev, "request irq failed\n");
7933 goto exit_gating;
7934 } else {
7935 hba->is_irq_enabled = true;
7936 }
7937
7938 err = scsi_add_host(host, hba->dev);
7939 if (err) {
7940 dev_err(hba->dev, "scsi_add_host failed\n");
7941 goto exit_gating;
7942 }
7943
	/* Enable the host controller */
7945 err = ufshcd_hba_enable(hba);
7946 if (err) {
7947 dev_err(hba->dev, "Host controller enable failed\n");
7948 ufshcd_print_host_regs(hba);
7949 ufshcd_print_host_state(hba);
7950 goto out_remove_scsi_host;
7951 }
7952
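	/* Set up clock scaling work items, workqueue and sysfs interface if supported */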
7953 if (ufshcd_is_clkscaling_supported(hba)) {
7954 char wq_name[sizeof("ufs_clkscaling_00")];
7955
7956 INIT_WORK(&hba->clk_scaling.suspend_work,
7957 ufshcd_clk_scaling_suspend_work);
7958 INIT_WORK(&hba->clk_scaling.resume_work,
7959 ufshcd_clk_scaling_resume_work);
7960
7961 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
7962 host->host_no);
7963 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
7964
7965 ufshcd_clkscaling_init_sysfs(hba);
7966 }
7967
	/*
	 * Set the default power management level for runtime and system PM.
	 * The default power saving mode keeps the UniPro link in Hibern8 and
	 * the UFS device in sleep state.
	 */
7973 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7974 UFS_SLEEP_PWR_MODE,
7975 UIC_LINK_HIBERN8_STATE);
7976 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7977 UFS_SLEEP_PWR_MODE,
7978 UIC_LINK_HIBERN8_STATE);
7979
	/* Hold auto suspend until the async scan completes */
7981 pm_runtime_get_sync(dev);
7982
	/*
	 * Assume the device was not put into sleep/power-down state by the
	 * boot stage before the kernel; this avoids doing link startup twice
	 * during ufshcd_probe_hba().
	 */
7989 ufshcd_set_ufs_dev_active(hba);
7990
7991 async_schedule(ufshcd_async_scan, hba);
7992 ufshcd_add_sysfs_nodes(hba);
7993
7994 return 0;
7995
7996out_remove_scsi_host:
7997 scsi_remove_host(hba->host);
7998exit_gating:
7999 ufshcd_exit_clk_gating(hba);
8000out_disable:
8001 hba->is_irq_enabled = false;
8002 ufshcd_hba_exit(hba);
8003out_error:
8004 return err;
8005}
8006EXPORT_SYMBOL_GPL(ufshcd_init);
8007
8008MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8009MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8010MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8011MODULE_LICENSE("GPL");
8012MODULE_VERSION(UFSHCD_DRIVER_VERSION);
8013