#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout, unit: ms */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of Hibern8 enter retries */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of host reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* mask used to extract the flag value byte from a query response */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u8 *regs;

	regs = kzalloc(len, GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	memcpy_fromio(regs, hba->mmio_base + offset, len);
	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

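/*
 * Illustrative sketch only (not used in this file): a vendor variant driver
 * could dump a private register window on error. MY_VENDOR_REG_OFFSET and
 * MY_VENDOR_REG_LEN are hypothetical placeholders, not real definitions.
 *
 *	ufshcd_dump_regs(hba, MY_VENDOR_REG_OFFSET, MY_VENDOR_REG_LEN,
 *			 "vendor_regs: ");
 */
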
/* Host <-> SCSI midlayer limits */
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0),		/* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1),	/* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2),	/* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3),			/* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4),			/* Transport layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5),		/* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

/* UFS device power mode / UIC link state pairs, indexed by ufs_pm_level */
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	return UFS_PM_LVL_0;
}

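/*
 * Example (assuming ufs_pm_level enumerates the table above in order, as it
 * does in ufshcd.h): ufs_get_desired_pm_lvl_for_dev_link_state(
 * UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) returns UFS_PM_LVL_3, the
 * fourth table entry; an unsupported combination falls back to UFS_PM_LVL_0.
 */
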
/* Per-device quirks, matched by vendor and (optionally) model string */
static struct ufs_dev_fix ufs_fixups[] = {
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};

237static void ufshcd_tmc_handler(struct ufs_hba *hba);
238static void ufshcd_async_scan(void *data, async_cookie_t cookie);
239static int ufshcd_reset_and_restore(struct ufs_hba *hba);
240static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
241static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
242static void ufshcd_hba_exit(struct ufs_hba *hba);
243static int ufshcd_probe_hba(struct ufs_hba *hba);
244static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
245 bool skip_ref_clk);
246static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
247static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
248static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
249static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
250static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
251static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
252static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
253static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
254static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
255static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
256static irqreturn_t ufshcd_intr(int irq, void *__hba);
257static int ufshcd_change_power_mode(struct ufs_hba *hba,
258 struct ufs_pa_layer_attr *pwr_mode);
259static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
260{
261 return tag >= 0 && tag < hba->nutrs;
262}
263
264static inline int ufshcd_enable_irq(struct ufs_hba *hba)
265{
266 int ret = 0;
267
268 if (!hba->is_irq_enabled) {
269 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
270 hba);
271 if (ret)
272 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
273 __func__, ret);
274 hba->is_irq_enabled = true;
275 }
276
277 return ret;
278}
279
280static inline void ufshcd_disable_irq(struct ufs_hba *hba)
281{
282 if (hba->is_irq_enabled) {
283 free_irq(hba->irq, hba);
284 hba->is_irq_enabled = false;
285 }
286}
287
288static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
289{
290 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
291 scsi_unblock_requests(hba->host);
292}
293
294static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
295{
296 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
297 scsi_block_requests(hba->host);
298}
299
300
301static inline void ufshcd_remove_non_printable(char *val)
302{
303 if (!val)
304 return;
305
306 if (*val < 0x20 || *val > 0x7e)
307 *val = ' ';
308}
309
310static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
311 const char *str)
312{
313 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
314
315 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
316}
317
318static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
319 const char *str)
320{
321 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
322
323 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
324}
325
326static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
327 const char *str)
328{
329 struct utp_task_req_desc *descp;
330 struct utp_upiu_task_req *task_req;
331 int off = (int)tag - hba->nutrs;
332
333 descp = &hba->utmrdl_base_addr[off];
334 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
335 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
336 &task_req->input_param1);
337}
338
339static void ufshcd_add_command_trace(struct ufs_hba *hba,
340 unsigned int tag, const char *str)
341{
342 sector_t lba = -1;
343 u8 opcode = 0;
344 u32 intr, doorbell;
345 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
346 int transfer_len = -1;
347
348 if (!trace_ufshcd_command_enabled()) {
349
350 if (lrbp->cmd)
351 ufshcd_add_cmd_upiu_trace(hba, tag, str);
352 return;
353 }
354
355 if (lrbp->cmd) {
356
357 ufshcd_add_cmd_upiu_trace(hba, tag, str);
358 opcode = (u8)(*lrbp->cmd->cmnd);
359 if ((opcode == READ_10) || (opcode == WRITE_10)) {
360
361
362
363
364 if (lrbp->cmd->request && lrbp->cmd->request->bio)
365 lba =
366 lrbp->cmd->request->bio->bi_iter.bi_sector;
367 transfer_len = be32_to_cpu(
368 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
369 }
370 }
371
372 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
373 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
374 trace_ufshcd_command(dev_name(hba->dev), str, tag,
375 doorbell, transfer_len, intr, lba, opcode);
376}
377
378static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
379{
380 struct ufs_clk_info *clki;
381 struct list_head *head = &hba->clk_list_head;
382
383 if (list_empty(head))
384 return;
385
386 list_for_each_entry(clki, head, list) {
387 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
388 clki->max_freq)
389 dev_err(hba->dev, "clk: %s, rate: %u\n",
390 clki->name, clki->curr_freq);
391 }
392}
393
394static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
395 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
396{
397 int i;
398
399 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
400 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
401
402 if (err_hist->reg[p] == 0)
403 continue;
404 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
405 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
406 }
407}
408
409static void ufshcd_print_host_regs(struct ufs_hba *hba)
410{
411 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
412 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
413 hba->ufs_version, hba->capabilities);
414 dev_err(hba->dev,
415 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
416 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
417 dev_err(hba->dev,
418 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
419 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
420 hba->ufs_stats.hibern8_exit_cnt);
421
422 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
423 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
424 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
425 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
426 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
427
428 ufshcd_print_clk_freqs(hba);
429
430 if (hba->vops && hba->vops->dbg_register_dump)
431 hba->vops->dbg_register_dump(hba);
432}
433
434static
435void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
436{
437 struct ufshcd_lrb *lrbp;
438 int prdt_length;
439 int tag;
440
441 for_each_set_bit(tag, &bitmap, hba->nutrs) {
442 lrbp = &hba->lrb[tag];
443
444 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
445 tag, ktime_to_us(lrbp->issue_time_stamp));
446 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
447 tag, ktime_to_us(lrbp->compl_time_stamp));
448 dev_err(hba->dev,
449 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
450 tag, (u64)lrbp->utrd_dma_addr);
451
452 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
453 sizeof(struct utp_transfer_req_desc));
454 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
455 (u64)lrbp->ucd_req_dma_addr);
456 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
457 sizeof(struct utp_upiu_req));
458 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
459 (u64)lrbp->ucd_rsp_dma_addr);
460 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
461 sizeof(struct utp_upiu_rsp));
462
463 prdt_length = le16_to_cpu(
464 lrbp->utr_descriptor_ptr->prd_table_length);
465 dev_err(hba->dev,
466 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
467 tag, prdt_length,
468 (u64)lrbp->ucd_prdt_dma_addr);
469
470 if (pr_prdt)
471 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
472 sizeof(struct ufshcd_sg_entry) * prdt_length);
473 }
474}
475
476static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
477{
478 struct utp_task_req_desc *tmrdp;
479 int tag;
480
481 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
482 tmrdp = &hba->utmrdl_base_addr[tag];
483 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
484 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
485 sizeof(struct request_desc_header));
486 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
487 tag);
488 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
489 sizeof(struct utp_upiu_req));
490 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
491 tag);
492 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
493 sizeof(struct utp_task_req_desc));
494 }
495}
496
497static void ufshcd_print_host_state(struct ufs_hba *hba)
498{
499 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
500 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
501 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
502 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
503 hba->saved_err, hba->saved_uic_err);
504 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
505 hba->curr_dev_pwr_mode, hba->uic_link_state);
506 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
507 hba->pm_op_in_progress, hba->is_sys_suspended);
508 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
509 hba->auto_bkops_enabled, hba->host->host_self_blocked);
510 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
511 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
512 hba->eh_flags, hba->req_abort_count);
513 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
514 hba->capabilities, hba->caps);
515 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
516 hba->dev_quirks);
517}
518
519
520
521
522
523
524static void ufshcd_print_pwr_info(struct ufs_hba *hba)
525{
526 static const char * const names[] = {
527 "INVALID MODE",
528 "FAST MODE",
529 "SLOW_MODE",
530 "INVALID MODE",
531 "FASTAUTO_MODE",
532 "SLOWAUTO_MODE",
533 "INVALID MODE",
534 };
535
536 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
537 __func__,
538 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
539 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
540 names[hba->pwr_info.pwr_rx],
541 names[hba->pwr_info.pwr_tx],
542 hba->pwr_info.hs_rate);
543}
544
545
546
547
548
549
550
551
552
553
554
555
556
557int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
558 u32 val, unsigned long interval_us,
559 unsigned long timeout_ms, bool can_sleep)
560{
561 int err = 0;
562 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
563
564
565 val = val & mask;
566
567 while ((ufshcd_readl(hba, reg) & mask) != val) {
568 if (can_sleep)
569 usleep_range(interval_us, interval_us + 50);
570 else
571 udelay(interval_us);
572 if (time_after(jiffies, timeout)) {
573 if ((ufshcd_readl(hba, reg) & mask) != val)
574 err = -ETIMEDOUT;
575 break;
576 }
577 }
578
579 return err;
580}
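/*
 * Usage sketch (hedged example in the style of this driver's callers): poll
 * for a transfer request doorbell bit to clear, checking every 1000us for up
 * to 1000ms. Passing ~mask as the expected value waits for the masked bits
 * to read back as zero.
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       mask, ~mask, 1000, 1000, true);
 */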
581
582
583
584
585
586
587
588static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
589{
590 u32 intr_mask = 0;
591
592 switch (hba->ufs_version) {
593 case UFSHCI_VERSION_10:
594 intr_mask = INTERRUPT_MASK_ALL_VER_10;
595 break;
596 case UFSHCI_VERSION_11:
597 case UFSHCI_VERSION_20:
598 intr_mask = INTERRUPT_MASK_ALL_VER_11;
599 break;
600 case UFSHCI_VERSION_21:
601 default:
602 intr_mask = INTERRUPT_MASK_ALL_VER_21;
603 break;
604 }
605
606 return intr_mask;
607}
608
609
610
611
612
613
614
615static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
616{
617 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
618 return ufshcd_vops_get_ufs_hci_version(hba);
619
620 return ufshcd_readl(hba, REG_UFS_VERSION);
621}
622
623
624
625
626
627
628
629
630static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
631{
632 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
633 DEVICE_PRESENT) ? true : false;
634}
635
636
637
638
639
640
641
642
643static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
644{
645 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
646}
647
648
649
650
651
652
653
654
655static inline int
656ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
657{
658 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
659}
660
661
662
663
664
665
666
667
668
669
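/**
 * ufshcd_get_tm_free_slot - get a free slot for a task management request
 * @hba: per adapter instance
 * @free_slot: filled with the claimed slot index on success
 *
 * Claims a free task management slot atomically: find_first_zero_bit() is
 * retried under test_and_set_bit_lock() so two contexts can never end up
 * owning the same slot. Returns true if a slot was claimed, else false.
 */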
670static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
671{
672 int tag;
673 bool ret = false;
674
675 if (!free_slot)
676 goto out;
677
678 do {
679 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
680 if (tag >= hba->nutmrs)
681 goto out;
682 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
683
684 *free_slot = tag;
685 ret = true;
686out:
687 return ret;
688}
689
690static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
691{
692 clear_bit_unlock(slot, &hba->tm_slots_in_use);
693}
694
695
696
697
698
699
700static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
701{
702 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
704 else
705 ufshcd_writel(hba, ~(1 << pos),
706 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
707}
708
709
710
711
712
713
714static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
715{
716 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
717 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
718 else
719 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
720}
721
722
723
724
725
726
727static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
728{
729 __clear_bit(tag, &hba->outstanding_reqs);
730}
731
732
733
734
735
736
737
738static inline int ufshcd_get_lists_status(u32 reg)
739{
740 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
741}
742
743
744
745
746
747
748
749
750static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
751{
752 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
753 MASK_UIC_COMMAND_RESULT;
754}
755
756
757
758
759
760
761
762
763static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
764{
765 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
766}
767
768
769
770
771
772static inline int
773ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
774{
775 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
776}
777
778
779
780
781
782
783
784
785static inline int
786ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
787{
788 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
789}
790
791
792
793
794
795
796
797
798static inline unsigned int
799ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
800{
801 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
802 MASK_RSP_UPIU_DATA_SEG_LEN;
803}
804
805
806
807
808
809
810
811
812
813
814static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
815{
816 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
817 MASK_RSP_EXCEPTION_EVENT ? true : false;
818}
819
820
821
822
823
824static inline void
825ufshcd_reset_intr_aggr(struct ufs_hba *hba)
826{
827 ufshcd_writel(hba, INT_AGGR_ENABLE |
828 INT_AGGR_COUNTER_AND_TIMER_RESET,
829 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
830}
831
832
833
834
835
836
837
838static inline void
839ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
840{
841 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
842 INT_AGGR_COUNTER_THLD_VAL(cnt) |
843 INT_AGGR_TIMEOUT_VAL(tmout),
844 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
845}
846
847
848
849
850
851static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
852{
853 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
854}
855
856
857
858
859
860
861
862static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
863{
864 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
865 REG_UTP_TASK_REQ_LIST_RUN_STOP);
866 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
867 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
868}
869
870
871
872
873
874static inline void ufshcd_hba_start(struct ufs_hba *hba)
875{
876 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
877}
878
879
880
881
882
883
884
885static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
886{
887 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
888 ? false : true;
889}
890
891u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
892{
893
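	/* UFSHCI 1.0 and 1.1 hosts use UniPro 1.41; newer hosts report UniPro 1.6 */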
894 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
895 (hba->ufs_version == UFSHCI_VERSION_11))
896 return UFS_UNIPRO_VER_1_41;
897 else
898 return UFS_UNIPRO_VER_1_6;
899}
900EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
901
902static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
903{
904
905
906
907
908
909
910
911
912
913 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
914 return true;
915 else
916 return false;
917}
918
919static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
920{
921 int ret = 0;
922 struct ufs_clk_info *clki;
923 struct list_head *head = &hba->clk_list_head;
924 ktime_t start = ktime_get();
925 bool clk_state_changed = false;
926
927 if (list_empty(head))
928 goto out;
929
930 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
931 if (ret)
932 return ret;
933
934 list_for_each_entry(clki, head, list) {
935 if (!IS_ERR_OR_NULL(clki->clk)) {
936 if (scale_up && clki->max_freq) {
937 if (clki->curr_freq == clki->max_freq)
938 continue;
939
940 clk_state_changed = true;
941 ret = clk_set_rate(clki->clk, clki->max_freq);
942 if (ret) {
943 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
944 __func__, clki->name,
945 clki->max_freq, ret);
946 break;
947 }
948 trace_ufshcd_clk_scaling(dev_name(hba->dev),
949 "scaled up", clki->name,
950 clki->curr_freq,
951 clki->max_freq);
952
953 clki->curr_freq = clki->max_freq;
954
955 } else if (!scale_up && clki->min_freq) {
956 if (clki->curr_freq == clki->min_freq)
957 continue;
958
959 clk_state_changed = true;
960 ret = clk_set_rate(clki->clk, clki->min_freq);
961 if (ret) {
962 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
963 __func__, clki->name,
964 clki->min_freq, ret);
965 break;
966 }
967 trace_ufshcd_clk_scaling(dev_name(hba->dev),
968 "scaled down", clki->name,
969 clki->curr_freq,
970 clki->min_freq);
971 clki->curr_freq = clki->min_freq;
972 }
973 }
974 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
975 clki->name, clk_get_rate(clki->clk));
976 }
977
978 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
979
980out:
981 if (clk_state_changed)
982 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
983 (scale_up ? "up" : "down"),
984 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
985 return ret;
986}
987
988
989
990
991
992
993
994
995static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
996 bool scale_up)
997{
998 struct ufs_clk_info *clki;
999 struct list_head *head = &hba->clk_list_head;
1000
1001 if (list_empty(head))
1002 return false;
1003
1004 list_for_each_entry(clki, head, list) {
1005 if (!IS_ERR_OR_NULL(clki->clk)) {
1006 if (scale_up && clki->max_freq) {
1007 if (clki->curr_freq == clki->max_freq)
1008 continue;
1009 return true;
1010 } else if (!scale_up && clki->min_freq) {
1011 if (clki->curr_freq == clki->min_freq)
1012 continue;
1013 return true;
1014 }
1015 }
1016 }
1017
1018 return false;
1019}
1020
1021static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1022 u64 wait_timeout_us)
1023{
1024 unsigned long flags;
1025 int ret = 0;
1026 u32 tm_doorbell;
1027 u32 tr_doorbell;
1028 bool timeout = false, do_last_check = false;
1029 ktime_t start;
1030
1031 ufshcd_hold(hba, false);
1032 spin_lock_irqsave(hba->host->host_lock, flags);
1033
1034
1035
1036
1037 start = ktime_get();
1038 do {
1039 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1040 ret = -EBUSY;
1041 goto out;
1042 }
1043
1044 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1045 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1046 if (!tm_doorbell && !tr_doorbell) {
1047 timeout = false;
1048 break;
1049 } else if (do_last_check) {
1050 break;
1051 }
1052
1053 spin_unlock_irqrestore(hba->host->host_lock, flags);
1054 schedule();
1055 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1056 wait_timeout_us) {
1057 timeout = true;
1058
1059
1060
1061
1062
1063 do_last_check = true;
1064 }
1065 spin_lock_irqsave(hba->host->host_lock, flags);
1066 } while (tm_doorbell || tr_doorbell);
1067
1068 if (timeout) {
1069 dev_err(hba->dev,
1070 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1071 __func__, tm_doorbell, tr_doorbell);
1072 ret = -EBUSY;
1073 }
1074out:
1075 spin_unlock_irqrestore(hba->host->host_lock, flags);
1076 ufshcd_release(hba);
1077 return ret;
1078}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
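/**
 * ufshcd_scale_gear - scale up/down the UFS link gear
 * @hba: per adapter instance
 * @scale_up: true to restore the saved (higher) power mode, false to drop
 *	to UFS_MIN_GEAR_TO_SCALE_DOWN
 *
 * On scale down the current power mode is saved first so a later scale up
 * can restore it. Returns 0 on success, non-zero on failure.
 */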
1089static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1090{
1091 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1092 int ret = 0;
1093 struct ufs_pa_layer_attr new_pwr_info;
1094
1095 if (scale_up) {
1096 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1097 sizeof(struct ufs_pa_layer_attr));
1098 } else {
1099 memcpy(&new_pwr_info, &hba->pwr_info,
1100 sizeof(struct ufs_pa_layer_attr));
1101
1102 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1103 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1104
1105 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1106 &hba->pwr_info,
1107 sizeof(struct ufs_pa_layer_attr));
1108
1109
1110 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1111 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1112 }
1113 }
1114
1115
1116 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1117
1118 if (ret)
1119 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1120 __func__, ret,
1121 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1122 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1123
1124 return ret;
1125}
1126
1127static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1128{
1129 #define DOORBELL_CLR_TOUT_US (1000 * 1000)
1130 int ret = 0;
1131
1132
1133
1134
1135 ufshcd_scsi_block_requests(hba);
1136 down_write(&hba->clk_scaling_lock);
1137 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1138 ret = -EBUSY;
1139 up_write(&hba->clk_scaling_lock);
1140 ufshcd_scsi_unblock_requests(hba);
1141 }
1142
1143 return ret;
1144}
1145
1146static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1147{
1148 up_write(&hba->clk_scaling_lock);
1149 ufshcd_scsi_unblock_requests(hba);
1150}
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
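/**
 * ufshcd_devfreq_scale - scale the UFS clocks and link gear together
 * @hba: per adapter instance
 * @scale_up: true for scaling up, false for scaling down
 *
 * The gear is lowered before the clocks when scaling down and raised only
 * after the clocks when scaling up, so the link never runs a gear the
 * current clock rate cannot support. On failure the previous state is
 * restored as far as possible. Returns 0 on success, non-zero otherwise.
 */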
1161static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1162{
1163 int ret = 0;
1164
1165
1166 ufshcd_hold(hba, false);
1167
1168 ret = ufshcd_clock_scaling_prepare(hba);
1169 if (ret)
1170 return ret;
1171
1172
1173 if (!scale_up) {
1174 ret = ufshcd_scale_gear(hba, false);
1175 if (ret)
1176 goto out;
1177 }
1178
1179 ret = ufshcd_scale_clks(hba, scale_up);
1180 if (ret) {
1181 if (!scale_up)
1182 ufshcd_scale_gear(hba, true);
1183 goto out;
1184 }
1185
1186
1187 if (scale_up) {
1188 ret = ufshcd_scale_gear(hba, true);
1189 if (ret) {
1190 ufshcd_scale_clks(hba, false);
1191 goto out;
1192 }
1193 }
1194
1195 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1196
1197out:
1198 ufshcd_clock_scaling_unprepare(hba);
1199 ufshcd_release(hba);
1200 return ret;
1201}
1202
1203static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1204{
1205 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1206 clk_scaling.suspend_work);
1207 unsigned long irq_flags;
1208
1209 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1210 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1211 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1212 return;
1213 }
1214 hba->clk_scaling.is_suspended = true;
1215 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1216
1217 __ufshcd_suspend_clkscaling(hba);
1218}
1219
1220static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1221{
1222 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1223 clk_scaling.resume_work);
1224 unsigned long irq_flags;
1225
1226 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1227 if (!hba->clk_scaling.is_suspended) {
1228 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1229 return;
1230 }
1231 hba->clk_scaling.is_suspended = false;
1232 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1233
1234 devfreq_resume_device(hba->devfreq);
1235}
1236
1237static int ufshcd_devfreq_target(struct device *dev,
1238 unsigned long *freq, u32 flags)
1239{
1240 int ret = 0;
1241 struct ufs_hba *hba = dev_get_drvdata(dev);
1242 ktime_t start;
1243 bool scale_up, sched_clk_scaling_suspend_work = false;
1244 struct list_head *clk_list = &hba->clk_list_head;
1245 struct ufs_clk_info *clki;
1246 unsigned long irq_flags;
1247
1248 if (!ufshcd_is_clkscaling_supported(hba))
1249 return -EINVAL;
1250
1251 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1252 if (ufshcd_eh_in_progress(hba)) {
1253 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1254 return 0;
1255 }
1256
1257 if (!hba->clk_scaling.active_reqs)
1258 sched_clk_scaling_suspend_work = true;
1259
1260 if (list_empty(clk_list)) {
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1262 goto out;
1263 }
1264
1265 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1266 scale_up = (*freq == clki->max_freq) ? true : false;
1267 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1268 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1269 ret = 0;
1270 goto out;
1271 }
1272 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1273
1274 start = ktime_get();
1275 ret = ufshcd_devfreq_scale(hba, scale_up);
1276
1277 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1278 (scale_up ? "up" : "down"),
1279 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1280
1281out:
1282 if (sched_clk_scaling_suspend_work)
1283 queue_work(hba->clk_scaling.workq,
1284 &hba->clk_scaling.suspend_work);
1285
1286 return ret;
1287}
1288
1289
1290static int ufshcd_devfreq_get_dev_status(struct device *dev,
1291 struct devfreq_dev_status *stat)
1292{
1293 struct ufs_hba *hba = dev_get_drvdata(dev);
1294 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1295 unsigned long flags;
1296
1297 if (!ufshcd_is_clkscaling_supported(hba))
1298 return -EINVAL;
1299
1300 memset(stat, 0, sizeof(*stat));
1301
1302 spin_lock_irqsave(hba->host->host_lock, flags);
1303 if (!scaling->window_start_t)
1304 goto start_window;
1305
1306 if (scaling->is_busy_started)
1307 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1308 scaling->busy_start_t));
1309
1310 stat->total_time = jiffies_to_usecs((long)jiffies -
1311 (long)scaling->window_start_t);
1312 stat->busy_time = scaling->tot_busy_t;
1313start_window:
1314 scaling->window_start_t = jiffies;
1315 scaling->tot_busy_t = 0;
1316
1317 if (hba->outstanding_reqs) {
1318 scaling->busy_start_t = ktime_get();
1319 scaling->is_busy_started = true;
1320 } else {
1321 scaling->busy_start_t = 0;
1322 scaling->is_busy_started = false;
1323 }
1324 spin_unlock_irqrestore(hba->host->host_lock, flags);
1325 return 0;
1326}
1327
1328static struct devfreq_dev_profile ufs_devfreq_profile = {
1329 .polling_ms = 100,
1330 .target = ufshcd_devfreq_target,
1331 .get_dev_status = ufshcd_devfreq_get_dev_status,
1332};
1333
1334static int ufshcd_devfreq_init(struct ufs_hba *hba)
1335{
1336 struct list_head *clk_list = &hba->clk_list_head;
1337 struct ufs_clk_info *clki;
1338 struct devfreq *devfreq;
1339 int ret;
1340
1341
1342 if (list_empty(clk_list))
1343 return 0;
1344
1345 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1346 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1347 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1348
1349 devfreq = devfreq_add_device(hba->dev,
1350 &ufs_devfreq_profile,
1351 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1352 NULL);
1353 if (IS_ERR(devfreq)) {
1354 ret = PTR_ERR(devfreq);
1355 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1356
1357 dev_pm_opp_remove(hba->dev, clki->min_freq);
1358 dev_pm_opp_remove(hba->dev, clki->max_freq);
1359 return ret;
1360 }
1361
1362 hba->devfreq = devfreq;
1363
1364 return 0;
1365}
1366
1367static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1368{
1369 struct list_head *clk_list = &hba->clk_list_head;
1370 struct ufs_clk_info *clki;
1371
1372 if (!hba->devfreq)
1373 return;
1374
1375 devfreq_remove_device(hba->devfreq);
1376 hba->devfreq = NULL;
1377
1378 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1379 dev_pm_opp_remove(hba->dev, clki->min_freq);
1380 dev_pm_opp_remove(hba->dev, clki->max_freq);
1381}
1382
1383static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1384{
1385 unsigned long flags;
1386
1387 devfreq_suspend_device(hba->devfreq);
1388 spin_lock_irqsave(hba->host->host_lock, flags);
1389 hba->clk_scaling.window_start_t = 0;
1390 spin_unlock_irqrestore(hba->host->host_lock, flags);
1391}
1392
1393static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1394{
1395 unsigned long flags;
1396 bool suspend = false;
1397
1398 if (!ufshcd_is_clkscaling_supported(hba))
1399 return;
1400
1401 spin_lock_irqsave(hba->host->host_lock, flags);
1402 if (!hba->clk_scaling.is_suspended) {
1403 suspend = true;
1404 hba->clk_scaling.is_suspended = true;
1405 }
1406 spin_unlock_irqrestore(hba->host->host_lock, flags);
1407
1408 if (suspend)
1409 __ufshcd_suspend_clkscaling(hba);
1410}
1411
1412static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1413{
1414 unsigned long flags;
1415 bool resume = false;
1416
1417 if (!ufshcd_is_clkscaling_supported(hba))
1418 return;
1419
1420 spin_lock_irqsave(hba->host->host_lock, flags);
1421 if (hba->clk_scaling.is_suspended) {
1422 resume = true;
1423 hba->clk_scaling.is_suspended = false;
1424 }
1425 spin_unlock_irqrestore(hba->host->host_lock, flags);
1426
1427 if (resume)
1428 devfreq_resume_device(hba->devfreq);
1429}
1430
1431static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1432 struct device_attribute *attr, char *buf)
1433{
1434 struct ufs_hba *hba = dev_get_drvdata(dev);
1435
1436 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1437}
1438
1439static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1440 struct device_attribute *attr, const char *buf, size_t count)
1441{
1442 struct ufs_hba *hba = dev_get_drvdata(dev);
1443 u32 value;
1444 int err;
1445
1446 if (kstrtou32(buf, 0, &value))
1447 return -EINVAL;
1448
1449 value = !!value;
1450 if (value == hba->clk_scaling.is_allowed)
1451 goto out;
1452
1453 pm_runtime_get_sync(hba->dev);
1454 ufshcd_hold(hba, false);
1455
1456 cancel_work_sync(&hba->clk_scaling.suspend_work);
1457 cancel_work_sync(&hba->clk_scaling.resume_work);
1458
1459 hba->clk_scaling.is_allowed = value;
1460
1461 if (value) {
1462 ufshcd_resume_clkscaling(hba);
1463 } else {
1464 ufshcd_suspend_clkscaling(hba);
1465 err = ufshcd_devfreq_scale(hba, true);
1466 if (err)
1467 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1468 __func__, err);
1469 }
1470
1471 ufshcd_release(hba);
1472 pm_runtime_put_sync(hba->dev);
1473out:
1474 return count;
1475}
1476
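/*
 * "clkscale_enable" sysfs attribute (0644) on the host device: writing 0
 * suspends devfreq clock scaling and pins the clocks and gear at maximum,
 * writing 1 re-enables scaling; reading returns the current is_allowed flag.
 */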
1477static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1478{
1479 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1480 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1481 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1482 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1483 hba->clk_scaling.enable_attr.attr.mode = 0644;
1484 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1485 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1486}
1487
1488static void ufshcd_ungate_work(struct work_struct *work)
1489{
1490 int ret;
1491 unsigned long flags;
1492 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1493 clk_gating.ungate_work);
1494
1495 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1496
1497 spin_lock_irqsave(hba->host->host_lock, flags);
1498 if (hba->clk_gating.state == CLKS_ON) {
1499 spin_unlock_irqrestore(hba->host->host_lock, flags);
1500 goto unblock_reqs;
1501 }
1502
1503 spin_unlock_irqrestore(hba->host->host_lock, flags);
1504 ufshcd_setup_clocks(hba, true);
1505
1506
1507 if (ufshcd_can_hibern8_during_gating(hba)) {
1508
1509 hba->clk_gating.is_suspended = true;
1510 if (ufshcd_is_link_hibern8(hba)) {
1511 ret = ufshcd_uic_hibern8_exit(hba);
1512 if (ret)
1513 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1514 __func__, ret);
1515 else
1516 ufshcd_set_link_active(hba);
1517 }
1518 hba->clk_gating.is_suspended = false;
1519 }
1520unblock_reqs:
1521 ufshcd_scsi_unblock_requests(hba);
1522}
1523
1524
1525
1526
1527
1528
1529
1530int ufshcd_hold(struct ufs_hba *hba, bool async)
1531{
1532 int rc = 0;
1533 unsigned long flags;
1534
1535 if (!ufshcd_is_clkgating_allowed(hba))
1536 goto out;
1537 spin_lock_irqsave(hba->host->host_lock, flags);
1538 hba->clk_gating.active_reqs++;
1539
1540 if (ufshcd_eh_in_progress(hba)) {
1541 spin_unlock_irqrestore(hba->host->host_lock, flags);
1542 return 0;
1543 }
1544
1545start:
1546 switch (hba->clk_gating.state) {
1547 case CLKS_ON:
1548
1549
1550
1551
1552
1553
1554
1555
1556 if (ufshcd_can_hibern8_during_gating(hba) &&
1557 ufshcd_is_link_hibern8(hba)) {
1558 spin_unlock_irqrestore(hba->host->host_lock, flags);
1559 flush_work(&hba->clk_gating.ungate_work);
1560 spin_lock_irqsave(hba->host->host_lock, flags);
1561 goto start;
1562 }
1563 break;
1564 case REQ_CLKS_OFF:
1565 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1566 hba->clk_gating.state = CLKS_ON;
1567 trace_ufshcd_clk_gating(dev_name(hba->dev),
1568 hba->clk_gating.state);
1569 break;
1570 }
1571
1572
1573
1574
1575
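		/*
		 * The gate work could not be cancelled, i.e. it is either
		 * done or already running, so fall through and treat the
		 * clocks as (going) off: block requests and queue ungating.
		 */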
1576 case CLKS_OFF:
1577 ufshcd_scsi_block_requests(hba);
1578 hba->clk_gating.state = REQ_CLKS_ON;
1579 trace_ufshcd_clk_gating(dev_name(hba->dev),
1580 hba->clk_gating.state);
1581 queue_work(hba->clk_gating.clk_gating_workq,
1582 &hba->clk_gating.ungate_work);
1583
1584
1585
1586
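		/*
		 * Fall through: ungate work was just queued, so behave the
		 * same as if the state was already REQ_CLKS_ON.
		 */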
1587 case REQ_CLKS_ON:
1588 if (async) {
1589 rc = -EAGAIN;
1590 hba->clk_gating.active_reqs--;
1591 break;
1592 }
1593
1594 spin_unlock_irqrestore(hba->host->host_lock, flags);
1595 flush_work(&hba->clk_gating.ungate_work);
1596
1597 spin_lock_irqsave(hba->host->host_lock, flags);
1598 goto start;
1599 default:
1600 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1601 __func__, hba->clk_gating.state);
1602 break;
1603 }
1604 spin_unlock_irqrestore(hba->host->host_lock, flags);
1605out:
1606 return rc;
1607}
1608EXPORT_SYMBOL_GPL(ufshcd_hold);
1609
1610static void ufshcd_gate_work(struct work_struct *work)
1611{
1612 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1613 clk_gating.gate_work.work);
1614 unsigned long flags;
1615
1616 spin_lock_irqsave(hba->host->host_lock, flags);
1617
1618
1619
1620
1621
1622
1623 if (hba->clk_gating.is_suspended ||
1624 (hba->clk_gating.state == REQ_CLKS_ON)) {
1625 hba->clk_gating.state = CLKS_ON;
1626 trace_ufshcd_clk_gating(dev_name(hba->dev),
1627 hba->clk_gating.state);
1628 goto rel_lock;
1629 }
1630
1631 if (hba->clk_gating.active_reqs
1632 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1633 || hba->lrb_in_use || hba->outstanding_tasks
1634 || hba->active_uic_cmd || hba->uic_async_done)
1635 goto rel_lock;
1636
1637 spin_unlock_irqrestore(hba->host->host_lock, flags);
1638
1639
1640 if (ufshcd_can_hibern8_during_gating(hba)) {
1641 if (ufshcd_uic_hibern8_enter(hba)) {
1642 hba->clk_gating.state = CLKS_ON;
1643 trace_ufshcd_clk_gating(dev_name(hba->dev),
1644 hba->clk_gating.state);
1645 goto out;
1646 }
1647 ufshcd_set_link_hibern8(hba);
1648 }
1649
1650 if (!ufshcd_is_link_active(hba))
1651 ufshcd_setup_clocks(hba, false);
1652 else
1653
1654 __ufshcd_setup_clocks(hba, false, true);
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665 spin_lock_irqsave(hba->host->host_lock, flags);
1666 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1667 hba->clk_gating.state = CLKS_OFF;
1668 trace_ufshcd_clk_gating(dev_name(hba->dev),
1669 hba->clk_gating.state);
1670 }
1671rel_lock:
1672 spin_unlock_irqrestore(hba->host->host_lock, flags);
1673out:
1674 return;
1675}
1676
1677
1678static void __ufshcd_release(struct ufs_hba *hba)
1679{
1680 if (!ufshcd_is_clkgating_allowed(hba))
1681 return;
1682
1683 hba->clk_gating.active_reqs--;
1684
1685 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1686 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1687 || hba->lrb_in_use || hba->outstanding_tasks
1688 || hba->active_uic_cmd || hba->uic_async_done
1689 || ufshcd_eh_in_progress(hba))
1690 return;
1691
1692 hba->clk_gating.state = REQ_CLKS_OFF;
1693 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1694 schedule_delayed_work(&hba->clk_gating.gate_work,
1695 msecs_to_jiffies(hba->clk_gating.delay_ms));
1696}
1697
1698void ufshcd_release(struct ufs_hba *hba)
1699{
1700 unsigned long flags;
1701
1702 spin_lock_irqsave(hba->host->host_lock, flags);
1703 __ufshcd_release(hba);
1704 spin_unlock_irqrestore(hba->host->host_lock, flags);
1705}
1706EXPORT_SYMBOL_GPL(ufshcd_release);
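/*
 * Typical pairing (illustrative sketch, mirroring the callers in this file):
 * keep the controller clocks up across a host access and drop the reference
 * afterwards. With async == false, ufshcd_hold() may sleep until the clocks
 * are back on.
 *
 *	ufshcd_hold(hba, false);
 *	... issue commands / access host registers ...
 *	ufshcd_release(hba);
 */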
1707
1708static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1709 struct device_attribute *attr, char *buf)
1710{
1711 struct ufs_hba *hba = dev_get_drvdata(dev);
1712
1713 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1714}
1715
1716static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1717 struct device_attribute *attr, const char *buf, size_t count)
1718{
1719 struct ufs_hba *hba = dev_get_drvdata(dev);
1720 unsigned long flags, value;
1721
1722 if (kstrtoul(buf, 0, &value))
1723 return -EINVAL;
1724
1725 spin_lock_irqsave(hba->host->host_lock, flags);
1726 hba->clk_gating.delay_ms = value;
1727 spin_unlock_irqrestore(hba->host->host_lock, flags);
1728 return count;
1729}
1730
1731static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1732 struct device_attribute *attr, char *buf)
1733{
1734 struct ufs_hba *hba = dev_get_drvdata(dev);
1735
1736 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1737}
1738
1739static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1740 struct device_attribute *attr, const char *buf, size_t count)
1741{
1742 struct ufs_hba *hba = dev_get_drvdata(dev);
1743 unsigned long flags;
1744 u32 value;
1745
1746 if (kstrtou32(buf, 0, &value))
1747 return -EINVAL;
1748
1749 value = !!value;
1750 if (value == hba->clk_gating.is_enabled)
1751 goto out;
1752
1753 if (value) {
1754 ufshcd_release(hba);
1755 } else {
1756 spin_lock_irqsave(hba->host->host_lock, flags);
1757 hba->clk_gating.active_reqs++;
1758 spin_unlock_irqrestore(hba->host->host_lock, flags);
1759 }
1760
1761 hba->clk_gating.is_enabled = value;
1762out:
1763 return count;
1764}
1765
1766static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1767{
1768 char wq_name[sizeof("ufs_clk_gating_00")];
1769
1770 if (!ufshcd_is_clkgating_allowed(hba))
1771 return;
1772
1773 hba->clk_gating.delay_ms = 150;
1774 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1775 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1776
1777 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1778 hba->host->host_no);
1779 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1780 WQ_MEM_RECLAIM);
1781
1782 hba->clk_gating.is_enabled = true;
1783
1784 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1785 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1786 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1787 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1788 hba->clk_gating.delay_attr.attr.mode = 0644;
1789 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1790 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1791
1792 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1793 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1794 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1795 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1796 hba->clk_gating.enable_attr.attr.mode = 0644;
1797 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1798 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1799}
1800
1801static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1802{
1803 if (!ufshcd_is_clkgating_allowed(hba))
1804 return;
1805 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1806 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1807 cancel_work_sync(&hba->clk_gating.ungate_work);
1808 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1809 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1810}
1811
1812
1813static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1814{
1815 bool queue_resume_work = false;
1816
1817 if (!ufshcd_is_clkscaling_supported(hba))
1818 return;
1819
1820 if (!hba->clk_scaling.active_reqs++)
1821 queue_resume_work = true;
1822
1823 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1824 return;
1825
1826 if (queue_resume_work)
1827 queue_work(hba->clk_scaling.workq,
1828 &hba->clk_scaling.resume_work);
1829
1830 if (!hba->clk_scaling.window_start_t) {
1831 hba->clk_scaling.window_start_t = jiffies;
1832 hba->clk_scaling.tot_busy_t = 0;
1833 hba->clk_scaling.is_busy_started = false;
1834 }
1835
1836 if (!hba->clk_scaling.is_busy_started) {
1837 hba->clk_scaling.busy_start_t = ktime_get();
1838 hba->clk_scaling.is_busy_started = true;
1839 }
1840}
1841
1842static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1843{
1844 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1845
1846 if (!ufshcd_is_clkscaling_supported(hba))
1847 return;
1848
1849 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1850 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1851 scaling->busy_start_t));
1852 scaling->busy_start_t = 0;
1853 scaling->is_busy_started = false;
1854 }
1855}
1856
1857
1858
1859
1860
1861static inline
1862void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1863{
1864 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1865 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1866 ufshcd_clk_scaling_start_busy(hba);
1867 __set_bit(task_tag, &hba->outstanding_reqs);
1868 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1869
1870 wmb();
1871 ufshcd_add_command_trace(hba, task_tag, "send");
1872}
1873
1874
1875
1876
1877
1878static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1879{
1880 int len;
1881 if (lrbp->sense_buffer &&
1882 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1883 int len_to_copy;
1884
1885 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1886 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1887
1888 memcpy(lrbp->sense_buffer,
1889 lrbp->ucd_rsp_ptr->sr.sense_data,
1890 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1891 }
1892}
1893
1894
1895
1896
1897
1898
1899
1900static
1901int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1902{
1903 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1904
1905 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1906
1907
1908 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1909 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1910 GENERAL_UPIU_REQUEST_SIZE;
1911 u16 resp_len;
1912 u16 buf_len;
1913
1914
1915 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1916 MASK_QUERY_DATA_SEG_LEN;
1917 buf_len = be16_to_cpu(
1918 hba->dev_cmd.query.request.upiu_req.length);
1919 if (likely(buf_len >= resp_len)) {
1920 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1921 } else {
1922 dev_warn(hba->dev,
1923 "%s: Response size is bigger than buffer",
1924 __func__);
1925 return -EINVAL;
1926 }
1927 }
1928
1929 return 0;
1930}
1931
1932
1933
1934
1935
1936static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1937{
1938 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1939
1940
1941 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1942 hba->nutmrs =
1943 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1944}
1945
1946
1947
1948
1949
1950
1951
1952static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1953{
1954 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1955 return true;
1956 else
1957 return false;
1958}
1959
1960
1961
1962
1963
1964
1965
1966
1967static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1968{
1969 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1970}
1971
1972
1973
1974
1975
1976
1977
1978
1979static inline void
1980ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1981{
1982 WARN_ON(hba->active_uic_cmd);
1983
1984 hba->active_uic_cmd = uic_cmd;
1985
1986
1987 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1988 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1989 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1990
1991
1992 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1993 REG_UIC_COMMAND);
1994}
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004static int
2005ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2006{
2007 int ret;
2008 unsigned long flags;
2009
2010 if (wait_for_completion_timeout(&uic_cmd->done,
2011 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2012 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2013 else
2014 ret = -ETIMEDOUT;
2015
2016 spin_lock_irqsave(hba->host->host_lock, flags);
2017 hba->active_uic_cmd = NULL;
2018 spin_unlock_irqrestore(hba->host->host_lock, flags);
2019
2020 return ret;
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033static int
2034__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2035 bool completion)
2036{
2037 if (!ufshcd_ready_for_uic_cmd(hba)) {
2038 dev_err(hba->dev,
2039 "Controller not ready to accept UIC commands\n");
2040 return -EIO;
2041 }
2042
2043 if (completion)
2044 init_completion(&uic_cmd->done);
2045
2046 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2047
2048 return 0;
2049}
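/*
 * Note: __ufshcd_send_uic_cmd() only checks readiness and programs the UIC
 * command registers. Callers (see ufshcd_send_uic_cmd() below) are expected
 * to hold hba->uic_cmd_mutex and the host lock around it, then wait for the
 * result with ufshcd_wait_for_uic_cmd().
 */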
2050
2051
2052
2053
2054
2055
2056
2057
2058static int
2059ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2060{
2061 int ret;
2062 unsigned long flags;
2063
2064 ufshcd_hold(hba, false);
2065 mutex_lock(&hba->uic_cmd_mutex);
2066 ufshcd_add_delay_before_dme_cmd(hba);
2067
2068 spin_lock_irqsave(hba->host->host_lock, flags);
2069 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2070 spin_unlock_irqrestore(hba->host->host_lock, flags);
2071 if (!ret)
2072 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2073
2074 mutex_unlock(&hba->uic_cmd_mutex);
2075
2076 ufshcd_release(hba);
2077 return ret;
2078}
2079
2080
2081
2082
2083
2084
2085
2086
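/**
 * ufshcd_map_sg - map the scatter-gather list into the PRDT
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Each PRDT entry carries the segment DMA address split into lower/upper
 * 32 bits and the segment length encoded as (byte count - 1). With the
 * UFSHCD_QUIRK_PRDT_BYTE_GRAN quirk the PRDT length field is programmed in
 * bytes instead of number of entries.
 *
 * Returns 0 on success, or the negative value returned by scsi_dma_map().
 */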
2087static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2088{
2089 struct ufshcd_sg_entry *prd_table;
2090 struct scatterlist *sg;
2091 struct scsi_cmnd *cmd;
2092 int sg_segments;
2093 int i;
2094
2095 cmd = lrbp->cmd;
2096 sg_segments = scsi_dma_map(cmd);
2097 if (sg_segments < 0)
2098 return sg_segments;
2099
2100 if (sg_segments) {
2101 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2102 lrbp->utr_descriptor_ptr->prd_table_length =
2103 cpu_to_le16((u16)(sg_segments *
2104 sizeof(struct ufshcd_sg_entry)));
2105 else
2106 lrbp->utr_descriptor_ptr->prd_table_length =
2107 cpu_to_le16((u16) (sg_segments));
2108
2109 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2110
2111 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2112 prd_table[i].size =
2113 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2114 prd_table[i].base_addr =
2115 cpu_to_le32(lower_32_bits(sg->dma_address));
2116 prd_table[i].upper_addr =
2117 cpu_to_le32(upper_32_bits(sg->dma_address));
2118 prd_table[i].reserved = 0;
2119 }
2120 } else {
2121 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2122 }
2123
2124 return 0;
2125}
2126
2127
2128
2129
2130
2131
2132static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2133{
2134 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2135
2136 if (hba->ufs_version == UFSHCI_VERSION_10) {
2137 u32 rw;
2138 rw = set & INTERRUPT_MASK_RW_VER_10;
2139 set = rw | ((set ^ intrs) & intrs);
2140 } else {
2141 set |= intrs;
2142 }
2143
2144 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2145}
2146
2147
2148
2149
2150
2151
2152static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2153{
2154 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2155
2156 if (hba->ufs_version == UFSHCI_VERSION_10) {
2157 u32 rw;
2158 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2159 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2160 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2161
2162 } else {
2163 set &= ~intrs;
2164 }
2165
2166 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2167}
2168
2169
2170
2171
2172
2173
2174
2175
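/**
 * ufshcd_prepare_req_desc_hdr - fill the UTP transfer request descriptor header
 * @lrbp: pointer to local reference block
 * @upiu_flags: filled with UPIU_CMD_FLAGS_READ/WRITE/NONE for the caller
 * @cmd_dir: DMA data direction of the command
 *
 * Encodes the data direction and command type into dword_0, flags interrupt
 * commands, and seeds the OCS field with OCS_INVALID_COMMAND_STATUS so the
 * completion path can tell whether the controller ever updated it.
 */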
2176static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2177 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2178{
2179 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2180 u32 data_direction;
2181 u32 dword_0;
2182
2183 if (cmd_dir == DMA_FROM_DEVICE) {
2184 data_direction = UTP_DEVICE_TO_HOST;
2185 *upiu_flags = UPIU_CMD_FLAGS_READ;
2186 } else if (cmd_dir == DMA_TO_DEVICE) {
2187 data_direction = UTP_HOST_TO_DEVICE;
2188 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2189 } else {
2190 data_direction = UTP_NO_DATA_TRANSFER;
2191 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2192 }
2193
2194 dword_0 = data_direction | (lrbp->command_type
2195 << UPIU_COMMAND_TYPE_OFFSET);
2196 if (lrbp->intr_cmd)
2197 dword_0 |= UTP_REQ_DESC_INT_CMD;
2198
2199
2200 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2201
2202 req_desc->header.dword_1 = 0;
2203
2204
2205
2206
2207
2208 req_desc->header.dword_2 =
2209 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2210
2211 req_desc->header.dword_3 = 0;
2212
2213 req_desc->prd_table_length = 0;
2214}
2215
2216
2217
2218
2219
2220
2221
2222static
2223void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2224{
2225 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2226 unsigned short cdb_len;
2227
2228
2229 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2230 UPIU_TRANSACTION_COMMAND, upiu_flags,
2231 lrbp->lun, lrbp->task_tag);
2232 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2233 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2234
2235
2236 ucd_req_ptr->header.dword_2 = 0;
2237
2238 ucd_req_ptr->sc.exp_data_transfer_len =
2239 cpu_to_be32(lrbp->cmd->sdb.length);
2240
2241 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2242 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2243 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2244
2245 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2246}
2247
2248
2249
2250
2251
2252
2253
2254
2255static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2256 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2257{
2258 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2259 struct ufs_query *query = &hba->dev_cmd.query;
2260 u16 len = be16_to_cpu(query->request.upiu_req.length);
2261 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2262
2263
2264 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2265 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2266 lrbp->lun, lrbp->task_tag);
2267 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2268 0, query->request.query_func, 0, 0);
2269
2270
2271 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2272 ucd_req_ptr->header.dword_2 =
2273 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2274 else
2275 ucd_req_ptr->header.dword_2 = 0;
2276
2277
2278 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2279 QUERY_OSF_SIZE);
2280
2281
2282 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2283 memcpy(descp, query->descriptor, len);
2284
2285 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2286}
2287
2288static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2289{
2290 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2291
2292 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2293
2294
2295 ucd_req_ptr->header.dword_0 =
2296 UPIU_HEADER_DWORD(
2297 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2298
2299 ucd_req_ptr->header.dword_1 = 0;
2300 ucd_req_ptr->header.dword_2 = 0;
2301
2302 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2303}
2304
2305
2306
2307
2308
2309
2310
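/**
 * ufshcd_comp_devman_upiu - compose a device management request (NOP or query)
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 on success, -EINVAL for an unsupported device command type.
 */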
2311static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2312{
2313 u32 upiu_flags;
2314 int ret = 0;
2315
2316 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2317 (hba->ufs_version == UFSHCI_VERSION_11))
2318 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2319 else
2320 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2321
2322 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2323 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2324 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2325 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2326 ufshcd_prepare_utp_nop_upiu(lrbp);
2327 else
2328 ret = -EINVAL;
2329
2330 return ret;
2331}
2332
2333
2334
2335
2336
2337
2338
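/**
 * ufshcd_comp_scsi_upiu - compose the UPIU for a SCSI command
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 on success, -EINVAL if no SCSI command is attached to the LRB.
 */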
2339static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2340{
2341 u32 upiu_flags;
2342 int ret = 0;
2343
2344 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2345 (hba->ufs_version == UFSHCI_VERSION_11))
2346 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2347 else
2348 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2349
2350 if (likely(lrbp->cmd)) {
2351 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2352 lrbp->cmd->sc_data_direction);
2353 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2354 } else {
2355 ret = -EINVAL;
2356 }
2357
2358 return ret;
2359}
2360
2361
2362
2363
2364
2365
2366
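/**
 * ufshcd_upiu_wlun_to_scsi_wlun - map a UPIU W-LUN id to the SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns the corresponding SCSI W-LUN id.
 */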
2367static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2368{
2369 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2370}
2371
2372
2373
2374
2375
2376
2377
2378
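/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from the SCSI midlayer
 *
 * Returns 0 for success, non-zero in case of failure.
 */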
2379static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2380{
2381 struct ufshcd_lrb *lrbp;
2382 struct ufs_hba *hba;
2383 unsigned long flags;
2384 int tag;
2385 int err = 0;
2386
2387 hba = shost_priv(host);
2388
2389 tag = cmd->request->tag;
2390 if (!ufshcd_valid_tag(hba, tag)) {
2391 dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p\n",
2393 __func__, tag, cmd, cmd->request);
2394 BUG();
2395 }
2396
2397 if (!down_read_trylock(&hba->clk_scaling_lock))
2398 return SCSI_MLQUEUE_HOST_BUSY;
2399
2400 spin_lock_irqsave(hba->host->host_lock, flags);
2401 switch (hba->ufshcd_state) {
2402 case UFSHCD_STATE_OPERATIONAL:
2403 break;
2404 case UFSHCD_STATE_EH_SCHEDULED:
2405 case UFSHCD_STATE_RESET:
2406 err = SCSI_MLQUEUE_HOST_BUSY;
2407 goto out_unlock;
2408 case UFSHCD_STATE_ERROR:
2409 set_host_byte(cmd, DID_ERROR);
2410 cmd->scsi_done(cmd);
2411 goto out_unlock;
2412 default:
2413 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2414 __func__, hba->ufshcd_state);
2415 set_host_byte(cmd, DID_BAD_TARGET);
2416 cmd->scsi_done(cmd);
2417 goto out_unlock;
2418 }
2419
2420
2421 if (ufshcd_eh_in_progress(hba)) {
2422 set_host_byte(cmd, DID_ERROR);
2423 cmd->scsi_done(cmd);
2424 goto out_unlock;
2425 }
2426 spin_unlock_irqrestore(hba->host->host_lock, flags);
2427
2428 hba->req_abort_count = 0;
2429
2430
2431 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
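		/*
		 * The tag is currently owned by another request (for example
		 * a device management command sharing the same slot), so ask
		 * the SCSI midlayer to retry the command later.
		 */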
2432
2433
2434
2435
2436
2437
2438 err = SCSI_MLQUEUE_HOST_BUSY;
2439 goto out;
2440 }
2441
2442 err = ufshcd_hold(hba, true);
2443 if (err) {
2444 err = SCSI_MLQUEUE_HOST_BUSY;
2445 clear_bit_unlock(tag, &hba->lrb_in_use);
2446 goto out;
2447 }
2448 WARN_ON(hba->clk_gating.state != CLKS_ON);
2449
2450 lrbp = &hba->lrb[tag];
2451
2452 WARN_ON(lrbp->cmd);
2453 lrbp->cmd = cmd;
2454 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2455 lrbp->sense_buffer = cmd->sense_buffer;
2456 lrbp->task_tag = tag;
2457 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2459 lrbp->req_abort_skip = false;
2460
2461 ufshcd_comp_scsi_upiu(hba, lrbp);
2462
2463 err = ufshcd_map_sg(hba, lrbp);
2464 if (err) {
2465 lrbp->cmd = NULL;
2466 clear_bit_unlock(tag, &hba->lrb_in_use);
2467 goto out;
2468 }
2469
2470 wmb();
2471
2472
2473 spin_lock_irqsave(hba->host->host_lock, flags);
2474 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2475 ufshcd_send_command(hba, tag);
2476out_unlock:
2477 spin_unlock_irqrestore(hba->host->host_lock, flags);
2478out:
2479 up_read(&hba->clk_scaling_lock);
2480 return err;
2481}
2482
2483static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2484 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2485{
2486 lrbp->cmd = NULL;
2487 lrbp->sense_bufflen = 0;
2488 lrbp->sense_buffer = NULL;
2489 lrbp->task_tag = tag;
2490 lrbp->lun = 0;
2491 lrbp->intr_cmd = true;
2492 hba->dev_cmd.type = cmd_type;
2493
2494 return ufshcd_comp_devman_upiu(hba, lrbp);
2495}
2496
2497static int
2498ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2499{
2500 int err = 0;
2501 unsigned long flags;
2502 u32 mask = 1 << tag;
2503
2504
2505 spin_lock_irqsave(hba->host->host_lock, flags);
2506 ufshcd_utrl_clear(hba, tag);
2507 spin_unlock_irqrestore(hba->host->host_lock, flags);
2508
2509
2510
2511
2512
2513 err = ufshcd_wait_for_register(hba,
2514 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2515 mask, ~mask, 1000, 1000, true);
2516
2517 return err;
2518}
2519
2520static int
2521ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2522{
2523 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2524
2525
2526 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2527 UPIU_RSP_CODE_OFFSET;
2528 return query_res->response;
2529}
2530
2531
2532
2533
2534
2535
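/**
 * ufshcd_dev_cmd_completion - handle device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of the completed command
 *
 * Returns 0 on success, non-zero value on failure.
 */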
2536static int
2537ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2538{
2539 int resp;
2540 int err = 0;
2541
2542 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2543 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2544
2545 switch (resp) {
2546 case UPIU_TRANSACTION_NOP_IN:
2547 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2548 err = -EINVAL;
2549 dev_err(hba->dev, "%s: unexpected response %x\n",
2550 __func__, resp);
2551 }
2552 break;
2553 case UPIU_TRANSACTION_QUERY_RSP:
2554 err = ufshcd_check_query_response(hba, lrbp);
2555 if (!err)
2556 err = ufshcd_copy_query_response(hba, lrbp);
2557 break;
2558 case UPIU_TRANSACTION_REJECT_UPIU:
2559
2560 err = -EPERM;
2561 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2562 __func__);
2563 break;
2564 default:
2565 err = -EINVAL;
2566 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2567 __func__, resp);
2568 break;
2569 }
2570
2571 return err;
2572}
2573
2574static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2575 struct ufshcd_lrb *lrbp, int max_timeout)
2576{
2577 int err = 0;
2578 unsigned long time_left;
2579 unsigned long flags;
2580
2581 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2582 msecs_to_jiffies(max_timeout));
2583
2584
2585 wmb();
2586 spin_lock_irqsave(hba->host->host_lock, flags);
2587 hba->dev_cmd.complete = NULL;
2588 if (likely(time_left)) {
2589 err = ufshcd_get_tr_ocs(lrbp);
2590 if (!err)
2591 err = ufshcd_dev_cmd_completion(hba, lrbp);
2592 }
2593 spin_unlock_irqrestore(hba->host->host_lock, flags);
2594
2595 if (!time_left) {
2596 err = -ETIMEDOUT;
2597 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2598 __func__, lrbp->task_tag);
2599 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2600
2601 err = -EAGAIN;
2602
2603
2604
2605
2606
2607 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2608 }
2609
2610 return err;
2611}
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
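/**
 * ufshcd_get_dev_cmd_tag - get a free slot for a device management command
 * @hba: per adapter instance
 * @tag_out: receives the allocated tag
 *
 * Device management commands share the transfer request slots with SCSI
 * commands, so a free bit in lrb_in_use is claimed atomically here.
 *
 * Returns true if a tag was allocated, false otherwise.
 */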
2624static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2625{
2626 int tag;
2627 bool ret = false;
2628 unsigned long tmp;
2629
2630 if (!tag_out)
2631 goto out;
2632
2633 do {
2634 tmp = ~hba->lrb_in_use;
2635 tag = find_last_bit(&tmp, hba->nutrs);
2636 if (tag >= hba->nutrs)
2637 goto out;
2638 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2639
2640 *tag_out = tag;
2641 ret = true;
2642out:
2643 return ret;
2644}
2645
2646static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2647{
2648 clear_bit_unlock(tag, &hba->lrb_in_use);
2649}
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
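/**
 * ufshcd_exec_dev_cmd - send a device management request and wait for response
 * @hba: per adapter instance
 * @cmd_type: type of the device command (NOP or query)
 * @timeout: timeout in milliseconds
 *
 * NOTE: callers are expected to hold hba->dev_cmd.lock.
 *
 * Returns 0 on success, non-zero value on failure.
 */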
2660static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2661 enum dev_cmd_type cmd_type, int timeout)
2662{
2663 struct ufshcd_lrb *lrbp;
2664 int err;
2665 int tag;
2666 struct completion wait;
2667 unsigned long flags;
2668
2669 down_read(&hba->clk_scaling_lock);
2670
2671
2672
2673
2674
2675
2676 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2677
2678 init_completion(&wait);
2679 lrbp = &hba->lrb[tag];
2680 WARN_ON(lrbp->cmd);
2681 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2682 if (unlikely(err))
2683 goto out_put_tag;
2684
2685 hba->dev_cmd.complete = &wait;
2686
2687 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2688
2689 wmb();
2690 spin_lock_irqsave(hba->host->host_lock, flags);
2691 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2692 ufshcd_send_command(hba, tag);
2693 spin_unlock_irqrestore(hba->host->host_lock, flags);
2694
2695 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2696
2697 ufshcd_add_query_upiu_trace(hba, tag,
2698 err ? "query_complete_err" : "query_complete");
2699
2700out_put_tag:
2701 ufshcd_put_dev_cmd_tag(hba, tag);
2702 wake_up(&hba->dev_cmd.tag_wq);
2703 up_read(&hba->clk_scaling_lock);
2704 return err;
2705}
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
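/**
 * ufshcd_init_query - init the query request and response parameters
 * @hba: per adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag/attribute/descriptor idn to access
 * @index: index field
 * @selector: selector field
 */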
2717static inline void ufshcd_init_query(struct ufs_hba *hba,
2718 struct ufs_query_req **request, struct ufs_query_res **response,
2719 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2720{
2721 *request = &hba->dev_cmd.query.request;
2722 *response = &hba->dev_cmd.query.response;
2723 memset(*request, 0, sizeof(struct ufs_query_req));
2724 memset(*response, 0, sizeof(struct ufs_query_res));
2725 (*request)->upiu_req.opcode = opcode;
2726 (*request)->upiu_req.idn = idn;
2727 (*request)->upiu_req.index = index;
2728 (*request)->upiu_req.selector = selector;
2729}
2730
2731static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2732 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2733{
2734 int ret;
2735 int retries;
2736
2737 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2738 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2739 if (ret)
2740 dev_dbg(hba->dev,
2741 "%s: failed with error %d, retries %d\n",
2742 __func__, ret, retries);
2743 else
2744 break;
2745 }
2746
2747 if (ret)
2748 dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2750 __func__, opcode, idn, ret, retries);
2751 return ret;
2752}
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
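/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure.
 */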
2763int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2764 enum flag_idn idn, bool *flag_res)
2765{
2766 struct ufs_query_req *request = NULL;
2767 struct ufs_query_res *response = NULL;
2768 int err, index = 0, selector = 0;
2769 int timeout = QUERY_REQ_TIMEOUT;
2770
2771 BUG_ON(!hba);
2772
2773 ufshcd_hold(hba, false);
2774 mutex_lock(&hba->dev_cmd.lock);
2775 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2776 selector);
2777
2778 switch (opcode) {
2779 case UPIU_QUERY_OPCODE_SET_FLAG:
2780 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2781 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2782 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2783 break;
2784 case UPIU_QUERY_OPCODE_READ_FLAG:
2785 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2786 if (!flag_res) {
2787
2788 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2789 __func__);
2790 err = -EINVAL;
2791 goto out_unlock;
2792 }
2793 break;
2794 default:
2795 dev_err(hba->dev,
2796 "%s: Expected query flag opcode but got = %d\n",
2797 __func__, opcode);
2798 err = -EINVAL;
2799 goto out_unlock;
2800 }
2801
2802 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2803
2804 if (err) {
2805 dev_err(hba->dev,
2806 "%s: Sending flag query for idn %d failed, err = %d\n",
2807 __func__, idn, err);
2808 goto out_unlock;
2809 }
2810
2811 if (flag_res)
2812 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2813 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2814
2815out_unlock:
2816 mutex_unlock(&hba->dev_cmd.lock);
2817 ufshcd_release(hba);
2818 return err;
2819}
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
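/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure.
 */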
2832int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2833 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2834{
2835 struct ufs_query_req *request = NULL;
2836 struct ufs_query_res *response = NULL;
2837 int err;
2838
2839 BUG_ON(!hba);
2840
2841 ufshcd_hold(hba, false);
2842 if (!attr_val) {
2843 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2844 __func__, opcode);
2845 err = -EINVAL;
2846 goto out;
2847 }
2848
2849 mutex_lock(&hba->dev_cmd.lock);
2850 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2851 selector);
2852
2853 switch (opcode) {
2854 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2855 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2856 request->upiu_req.value = cpu_to_be32(*attr_val);
2857 break;
2858 case UPIU_QUERY_OPCODE_READ_ATTR:
2859 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2860 break;
2861 default:
2862 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2863 __func__, opcode);
2864 err = -EINVAL;
2865 goto out_unlock;
2866 }
2867
2868 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2869
2870 if (err) {
2871 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2872 __func__, opcode, idn, index, err);
2873 goto out_unlock;
2874 }
2875
2876 *attr_val = be32_to_cpu(response->upiu_res.value);
2877
2878out_unlock:
2879 mutex_unlock(&hba->dev_cmd.lock);
2880out:
2881 ufshcd_release(hba);
2882 return err;
2883}
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2899 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2900 u32 *attr_val)
2901{
2902 int ret = 0;
2903 u32 retries;
2904
2905 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2906 ret = ufshcd_query_attr(hba, opcode, idn, index,
2907 selector, attr_val);
2908 if (ret)
2909 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2910 __func__, ret, retries);
2911 else
2912 break;
2913 }
2914
2915 if (ret)
2916 dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
2918 __func__, idn, ret, QUERY_REQ_RETRIES);
2919 return ret;
2920}
2921
2922static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2923 enum query_opcode opcode, enum desc_idn idn, u8 index,
2924 u8 selector, u8 *desc_buf, int *buf_len)
2925{
2926 struct ufs_query_req *request = NULL;
2927 struct ufs_query_res *response = NULL;
2928 int err;
2929
2930 BUG_ON(!hba);
2931
2932 ufshcd_hold(hba, false);
2933 if (!desc_buf) {
2934 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2935 __func__, opcode);
2936 err = -EINVAL;
2937 goto out;
2938 }
2939
2940 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2941 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2942 __func__, *buf_len);
2943 err = -EINVAL;
2944 goto out;
2945 }
2946
2947 mutex_lock(&hba->dev_cmd.lock);
2948 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2949 selector);
2950 hba->dev_cmd.query.descriptor = desc_buf;
2951 request->upiu_req.length = cpu_to_be16(*buf_len);
2952
2953 switch (opcode) {
2954 case UPIU_QUERY_OPCODE_WRITE_DESC:
2955 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2956 break;
2957 case UPIU_QUERY_OPCODE_READ_DESC:
2958 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2959 break;
2960 default:
2961 dev_err(hba->dev,
2962 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2963 __func__, opcode);
2964 err = -EINVAL;
2965 goto out_unlock;
2966 }
2967
2968 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2969
2970 if (err) {
2971 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2972 __func__, opcode, idn, index, err);
2973 goto out_unlock;
2974 }
2975
2976 hba->dev_cmd.query.descriptor = NULL;
2977 *buf_len = be16_to_cpu(response->upiu_res.length);
2978
2979out_unlock:
2980 mutex_unlock(&hba->dev_cmd.lock);
2981out:
2982 ufshcd_release(hba);
2983 return err;
2984}
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
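/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per adapter instance
 * @opcode: descriptor opcode (read/write)
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: requested length, updated with the length actually transferred
 *
 * Returns 0 for success, non-zero in case of failure.
 */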
3000int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3001 enum query_opcode opcode,
3002 enum desc_idn idn, u8 index,
3003 u8 selector,
3004 u8 *desc_buf, int *buf_len)
3005{
3006 int err;
3007 int retries;
3008
3009 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3010 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3011 selector, desc_buf, buf_len);
3012 if (!err || err == -EINVAL)
3013 break;
3014 }
3015
3016 return err;
3017}
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
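/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: per adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: receives the length reported in the descriptor header
 *
 * Returns 0 in case of success, non-zero otherwise.
 */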
3028static int ufshcd_read_desc_length(struct ufs_hba *hba,
3029 enum desc_idn desc_id,
3030 int desc_index,
3031 int *desc_length)
3032{
3033 int ret;
3034 u8 header[QUERY_DESC_HDR_SIZE];
3035 int header_len = QUERY_DESC_HDR_SIZE;
3036
3037 if (desc_id >= QUERY_DESC_IDN_MAX)
3038 return -EINVAL;
3039
3040 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3041 desc_id, desc_index, 0, header,
3042 &header_len);
3043
3044 if (ret) {
3045 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3046 __func__, desc_id);
3047 return ret;
3048 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3049 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3050 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3051 desc_id);
3052 ret = -EINVAL;
3053 }
3054
3055 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3056 return ret;
3057
3058}
3059
3060
3061
3062
3063
3064
3065
3066
3067
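/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its maximum length
 * @hba: pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped descriptor length (out)
 *
 * Returns 0 in case of success, non-zero otherwise.
 */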
3068int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3069 enum desc_idn desc_id, int *desc_len)
3070{
3071 switch (desc_id) {
3072 case QUERY_DESC_IDN_DEVICE:
3073 *desc_len = hba->desc_size.dev_desc;
3074 break;
3075 case QUERY_DESC_IDN_POWER:
3076 *desc_len = hba->desc_size.pwr_desc;
3077 break;
3078 case QUERY_DESC_IDN_GEOMETRY:
3079 *desc_len = hba->desc_size.geom_desc;
3080 break;
3081 case QUERY_DESC_IDN_CONFIGURATION:
3082 *desc_len = hba->desc_size.conf_desc;
3083 break;
3084 case QUERY_DESC_IDN_UNIT:
3085 *desc_len = hba->desc_size.unit_desc;
3086 break;
3087 case QUERY_DESC_IDN_INTERCONNECT:
3088 *desc_len = hba->desc_size.interc_desc;
3089 break;
3090 case QUERY_DESC_IDN_STRING:
3091 *desc_len = QUERY_DESC_MAX_SIZE;
3092 break;
3093 case QUERY_DESC_IDN_HEALTH:
3094 *desc_len = hba->desc_size.hlth_desc;
3095 break;
3096 case QUERY_DESC_IDN_RFU_0:
3097 case QUERY_DESC_IDN_RFU_1:
3098 *desc_len = 0;
3099 break;
3100 default:
3101 *desc_len = 0;
3102 return -EINVAL;
3103 }
3104 return 0;
3105}
3106EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
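/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Returns 0 in case of success, non-zero otherwise.
 */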
3119int ufshcd_read_desc_param(struct ufs_hba *hba,
3120 enum desc_idn desc_id,
3121 int desc_index,
3122 u8 param_offset,
3123 u8 *param_read_buf,
3124 u8 param_size)
3125{
3126 int ret;
3127 u8 *desc_buf;
3128 int buff_len;
3129 bool is_kmalloc = true;
3130
3131
3132 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3133 return -EINVAL;
3134
3135
3136
3137
3138 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3139
3140
3141 if (ret || !buff_len) {
3142 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3143 __func__);
3144 return ret;
3145 }
3146
3147
3148 if (param_offset != 0 || param_size < buff_len) {
3149 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3150 if (!desc_buf)
3151 return -ENOMEM;
3152 } else {
3153 desc_buf = param_read_buf;
3154 is_kmalloc = false;
3155 }
3156
3157
3158 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3159 desc_id, desc_index, 0,
3160 desc_buf, &buff_len);
3161
3162 if (ret) {
3163 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3164 __func__, desc_id, desc_index, param_offset, ret);
3165 goto out;
3166 }
3167
3168
3169 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3170 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3171 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3172 ret = -EINVAL;
3173 goto out;
3174 }
3175
3176
3177 if (is_kmalloc && param_size > buff_len)
3178 param_size = buff_len;
3179
3180 if (is_kmalloc)
3181 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3182out:
3183 if (is_kmalloc)
3184 kfree(desc_buf);
3185 return ret;
3186}
3187
3188static inline int ufshcd_read_desc(struct ufs_hba *hba,
3189 enum desc_idn desc_id,
3190 int desc_index,
3191 u8 *buf,
3192 u32 size)
3193{
3194 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3195}
3196
3197static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3198 u8 *buf,
3199 u32 size)
3200{
3201 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3202}
3203
3204static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3205{
3206 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3207}
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
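/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read
 * @size: size of buf
 * @ascii: if true convert from UTF-16 to ASCII characters
 *
 * Returns 0 in case of success, non-zero otherwise.
 */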
3219int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3220 u8 *buf, u32 size, bool ascii)
3221{
3222 int err = 0;
3223
3224 err = ufshcd_read_desc(hba,
3225 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3226
3227 if (err) {
3228 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3229 __func__, QUERY_REQ_RETRIES, err);
3230 goto out;
3231 }
3232
3233 if (ascii) {
3234 int desc_len;
3235 int ascii_len;
3236 int i;
3237 char *buff_ascii;
3238
3239 desc_len = buf[0];
3240
3241 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3242 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3243 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3244 __func__);
3245 err = -ENOMEM;
3246 goto out;
3247 }
3248
3249 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3250 if (!buff_ascii) {
3251 err = -ENOMEM;
3252 goto out;
3253 }
3254
3255
3256
3257
3258
3259 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3260 desc_len - QUERY_DESC_HDR_SIZE,
3261 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3262
3263
3264 for (i = 0; i < ascii_len; i++)
3265 ufshcd_remove_non_printable(&buff_ascii[i]);
3266
3267 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3268 size - QUERY_DESC_HDR_SIZE);
3269 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3270 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3271 kfree(buff_ascii);
3272 }
3273out:
3274 return err;
3275}
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
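/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: pointer to adapter instance
 * @lun: unit descriptor index (LUN id)
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Returns 0 in case of success, non-zero otherwise.
 */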
3287static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3288 int lun,
3289 enum unit_desc_param param_offset,
3290 u8 *param_read_buf,
3291 u32 param_size)
3292{
3293
3294
3295
3296
3297 if (!ufs_is_valid_unit_desc_lun(lun))
3298 return -EOPNOTSUPP;
3299
3300 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3301 param_offset, param_read_buf, param_size);
3302}
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
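/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * Allocates DMA memory for the command descriptor array, the UTP transfer
 * request descriptor list and the UTP task management request descriptor
 * list, plus regular memory for the local reference blocks (lrb).
 *
 * Returns 0 for success, -ENOMEM in case of failure.
 */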
3317static int ufshcd_memory_alloc(struct ufs_hba *hba)
3318{
3319 size_t utmrdl_size, utrdl_size, ucdl_size;
3320
3321
3322 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3323 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3324 ucdl_size,
3325 &hba->ucdl_dma_addr,
3326 GFP_KERNEL);
3327
3328
3329
3330
3331
3332
3333
3334 if (!hba->ucdl_base_addr ||
3335 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3336 dev_err(hba->dev,
3337 "Command Descriptor Memory allocation failed\n");
3338 goto out;
3339 }
3340
3341
3342
3343
3344
3345 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3346 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3347 utrdl_size,
3348 &hba->utrdl_dma_addr,
3349 GFP_KERNEL);
3350 if (!hba->utrdl_base_addr ||
3351 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3352 dev_err(hba->dev,
3353 "Transfer Descriptor Memory allocation failed\n");
3354 goto out;
3355 }
3356
3357
3358
3359
3360
3361 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3362 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3363 utmrdl_size,
3364 &hba->utmrdl_dma_addr,
3365 GFP_KERNEL);
3366 if (!hba->utmrdl_base_addr ||
3367 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3368 dev_err(hba->dev,
3369 "Task Management Descriptor Memory allocation failed\n");
3370 goto out;
3371 }
3372
3373
3374 hba->lrb = devm_kcalloc(hba->dev,
3375 hba->nutrs, sizeof(struct ufshcd_lrb),
3376 GFP_KERNEL);
3377 if (!hba->lrb) {
3378 dev_err(hba->dev, "LRB Memory allocation failed\n");
3379 goto out;
3380 }
3381 return 0;
3382out:
3383 return -ENOMEM;
3384}
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
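/**
 * ufshcd_host_memory_configure - configure local reference blocks with
 * memory offsets
 * @hba: per adapter instance
 *
 * For each slot, program the command descriptor base address, response UPIU
 * offset/length and PRDT offset into the UTRD, and cache the corresponding
 * CPU and DMA addresses in the local reference block.
 */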
3399static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3400{
3401 struct utp_transfer_cmd_desc *cmd_descp;
3402 struct utp_transfer_req_desc *utrdlp;
3403 dma_addr_t cmd_desc_dma_addr;
3404 dma_addr_t cmd_desc_element_addr;
3405 u16 response_offset;
3406 u16 prdt_offset;
3407 int cmd_desc_size;
3408 int i;
3409
3410 utrdlp = hba->utrdl_base_addr;
3411 cmd_descp = hba->ucdl_base_addr;
3412
3413 response_offset =
3414 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3415 prdt_offset =
3416 offsetof(struct utp_transfer_cmd_desc, prd_table);
3417
3418 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3419 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3420
3421 for (i = 0; i < hba->nutrs; i++) {
3422
3423 cmd_desc_element_addr =
3424 (cmd_desc_dma_addr + (cmd_desc_size * i));
3425 utrdlp[i].command_desc_base_addr_lo =
3426 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3427 utrdlp[i].command_desc_base_addr_hi =
3428 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3429
3430
3431 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3432 utrdlp[i].response_upiu_offset =
3433 cpu_to_le16(response_offset);
3434 utrdlp[i].prd_table_offset =
3435 cpu_to_le16(prdt_offset);
3436 utrdlp[i].response_upiu_length =
3437 cpu_to_le16(ALIGNED_UPIU_SIZE);
3438 } else {
3439 utrdlp[i].response_upiu_offset =
3440 cpu_to_le16((response_offset >> 2));
3441 utrdlp[i].prd_table_offset =
3442 cpu_to_le16((prdt_offset >> 2));
3443 utrdlp[i].response_upiu_length =
3444 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3445 }
3446
3447 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3448 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3449 (i * sizeof(struct utp_transfer_req_desc));
3450 hba->lrb[i].ucd_req_ptr =
3451 (struct utp_upiu_req *)(cmd_descp + i);
3452 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3453 hba->lrb[i].ucd_rsp_ptr =
3454 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3455 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3456 response_offset;
3457 hba->lrb[i].ucd_prdt_ptr =
3458 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3459 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3460 prdt_offset;
3461 }
3462}
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
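/**
 * ufshcd_dme_link_startup - notify UniPro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP initiates the UniPro link startup procedure;
 * once the link is up, the device connected to the controller is detected.
 *
 * Returns 0 on success, non-zero value on failure.
 */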
3475static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3476{
3477 struct uic_command uic_cmd = {0};
3478 int ret;
3479
3480 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3481
3482 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3483 if (ret)
3484 dev_dbg(hba->dev,
3485 "dme-link-startup: error code %d\n", ret);
3486 return ret;
3487}
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497static int ufshcd_dme_reset(struct ufs_hba *hba)
3498{
3499 struct uic_command uic_cmd = {0};
3500 int ret;
3501
3502 uic_cmd.command = UIC_CMD_DME_RESET;
3503
3504 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3505 if (ret)
3506 dev_err(hba->dev,
3507 "dme-reset: error code %d\n", ret);
3508
3509 return ret;
3510}
3511
3512
3513
3514
3515
3516
3517
3518
3519
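/**
 * ufshcd_dme_enable - issue the DME_ENABLE UIC command
 * @hba: per adapter instance
 *
 * DME_ENABLE is issued in order to enable the UniPro stack.
 *
 * Returns 0 on success, non-zero value on failure.
 */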
3520static int ufshcd_dme_enable(struct ufs_hba *hba)
3521{
3522 struct uic_command uic_cmd = {0};
3523 int ret;
3524
3525 uic_cmd.command = UIC_CMD_DME_ENABLE;
3526
3527 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3528 if (ret)
3529 dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);
3531
3532 return ret;
3533}
3534
3535static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3536{
3537 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3538 unsigned long min_sleep_time_us;
3539
3540 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3541 return;
3542
3543
3544
3545
3546
3547 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3548 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3549 } else {
3550 unsigned long delta =
3551 (unsigned long) ktime_to_us(
3552 ktime_sub(ktime_get(),
3553 hba->last_dme_cmd_tstamp));
3554
3555 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3556 min_sleep_time_us =
3557 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3558 else
3559 return;
3560 }
3561
3562
3563 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3564}
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
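/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicates whether peer or local
 *
 * Returns 0 on success, non-zero value on failure.
 */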
3576int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3577 u8 attr_set, u32 mib_val, u8 peer)
3578{
3579 struct uic_command uic_cmd = {0};
3580 static const char *const action[] = {
3581 "dme-set",
3582 "dme-peer-set"
3583 };
3584 const char *set = action[!!peer];
3585 int ret;
3586 int retries = UFS_UIC_COMMAND_RETRIES;
3587
3588 uic_cmd.command = peer ?
3589 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3590 uic_cmd.argument1 = attr_sel;
3591 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3592 uic_cmd.argument3 = mib_val;
3593
3594 do {
3595
3596 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3597 if (ret)
3598 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3599 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3600 } while (ret && peer && --retries);
3601
3602 if (ret)
3603 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3604 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3605 UFS_UIC_COMMAND_RETRIES - retries);
3606
3607 return ret;
3608}
3609EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
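/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicates whether peer or local
 *
 * Returns 0 on success, non-zero value on failure.
 */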
3620int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3621 u32 *mib_val, u8 peer)
3622{
3623 struct uic_command uic_cmd = {0};
3624 static const char *const action[] = {
3625 "dme-get",
3626 "dme-peer-get"
3627 };
3628 const char *get = action[!!peer];
3629 int ret;
3630 int retries = UFS_UIC_COMMAND_RETRIES;
3631 struct ufs_pa_layer_attr orig_pwr_info;
3632 struct ufs_pa_layer_attr temp_pwr_info;
3633 bool pwr_mode_change = false;
3634
3635 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3636 orig_pwr_info = hba->pwr_info;
3637 temp_pwr_info = orig_pwr_info;
3638
3639 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3640 orig_pwr_info.pwr_rx == FAST_MODE) {
3641 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3642 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3643 pwr_mode_change = true;
3644 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3645 orig_pwr_info.pwr_rx == SLOW_MODE) {
3646 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3647 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3648 pwr_mode_change = true;
3649 }
3650 if (pwr_mode_change) {
3651 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3652 if (ret)
3653 goto out;
3654 }
3655 }
3656
3657 uic_cmd.command = peer ?
3658 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3659 uic_cmd.argument1 = attr_sel;
3660
3661 do {
3662
3663 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3664 if (ret)
3665 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3666 get, UIC_GET_ATTR_ID(attr_sel), ret);
3667 } while (ret && peer && --retries);
3668
3669 if (ret)
3670 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3671 get, UIC_GET_ATTR_ID(attr_sel),
3672 UFS_UIC_COMMAND_RETRIES - retries);
3673
3674 if (mib_val && !ret)
3675 *mib_val = uic_cmd.argument3;
3676
3677 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3678 && pwr_mode_change)
3679 ufshcd_change_power_mode(hba, &orig_pwr_info);
3680out:
3681 return ret;
3682}
3683EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
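/**
 * ufshcd_uic_pwr_ctrl - execute a UIC command that changes the link power state
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations such as DME_SET(PA_PWRMODE) and hibernate enter/exit take
 * time to complete on both the host and device side, so completion is
 * signalled through the power mode status bits rather than the normal UIC
 * command completion; this function only returns once that notification
 * arrives or UIC_CMD_TIMEOUT expires.
 *
 * Returns 0 on success, non-zero value on failure.
 */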
3701static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3702{
3703 struct completion uic_async_done;
3704 unsigned long flags;
3705 u8 status;
3706 int ret;
3707 bool reenable_intr = false;
3708
3709 mutex_lock(&hba->uic_cmd_mutex);
3710 init_completion(&uic_async_done);
3711 ufshcd_add_delay_before_dme_cmd(hba);
3712
3713 spin_lock_irqsave(hba->host->host_lock, flags);
3714 hba->uic_async_done = &uic_async_done;
3715 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3716 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3717
3718
3719
3720
3721 wmb();
3722 reenable_intr = true;
3723 }
3724 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3725 spin_unlock_irqrestore(hba->host->host_lock, flags);
3726 if (ret) {
3727 dev_err(hba->dev,
3728 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3729 cmd->command, cmd->argument3, ret);
3730 goto out;
3731 }
3732
3733 if (!wait_for_completion_timeout(hba->uic_async_done,
3734 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3735 dev_err(hba->dev,
3736 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3737 cmd->command, cmd->argument3);
3738 ret = -ETIMEDOUT;
3739 goto out;
3740 }
3741
3742 status = ufshcd_get_upmcrs(hba);
3743 if (status != PWR_LOCAL) {
3744 dev_err(hba->dev,
3745 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3746 cmd->command, status);
3747 ret = (status != PWR_OK) ? status : -1;
3748 }
3749out:
3750 if (ret) {
3751 ufshcd_print_host_state(hba);
3752 ufshcd_print_pwr_info(hba);
3753 ufshcd_print_host_regs(hba);
3754 }
3755
3756 spin_lock_irqsave(hba->host->host_lock, flags);
3757 hba->active_uic_cmd = NULL;
3758 hba->uic_async_done = NULL;
3759 if (reenable_intr)
3760 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3761 spin_unlock_irqrestore(hba->host->host_lock, flags);
3762 mutex_unlock(&hba->uic_cmd_mutex);
3763
3764 return ret;
3765}
3766
3767
3768
3769
3770
3771
3772
3773
3774
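/**
 * ufshcd_uic_change_pwr_mode - change the UIC power mode using DME_SET
 * @hba: per adapter instance
 * @mode: power mode value to program into PA_PWRMODE
 *
 * Returns 0 on success, non-zero value on failure.
 */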
3775static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3776{
3777 struct uic_command uic_cmd = {0};
3778 int ret;
3779
3780 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3781 ret = ufshcd_dme_set(hba,
3782 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3783 if (ret) {
3784 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3785 __func__, ret);
3786 goto out;
3787 }
3788 }
3789
3790 uic_cmd.command = UIC_CMD_DME_SET;
3791 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3792 uic_cmd.argument3 = mode;
3793 ufshcd_hold(hba, false);
3794 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3795 ufshcd_release(hba);
3796
3797out:
3798 return ret;
3799}
3800
3801static int ufshcd_link_recovery(struct ufs_hba *hba)
3802{
3803 int ret;
3804 unsigned long flags;
3805
3806 spin_lock_irqsave(hba->host->host_lock, flags);
3807 hba->ufshcd_state = UFSHCD_STATE_RESET;
3808 ufshcd_set_eh_in_progress(hba);
3809 spin_unlock_irqrestore(hba->host->host_lock, flags);
3810
3811 ret = ufshcd_host_reset_and_restore(hba);
3812
3813 spin_lock_irqsave(hba->host->host_lock, flags);
3814 if (ret)
3815 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3816 ufshcd_clear_eh_in_progress(hba);
3817 spin_unlock_irqrestore(hba->host->host_lock, flags);
3818
3819 if (ret)
3820 dev_err(hba->dev, "%s: link recovery failed, err %d",
3821 __func__, ret);
3822
3823 return ret;
3824}
3825
3826static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3827{
3828 int ret;
3829 struct uic_command uic_cmd = {0};
3830 ktime_t start = ktime_get();
3831
3832 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3833
3834 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3835 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3836 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3837 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3838
3839 if (ret) {
3840 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3841 __func__, ret);
3842
3843
3844
3845
3846
3847 if (ufshcd_link_recovery(hba))
3848 ret = -ENOLINK;
3849 } else
3850 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3851 POST_CHANGE);
3852
3853 return ret;
3854}
3855
3856static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3857{
3858 int ret = 0, retries;
3859
3860 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3861 ret = __ufshcd_uic_hibern8_enter(hba);
3862 if (!ret || ret == -ENOLINK)
3863 goto out;
3864 }
3865out:
3866 return ret;
3867}
3868
3869static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3870{
3871 struct uic_command uic_cmd = {0};
3872 int ret;
3873 ktime_t start = ktime_get();
3874
3875 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3876
3877 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3878 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3879 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3880 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3881
3882 if (ret) {
3883 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3884 __func__, ret);
3885 ret = ufshcd_link_recovery(hba);
3886 } else {
3887 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3888 POST_CHANGE);
3889 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3890 hba->ufs_stats.hibern8_exit_cnt++;
3891 }
3892
3893 return ret;
3894}
3895
3896static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3897{
3898 unsigned long flags;
3899
3900 if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
3901 return;
3902
3903 spin_lock_irqsave(hba->host->host_lock, flags);
3904 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3905 spin_unlock_irqrestore(hba->host->host_lock, flags);
3906}
3907
3908
3909
3910
3911
3912
3913static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3914{
3915 hba->pwr_info.gear_rx = UFS_PWM_G1;
3916 hba->pwr_info.gear_tx = UFS_PWM_G1;
3917 hba->pwr_info.lane_rx = 1;
3918 hba->pwr_info.lane_tx = 1;
3919 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3920 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3921 hba->pwr_info.hs_rate = 0;
3922}
3923
3924
3925
3926
3927
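/**
 * ufshcd_get_max_pwr_mode - read the maximum power mode supported by the link
 * and cache it in hba->max_pwr_info
 * @hba: per adapter instance
 *
 * Returns 0 on success, -EINVAL if the connected lanes or gears are invalid.
 */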
3928static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3929{
3930 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3931
3932 if (hba->max_pwr_info.is_valid)
3933 return 0;
3934
3935 pwr_info->pwr_tx = FAST_MODE;
3936 pwr_info->pwr_rx = FAST_MODE;
3937 pwr_info->hs_rate = PA_HS_MODE_B;
3938
3939
3940 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3941 &pwr_info->lane_rx);
3942 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3943 &pwr_info->lane_tx);
3944
3945 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3946 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3947 __func__,
3948 pwr_info->lane_rx,
3949 pwr_info->lane_tx);
3950 return -EINVAL;
3951 }
3952
3953
3954
3955
3956
3957
3958 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3959 if (!pwr_info->gear_rx) {
3960 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3961 &pwr_info->gear_rx);
3962 if (!pwr_info->gear_rx) {
3963 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3964 __func__, pwr_info->gear_rx);
3965 return -EINVAL;
3966 }
3967 pwr_info->pwr_rx = SLOW_MODE;
3968 }
3969
3970 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3971 &pwr_info->gear_tx);
3972 if (!pwr_info->gear_tx) {
3973 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3974 &pwr_info->gear_tx);
3975 if (!pwr_info->gear_tx) {
3976 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3977 __func__, pwr_info->gear_tx);
3978 return -EINVAL;
3979 }
3980 pwr_info->pwr_tx = SLOW_MODE;
3981 }
3982
3983 hba->max_pwr_info.is_valid = true;
3984 return 0;
3985}
3986
3987static int ufshcd_change_power_mode(struct ufs_hba *hba,
3988 struct ufs_pa_layer_attr *pwr_mode)
3989{
3990 int ret;
3991
3992
3993 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3994 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3995 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3996 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3997 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3998 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3999 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4000 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4001 return 0;
4002 }
4003
4004
4005
4006
4007
4008
4009
4010 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4011 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4012 pwr_mode->lane_rx);
4013 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4014 pwr_mode->pwr_rx == FAST_MODE)
4015 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4016 else
4017 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4018
4019 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4020 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4021 pwr_mode->lane_tx);
4022 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4023 pwr_mode->pwr_tx == FAST_MODE)
4024 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4025 else
4026 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4027
4028 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4029 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4030 pwr_mode->pwr_rx == FAST_MODE ||
4031 pwr_mode->pwr_tx == FAST_MODE)
4032 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4033 pwr_mode->hs_rate);
4034
4035 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4036 | pwr_mode->pwr_tx);
4037
4038 if (ret) {
4039 dev_err(hba->dev,
4040 "%s: power mode change failed %d\n", __func__, ret);
4041 } else {
4042 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4043 pwr_mode);
4044
4045 memcpy(&hba->pwr_info, pwr_mode,
4046 sizeof(struct ufs_pa_layer_attr));
4047 }
4048
4049 return ret;
4050}
4051
4052
4053
4054
4055
4056
4057int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4058 struct ufs_pa_layer_attr *desired_pwr_mode)
4059{
4060 struct ufs_pa_layer_attr final_params = { 0 };
4061 int ret;
4062
4063 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4064 desired_pwr_mode, &final_params);
4065
4066 if (ret)
4067 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4068
4069 ret = ufshcd_change_power_mode(hba, &final_params);
4070 if (!ret)
4071 ufshcd_print_pwr_info(hba);
4072
4073 return ret;
4074}
4075EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4076
4077
4078
4079
4080
4081
4082
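/**
 * ufshcd_complete_dev_init - check device initialization status
 * @hba: per adapter instance
 *
 * Set the fDeviceInit flag and poll until the device clears it, which
 * indicates that its initialization is complete.
 *
 * Returns 0 on success, non-zero value on failure.
 */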
4083static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4084{
4085 int i;
4086 int err;
	bool flag_res = true;
4088
4089 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4090 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4091 if (err) {
4092 dev_err(hba->dev,
4093 "%s setting fDeviceInit flag failed with error %d\n",
4094 __func__, err);
4095 goto out;
4096 }
4097
4098
4099 for (i = 0; i < 1000 && !err && flag_res; i++)
4100 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4101 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4102
4103 if (err)
4104 dev_err(hba->dev,
4105 "%s reading fDeviceInit flag failed with error %d\n",
4106 __func__, err);
4107 else if (flag_res)
4108 dev_err(hba->dev,
4109 "%s fDeviceInit was not cleared by the device\n",
4110 __func__);
4111
4112out:
4113 return err;
4114}
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
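/**
 * ufshcd_make_hba_operational - make the UFS controller operational
 * @hba: per adapter instance
 *
 * 1. Enable the required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program the UTRL and UTMRL base addresses
 * 4. Configure the run-stop registers
 *
 * Returns 0 on success, non-zero value on failure.
 */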
4128static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4129{
4130 int err = 0;
4131 u32 reg;
4132
4133
4134 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4135
4136
4137 if (ufshcd_is_intr_aggr_allowed(hba))
4138 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4139 else
4140 ufshcd_disable_intr_aggr(hba);
4141
4142
4143 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4144 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4145 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4146 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4147 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4148 REG_UTP_TASK_REQ_LIST_BASE_L);
4149 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4150 REG_UTP_TASK_REQ_LIST_BASE_H);
4151
4152
4153
4154
4155
4156 wmb();
4157
4158
4159
4160
4161 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4162 if (!(ufshcd_get_lists_status(reg))) {
4163 ufshcd_enable_run_stop_reg(hba);
4164 } else {
4165 dev_err(hba->dev,
4166 "Host controller not ready to process requests");
4167 err = -EIO;
4168 goto out;
4169 }
4170
4171out:
4172 return err;
4173}
4174
4175
4176
4177
4178
4179
4180static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4181{
4182 int err;
4183
4184 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4185 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4186 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4187 10, 1, can_sleep);
4188 if (err)
4189 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4190}
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
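/**
 * ufshcd_hba_execute_hce - initialize the controller via the HCE register
 * @hba: per adapter instance
 *
 * Setting HCE to 1 makes the controller reset itself and run its firmware
 * initialization sequence; the bit reads back as 1 once the controller is
 * ready.
 *
 * Returns 0 on success, -EIO if the controller fails to become active.
 */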
4202static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4203{
4204 int retry;
4205
4206
4207
4208
4209
4210
4211
4212 if (!ufshcd_is_hba_active(hba))
4213
4214 ufshcd_hba_stop(hba, true);
4215
4216
4217 ufshcd_set_link_off(hba);
4218
4219 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4220
4221
4222 ufshcd_hba_start(hba);
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234 msleep(1);
4235
4236
4237 retry = 10;
4238 while (ufshcd_is_hba_active(hba)) {
4239 if (retry) {
4240 retry--;
4241 } else {
4242 dev_err(hba->dev,
4243 "Controller enable failed\n");
4244 return -EIO;
4245 }
4246 msleep(5);
4247 }
4248
4249
4250 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4251
4252 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4253
4254 return 0;
4255}
4256
4257static int ufshcd_hba_enable(struct ufs_hba *hba)
4258{
4259 int ret;
4260
4261 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4262 ufshcd_set_link_off(hba);
4263 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4264
4265
4266 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4267 ret = ufshcd_dme_reset(hba);
4268 if (!ret) {
4269 ret = ufshcd_dme_enable(hba);
4270 if (!ret)
4271 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4272 if (ret)
4273 dev_err(hba->dev,
4274 "Host controller enable failed with non-hce\n");
4275 }
4276 } else {
4277 ret = ufshcd_hba_execute_hce(hba);
4278 }
4279
4280 return ret;
4281}
4282static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4283{
4284 int tx_lanes, i, err = 0;
4285
4286 if (!peer)
4287 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4288 &tx_lanes);
4289 else
4290 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4291 &tx_lanes);
4292 for (i = 0; i < tx_lanes; i++) {
4293 if (!peer)
4294 err = ufshcd_dme_set(hba,
4295 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4296 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4297 0);
4298 else
4299 err = ufshcd_dme_peer_set(hba,
4300 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4301 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4302 0);
4303 if (err) {
4304 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4305 __func__, peer, i, err);
4306 break;
4307 }
4308 }
4309
4310 return err;
4311}
4312
4313static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4314{
4315 return ufshcd_disable_tx_lcc(hba, true);
4316}
4317
4318
4319
4320
4321
4322
4323
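/**
 * ufshcd_link_startup - initialize UniPro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure.
 */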
4324static int ufshcd_link_startup(struct ufs_hba *hba)
4325{
4326 int ret;
4327 int retries = DME_LINKSTARTUP_RETRIES;
4328 bool link_startup_again = false;
4329
4330
4331
4332
4333
4334 if (!ufshcd_is_ufs_dev_active(hba))
4335 link_startup_again = true;
4336
4337link_startup:
4338 do {
4339 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4340
4341 ret = ufshcd_dme_link_startup(hba);
4342
4343
4344 if (!ret && !ufshcd_is_device_present(hba)) {
4345 dev_err(hba->dev, "%s: Device not present\n", __func__);
4346 ret = -ENXIO;
4347 goto out;
4348 }
4349
4350
4351
4352
4353
4354
4355 if (ret && ufshcd_hba_enable(hba))
4356 goto out;
4357 } while (ret && retries--);
4358
4359 if (ret)
4360
4361 goto out;
4362
4363 if (link_startup_again) {
4364 link_startup_again = false;
4365 retries = DME_LINKSTARTUP_RETRIES;
4366 goto link_startup;
4367 }
4368
4369
4370 ufshcd_init_pwr_info(hba);
4371 ufshcd_print_pwr_info(hba);
4372
4373 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4374 ret = ufshcd_disable_device_tx_lcc(hba);
4375 if (ret)
4376 goto out;
4377 }
4378
4379
4380 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4381 if (ret)
4382 goto out;
4383
4384 ret = ufshcd_make_hba_operational(hba);
4385out:
4386 if (ret) {
4387 dev_err(hba->dev, "link startup failed %d\n", ret);
4388 ufshcd_print_host_state(hba);
4389 ufshcd_print_pwr_info(hba);
4390 ufshcd_print_host_regs(hba);
4391 }
4392 return ret;
4393}
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
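/**
 * ufshcd_verify_dev_init - verify device initialization
 * @hba: per adapter instance
 *
 * Send a NOP OUT UPIU and wait for the NOP IN response to check whether the
 * device transport protocol (UTP) layer is ready after a reset.
 *
 * Returns 0 on success, non-zero value on failure.
 */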
4405static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4406{
4407 int err = 0;
4408 int retries;
4409
4410 ufshcd_hold(hba, false);
4411 mutex_lock(&hba->dev_cmd.lock);
4412 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4413 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4414 NOP_OUT_TIMEOUT);
4415
4416 if (!err || err == -ETIMEDOUT)
4417 break;
4418
4419 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4420 }
4421 mutex_unlock(&hba->dev_cmd.lock);
4422 ufshcd_release(hba);
4423
4424 if (err)
4425 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4426 return err;
4427}
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
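/**
 * ufshcd_set_queue_depth - set the LUN queue depth
 * @sdev: pointer to SCSI device
 *
 * Reads bLUQueueDepth from the unit descriptor; falls back to 1 for LUNs
 * that have no unit descriptor and to the host maximum when the device
 * reports 0.
 */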
4438static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4439{
4440 int ret = 0;
4441 u8 lun_qdepth;
4442 struct ufs_hba *hba;
4443
4444 hba = shost_priv(sdev->host);
4445
4446 lun_qdepth = hba->nutrs;
4447 ret = ufshcd_read_unit_desc_param(hba,
4448 ufshcd_scsi_to_upiu_lun(sdev->lun),
4449 UNIT_DESC_PARAM_LU_Q_DEPTH,
4450 &lun_qdepth,
4451 sizeof(lun_qdepth));
4452
4453
4454 if (ret == -EOPNOTSUPP)
4455 lun_qdepth = 1;
4456 else if (!lun_qdepth)
4457
4458 lun_qdepth = hba->nutrs;
4459 else
4460 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4461
4462 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4463 __func__, lun_qdepth);
4464 scsi_change_queue_depth(sdev, lun_qdepth);
4465}
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4479 u8 lun,
4480 u8 *b_lu_write_protect)
4481{
4482 int ret;
4483
4484 if (!b_lu_write_protect)
4485 ret = -EINVAL;
4486
4487
4488
4489
4490
4491 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4492 ret = -ENOTSUPP;
4493 else
4494 ret = ufshcd_read_unit_desc_param(hba,
4495 lun,
4496 UNIT_DESC_PARAM_LU_WR_PROTECT,
4497 b_lu_write_protect,
4498 sizeof(*b_lu_write_protect));
4499 return ret;
4500}
4501
4502
4503
4504
4505
4506
4507
4508
4509static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4510 struct scsi_device *sdev)
4511{
4512 if (hba->dev_info.f_power_on_wp_en &&
4513 !hba->dev_info.is_lu_power_on_wp) {
4514 u8 b_lu_write_protect;
4515
4516 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4517 &b_lu_write_protect) &&
4518 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4519 hba->dev_info.is_lu_power_on_wp = true;
4520 }
4521}
4522
4523
4524
4525
4526
4527
4528
4529static int ufshcd_slave_alloc(struct scsi_device *sdev)
4530{
4531 struct ufs_hba *hba;
4532
4533 hba = shost_priv(sdev->host);
4534
4535
4536 sdev->use_10_for_ms = 1;
4537
4538
4539 sdev->allow_restart = 1;
4540
4541
4542 sdev->no_report_opcodes = 1;
4543
4544
4545 sdev->no_write_same = 1;
4546
4547 ufshcd_set_queue_depth(sdev);
4548
4549 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4550
4551 return 0;
4552}
4553
4554
4555
4556
4557
4558
4559
4560
4561static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4562{
4563 struct ufs_hba *hba = shost_priv(sdev->host);
4564
4565 if (depth > hba->nutrs)
4566 depth = hba->nutrs;
4567 return scsi_change_queue_depth(sdev, depth);
4568}
4569
4570
4571
4572
4573
4574static int ufshcd_slave_configure(struct scsi_device *sdev)
4575{
4576 struct request_queue *q = sdev->request_queue;
4577
4578 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4579 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4580
4581 return 0;
4582}
4583
4584
4585
4586
4587
4588static void ufshcd_slave_destroy(struct scsi_device *sdev)
4589{
4590 struct ufs_hba *hba;
4591
4592 hba = shost_priv(sdev->host);
4593
4594 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4595 unsigned long flags;
4596
4597 spin_lock_irqsave(hba->host->host_lock, flags);
4598 hba->sdev_ufs_device = NULL;
4599 spin_unlock_irqrestore(hba->host->host_lock, flags);
4600 }
4601}
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4612{
4613 struct utp_task_req_desc *task_req_descp;
4614 struct utp_upiu_task_rsp *task_rsp_upiup;
4615 unsigned long flags;
4616 int ocs_value;
4617 int task_result;
4618
4619 spin_lock_irqsave(hba->host->host_lock, flags);
4620
4621
4622 __clear_bit(index, &hba->outstanding_tasks);
4623
4624 task_req_descp = hba->utmrdl_base_addr;
4625 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4626
4627 if (ocs_value == OCS_SUCCESS) {
4628 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4629 task_req_descp[index].task_rsp_upiu;
4630 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4631 task_result = task_result & MASK_TM_SERVICE_RESP;
4632 if (resp)
4633 *resp = (u8)task_result;
4634 } else {
4635 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4636 __func__, ocs_value);
4637 }
4638 spin_unlock_irqrestore(hba->host->host_lock, flags);
4639
4640 return ocs_value;
4641}
4642
4643
4644
4645
4646
4647
4648
4649
4650static inline int
4651ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4652{
4653 int result = 0;
4654
4655 switch (scsi_status) {
4656 case SAM_STAT_CHECK_CONDITION:
4657 ufshcd_copy_sense_data(lrbp);
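		/* fall through - status is still reported with DID_OK so the
		 * midlayer inspects the copied sense data */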
4658 case SAM_STAT_GOOD:
4659 result |= DID_OK << 16 |
4660 COMMAND_COMPLETE << 8 |
4661 scsi_status;
4662 break;
4663 case SAM_STAT_TASK_SET_FULL:
4664 case SAM_STAT_BUSY:
4665 case SAM_STAT_TASK_ABORTED:
4666 ufshcd_copy_sense_data(lrbp);
4667 result |= scsi_status;
4668 break;
4669 default:
4670 result |= DID_ERROR << 16;
4671 break;
4672 }
4673
4674 return result;
4675}
4676
4677
4678
4679
4680
4681
4682
4683
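/**
 * ufshcd_transfer_rsp_status - get the overall status of a completed request
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of the completed command
 *
 * Returns the result to be reported to the SCSI midlayer.
 */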
4684static inline int
4685ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4686{
4687 int result = 0;
4688 int scsi_status;
4689 int ocs;
4690
4691
4692 ocs = ufshcd_get_tr_ocs(lrbp);
4693
4694 switch (ocs) {
4695 case OCS_SUCCESS:
4696 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4697 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4698 switch (result) {
4699 case UPIU_TRANSACTION_RESPONSE:
4700
4701
4702
4703
4704 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4705
4706
4707
4708
4709
4710 scsi_status = result & MASK_SCSI_STATUS;
4711 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725 if (!hba->pm_op_in_progress &&
4726 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4727 schedule_work(&hba->eeh_work);
4728 break;
4729 case UPIU_TRANSACTION_REJECT_UPIU:
4730
4731 result = DID_ERROR << 16;
4732 dev_err(hba->dev,
4733 "Reject UPIU not fully implemented\n");
4734 break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
4741 }
4742 break;
4743 case OCS_ABORTED:
4744 result |= DID_ABORT << 16;
4745 break;
4746 case OCS_INVALID_COMMAND_STATUS:
4747 result |= DID_REQUEUE << 16;
4748 break;
4749 case OCS_INVALID_CMD_TABLE_ATTR:
4750 case OCS_INVALID_PRDT_ATTR:
4751 case OCS_MISMATCH_DATA_BUF_SIZE:
4752 case OCS_MISMATCH_RESP_UPIU_SIZE:
4753 case OCS_PEER_COMM_FAILURE:
4754 case OCS_FATAL_ERROR:
4755 default:
4756 result |= DID_ERROR << 16;
4757 dev_err(hba->dev,
4758 "OCS error from controller = %x for tag %d\n",
4759 ocs, lrbp->task_tag);
4760 ufshcd_print_host_regs(hba);
4761 ufshcd_print_host_state(hba);
4762 break;
4763 }
4764
4765 if (host_byte(result) != DID_OK)
4766 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4767 return result;
4768}
4769
4770
4771
4772
4773
4774
4775static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4776{
4777 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4778 hba->active_uic_cmd->argument2 |=
4779 ufshcd_get_uic_cmd_result(hba);
4780 hba->active_uic_cmd->argument3 =
4781 ufshcd_get_dme_attr_val(hba);
4782 complete(&hba->active_uic_cmd->done);
4783 }
4784
4785 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4786 complete(hba->uic_async_done);
4787}
4788
4789
4790
4791
4792
4793
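/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */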
4794static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4795 unsigned long completed_reqs)
4796{
4797 struct ufshcd_lrb *lrbp;
4798 struct scsi_cmnd *cmd;
4799 int result;
4800 int index;
4801
4802 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4803 lrbp = &hba->lrb[index];
4804 cmd = lrbp->cmd;
4805 if (cmd) {
4806 ufshcd_add_command_trace(hba, index, "complete");
4807 result = ufshcd_transfer_rsp_status(hba, lrbp);
4808 scsi_dma_unmap(cmd);
4809 cmd->result = result;
4810
4811 lrbp->cmd = NULL;
4812 clear_bit_unlock(index, &hba->lrb_in_use);
4813
4814 cmd->scsi_done(cmd);
4815 __ufshcd_release(hba);
4816 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4817 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4818 if (hba->dev_cmd.complete) {
4819 ufshcd_add_command_trace(hba, index,
4820 "dev_complete");
4821 complete(hba->dev_cmd.complete);
4822 }
4823 }
4824 if (ufshcd_is_clkscaling_supported(hba))
4825 hba->clk_scaling.active_reqs--;
4826
4827 lrbp->compl_time_stamp = ktime_get();
4828 }
4829
4830
4831 hba->outstanding_reqs ^= completed_reqs;
4832
4833 ufshcd_clk_scaling_update_busy(hba);
4834
4835
4836 wake_up(&hba->dev_cmd.tag_wq);
4837}
4838
4839
4840
4841
4842
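/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */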
4843static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4844{
4845 unsigned long completed_reqs;
4846 u32 tr_doorbell;
4847
4848
4849
4850
4851
4852
4853
4854
4855 if (ufshcd_is_intr_aggr_allowed(hba) &&
4856 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4857 ufshcd_reset_intr_aggr(hba);
4858
4859 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4860 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4861
4862 __ufshcd_transfer_req_compl(hba, completed_reqs);
4863}
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
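/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables the exception event in the device so that the EVENT_ALERT
 * bit is not set for it.
 *
 * Returns zero on success, non-zero error value on failure.
 */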
4875static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4876{
4877 int err = 0;
4878 u32 val;
4879
4880 if (!(hba->ee_ctrl_mask & mask))
4881 goto out;
4882
4883 val = hba->ee_ctrl_mask & ~mask;
4884 val &= MASK_EE_STATUS;
4885 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4886 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4887 if (!err)
4888 hba->ee_ctrl_mask &= ~mask;
4889out:
4890 return err;
4891}
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
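/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enables the corresponding exception event in the device so it can
 * alert the host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */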
4903static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4904{
4905 int err = 0;
4906 u32 val;
4907
4908 if (hba->ee_ctrl_mask & mask)
4909 goto out;
4910
4911 val = hba->ee_ctrl_mask | mask;
4912 val &= MASK_EE_STATUS;
4913 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4914 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4915 if (!err)
4916 hba->ee_ctrl_mask |= mask;
4917out:
4918 return err;
4919}
4931
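/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow the device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers,
 * so the urgent BKOPS exception event is disabled once auto bkops is on.
 *
 * Returns zero on success, non-zero on failure.
 */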
4932static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4933{
4934 int err = 0;
4935
4936 if (hba->auto_bkops_enabled)
4937 goto out;
4938
4939 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4940 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4941 if (err) {
4942 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4943 __func__, err);
4944 goto out;
4945 }
4946
4947 hba->auto_bkops_enabled = true;
4948 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4949
4950
4951 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4952 if (err)
4953 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4954 __func__, err);
4955out:
4956 return err;
4957}
4970
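/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency.
 * Before clearing fBackgroundOpsEn, the urgent BKOPS exception event is
 * re-enabled so the host is notified when the device needs to run
 * background operations.
 *
 * Returns zero on success, non-zero on failure.
 */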
4971static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4972{
4973 int err = 0;
4974
4975 if (!hba->auto_bkops_enabled)
4976 goto out;
4977
4978
4979
4980
4981
4982 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4983 if (err) {
4984 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4985 __func__, err);
4986 goto out;
4987 }
4988
4989 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4990 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4991 if (err) {
4992 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4993 __func__, err);
4994 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4995 goto out;
4996 }
4997
4998 hba->auto_bkops_enabled = false;
4999 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5000out:
5001 return err;
5002}
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
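/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset, the BKOPS software tracking state must be brought
 * back in sync with the device: either force auto bkops on or force it off,
 * depending on ufshcd_keep_autobkops_enabled_except_suspend(). The tracking
 * flag is toggled first so the enable/disable helpers are not
 * short-circuited.
 */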
5013static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5014{
5015 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5016 hba->auto_bkops_enabled = false;
5017 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5018 ufshcd_enable_auto_bkops(hba);
5019 } else {
5020 hba->auto_bkops_enabled = true;
5021 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5022 ufshcd_disable_auto_bkops(hba);
5023 }
5024}
5025
5026static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5027{
5028 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5029 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5030}
5047
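/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status threshold
 *
 * Read the bBackgroundOpStatus attribute from the device and enable the
 * fBackgroundOpsEn flag if the reported status is greater than or equal to
 * @status, disable it otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Callers can check hba->auto_bkops_enabled afterwards to know
 * whether auto bkops ended up enabled or disabled.
 */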
5048static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5049 enum bkops_status status)
5050{
5051 int err;
5052 u32 curr_status = 0;
5053
5054 err = ufshcd_get_bkops_status(hba, &curr_status);
5055 if (err) {
5056 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5057 __func__, err);
5058 goto out;
5059 } else if (curr_status > BKOPS_STATUS_MAX) {
5060 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5061 __func__, curr_status);
5062 err = -EINVAL;
5063 goto out;
5064 }
5065
5066 if (curr_status >= status)
5067 err = ufshcd_enable_auto_bkops(hba);
5068 else
5069 err = ufshcd_disable_auto_bkops(hba);
5070out:
5071 return err;
5072}
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
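/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable auto bkops if the device reports a bkops status at or above the
 * currently tracked urgent level.
 *
 * Returns zero on success, non-zero on failure.
 */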
5084static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5085{
5086 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5087}
5088
5089static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5090{
5091 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5092 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5093}
5094
5095static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5096{
5097 int err;
5098 u32 curr_status = 0;
5099
5100 if (hba->is_urgent_bkops_lvl_checked)
5101 goto enable_auto_bkops;
5102
5103 err = ufshcd_get_bkops_status(hba, &curr_status);
5104 if (err) {
5105 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5106 __func__, err);
5107 goto out;
5108 }
5109
5110
5111
5112
5113
5114
5115
5116 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5117 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5118 __func__, curr_status);
5119
5120 hba->urgent_bkops_lvl = curr_status;
5121 hba->is_urgent_bkops_lvl_checked = true;
5122 }
5123
5124enable_auto_bkops:
5125 err = ufshcd_enable_auto_bkops(hba);
5126out:
5127 if (err < 0)
5128 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5129 __func__, err);
5130}
5131
5132
5133
5134
5135
5136
5137
5138
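/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */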
5139static void ufshcd_exception_event_handler(struct work_struct *work)
5140{
5141 struct ufs_hba *hba;
5142 int err;
5143 u32 status = 0;
5144 hba = container_of(work, struct ufs_hba, eeh_work);
5145
5146 pm_runtime_get_sync(hba->dev);
5147 scsi_block_requests(hba->host);
5148 err = ufshcd_get_ee_status(hba, &status);
5149 if (err) {
5150 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5151 __func__, err);
5152 goto out;
5153 }
5154
5155 status &= hba->ee_ctrl_mask;
5156
5157 if (status & MASK_EE_URGENT_BKOPS)
5158 ufshcd_bkops_exception_event_handler(hba);
5159
5160out:
5161 scsi_unblock_requests(hba->host);
5162 pm_runtime_put_sync(hba->dev);
5163 return;
5164}
5165
5166
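/* Complete requests that have door-bell cleared */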
5167static void ufshcd_complete_requests(struct ufs_hba *hba)
5168{
5169 ufshcd_transfer_req_compl(hba);
5170 ufshcd_tmc_handler(hba);
5171}
5172
5173
5174
5175
5176
5177
5178
5179
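/**
 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
 * @hba: per-adapter instance
 *
 * Work around for devices that need recovery from data link NAC errors:
 * if NAC was the only error and the device still answers a NOP after a
 * short delay, the error is cleared and no further recovery is needed.
 *
 * Returns true if error handling is required, false otherwise.
 */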
5180static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5181{
5182 unsigned long flags;
5183 bool err_handling = true;
5184
5185 spin_lock_irqsave(hba->host->host_lock, flags);
5186
5187
5188
5189
5190 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5191 goto out;
5192
5193 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5194 ((hba->saved_err & UIC_ERROR) &&
5195 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5196 goto out;
5197
5198 if ((hba->saved_err & UIC_ERROR) &&
5199 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5200 int err;
5201
5202
5203
5204 spin_unlock_irqrestore(hba->host->host_lock, flags);
5205 msleep(50);
5206 spin_lock_irqsave(hba->host->host_lock, flags);
5207
5208
5209
5210
5211
5212 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5213 ((hba->saved_err & UIC_ERROR) &&
5214 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5215 goto out;
5216
5217
5218
5219
5220
5221
5222
5223
5224 spin_unlock_irqrestore(hba->host->host_lock, flags);
5225 err = ufshcd_verify_dev_init(hba);
5226 spin_lock_irqsave(hba->host->host_lock, flags);
5227
5228 if (err)
5229 goto out;
5230
5231
5232 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5233 hba->saved_err &= ~UIC_ERROR;
5234
5235 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5236 if (!hba->saved_uic_err) {
5237 err_handling = false;
5238 goto out;
5239 }
5240 }
5241out:
5242 spin_unlock_irqrestore(hba->host->host_lock, flags);
5243 return err_handling;
5244}
5245
5246
5247
5248
5249
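/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */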
5250static void ufshcd_err_handler(struct work_struct *work)
5251{
5252 struct ufs_hba *hba;
5253 unsigned long flags;
5254 u32 err_xfer = 0;
5255 u32 err_tm = 0;
5256 int err = 0;
5257 int tag;
5258 bool needs_reset = false;
5259
5260 hba = container_of(work, struct ufs_hba, eh_work);
5261
5262 pm_runtime_get_sync(hba->dev);
5263 ufshcd_hold(hba, false);
5264
5265 spin_lock_irqsave(hba->host->host_lock, flags);
5266 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5267 goto out;
5268
5269 hba->ufshcd_state = UFSHCD_STATE_RESET;
5270 ufshcd_set_eh_in_progress(hba);
5271
5272
5273 ufshcd_complete_requests(hba);
5274
5275 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5276 bool ret;
5277
5278 spin_unlock_irqrestore(hba->host->host_lock, flags);
5279
5280 ret = ufshcd_quirk_dl_nac_errors(hba);
5281 spin_lock_irqsave(hba->host->host_lock, flags);
5282 if (!ret)
5283 goto skip_err_handling;
5284 }
5285 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5286 ((hba->saved_err & UIC_ERROR) &&
5287 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5288 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5289 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5290 needs_reset = true;
5291
5292
5293
5294
5295
5296
5297 if (needs_reset)
5298 goto skip_pending_xfer_clear;
5299
5300
5301 spin_unlock_irqrestore(hba->host->host_lock, flags);
5302
5303 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5304 if (ufshcd_clear_cmd(hba, tag)) {
5305 err_xfer = true;
5306 goto lock_skip_pending_xfer_clear;
5307 }
5308 }
5309
5310
5311 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5312 if (ufshcd_clear_tm_cmd(hba, tag)) {
5313 err_tm = true;
5314 goto lock_skip_pending_xfer_clear;
5315 }
5316 }
5317
5318lock_skip_pending_xfer_clear:
5319 spin_lock_irqsave(hba->host->host_lock, flags);
5320
5321
5322 ufshcd_complete_requests(hba);
5323
5324 if (err_xfer || err_tm)
5325 needs_reset = true;
5326
5327skip_pending_xfer_clear:
5328
5329 if (needs_reset) {
5330 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5331
5332
5333
5334
5335
5336
5337
5338
5339 if (hba->outstanding_reqs == max_doorbells)
5340 __ufshcd_transfer_req_compl(hba,
5341 (1UL << (hba->nutrs - 1)));
5342
5343 spin_unlock_irqrestore(hba->host->host_lock, flags);
5344 err = ufshcd_reset_and_restore(hba);
5345 spin_lock_irqsave(hba->host->host_lock, flags);
5346 if (err) {
5347 dev_err(hba->dev, "%s: reset and restore failed\n",
5348 __func__);
5349 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5350 }
5351
5352
5353
5354
5355 scsi_report_bus_reset(hba->host, 0);
5356 hba->saved_err = 0;
5357 hba->saved_uic_err = 0;
5358 }
5359
5360skip_err_handling:
5361 if (!needs_reset) {
5362 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5363 if (hba->saved_err || hba->saved_uic_err)
5364 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5365 __func__, hba->saved_err, hba->saved_uic_err);
5366 }
5367
5368 ufshcd_clear_eh_in_progress(hba);
5369
5370out:
5371 spin_unlock_irqrestore(hba->host->host_lock, flags);
5372 ufshcd_scsi_unblock_requests(hba);
5373 ufshcd_release(hba);
5374 pm_runtime_put_sync(hba->dev);
5375}
5376
5377static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5378 u32 reg)
5379{
5380 reg_hist->reg[reg_hist->pos] = reg;
5381 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5382 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5383}
5384
5385
5386
5387
5388
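/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags
 * @hba: per-adapter instance
 *
 * Read the UIC error code registers of each layer, record them in the
 * error history and update hba->uic_error accordingly.
 */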
5389static void ufshcd_update_uic_error(struct ufs_hba *hba)
5390{
5391 u32 reg;
5392
5393
5394 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5395
5396 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5397 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5398
5399
5400
5401
5402 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5403 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5404 }
5405
5406
5407 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5408 if (reg)
5409 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5410
5411 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5412 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5413 else if (hba->dev_quirks &
5414 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5415 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5416 hba->uic_error |=
5417 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5418 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5419 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5420 }
5421
5422
5423 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5424 if (reg) {
5425 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5426 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5427 }
5428
5429 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5430 if (reg) {
5431 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5432 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5433 }
5434
5435 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5436 if (reg) {
5437 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5438 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5439 }
5440
5441 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5442 __func__, hba->uic_error);
5443}
5444
5445
5446
5447
5448
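/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */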
5449static void ufshcd_check_errors(struct ufs_hba *hba)
5450{
5451 bool queue_eh_work = false;
5452
5453 if (hba->errors & INT_FATAL_ERRORS)
5454 queue_eh_work = true;
5455
5456 if (hba->errors & UIC_ERROR) {
5457 hba->uic_error = 0;
5458 ufshcd_update_uic_error(hba);
5459 if (hba->uic_error)
5460 queue_eh_work = true;
5461 }
5462
5463 if (queue_eh_work) {
5464
5465
5466
5467
5468 hba->saved_err |= hba->errors;
5469 hba->saved_uic_err |= hba->uic_error;
5470
5471
5472 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5473
5474 ufshcd_scsi_block_requests(hba);
5475
5476 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5477
5478
5479 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5480 bool pr_prdt = !!(hba->saved_err &
5481 SYSTEM_BUS_FATAL_ERROR);
5482
5483 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5484 __func__, hba->saved_err,
5485 hba->saved_uic_err);
5486
5487 ufshcd_print_host_regs(hba);
5488 ufshcd_print_pwr_info(hba);
5489 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5490 ufshcd_print_trs(hba, hba->outstanding_reqs,
5491 pr_prdt);
5492 }
5493 schedule_work(&hba->eh_work);
5494 }
5495 }
5496
5497
5498
5499
5500
5501
5502}
5503
5504
5505
5506
5507
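/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */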
5508static void ufshcd_tmc_handler(struct ufs_hba *hba)
5509{
5510 u32 tm_doorbell;
5511
5512 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5513 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5514 wake_up(&hba->tm_wq);
5515}
5516
5517
5518
5519
5520
5521
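/**
 * ufshcd_sl_intr - handle the interrupts reported by the controller
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */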
5522static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5523{
5524 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5525 if (hba->errors)
5526 ufshcd_check_errors(hba);
5527
5528 if (intr_status & UFSHCD_UIC_MASK)
5529 ufshcd_uic_cmd_compl(hba, intr_status);
5530
5531 if (intr_status & UTP_TASK_REQ_COMPL)
5532 ufshcd_tmc_handler(hba);
5533
5534 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5535 ufshcd_transfer_req_compl(hba);
5536}
5537
5538
5539
5540
5541
5542
5543
5544
5545
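/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED if a valid interrupt was serviced, IRQ_NONE otherwise.
 */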
5546static irqreturn_t ufshcd_intr(int irq, void *__hba)
5547{
5548 u32 intr_status, enabled_intr_status;
5549 irqreturn_t retval = IRQ_NONE;
5550 struct ufs_hba *hba = __hba;
5551 int retries = hba->nutrs;
5552
5553 spin_lock(hba->host->host_lock);
5554 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5555
5556
5557
5558
5559
5560
5561
5562 do {
5563 enabled_intr_status =
5564 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5565 if (intr_status)
5566 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5567 if (enabled_intr_status) {
5568 ufshcd_sl_intr(hba, enabled_intr_status);
5569 retval = IRQ_HANDLED;
5570 }
5571
5572 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5573 } while (intr_status && --retries);
5574
5575 spin_unlock(hba->host->host_lock);
5576 return retval;
5577}
5578
5579static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5580{
5581 int err = 0;
5582 u32 mask = 1 << tag;
5583 unsigned long flags;
5584
5585 if (!test_bit(tag, &hba->outstanding_tasks))
5586 goto out;
5587
5588 spin_lock_irqsave(hba->host->host_lock, flags);
5589 ufshcd_utmrl_clear(hba, tag);
5590 spin_unlock_irqrestore(hba->host->host_lock, flags);
5591
5592
5593 err = ufshcd_wait_for_register(hba,
5594 REG_UTP_TASK_REQ_DOOR_BELL,
5595 mask, 0, 1000, 1000, true);
5596out:
5597 return err;
5598}
5609
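/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */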
5610static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5611 u8 tm_function, u8 *tm_response)
5612{
5613 struct utp_task_req_desc *task_req_descp;
5614 struct utp_upiu_task_req *task_req_upiup;
5615 struct Scsi_Host *host;
5616 unsigned long flags;
5617 int free_slot;
5618 int err;
5619 int task_tag;
5620
5621 host = hba->host;
5622
5623
5624
5625
5626
5627
5628 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5629 ufshcd_hold(hba, false);
5630
5631 spin_lock_irqsave(host->host_lock, flags);
5632 task_req_descp = hba->utmrdl_base_addr;
5633 task_req_descp += free_slot;
5634
5635
5636 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5637 task_req_descp->header.dword_2 =
5638 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5639
5640
5641 task_req_upiup =
5642 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5643 task_tag = hba->nutrs + free_slot;
5644 task_req_upiup->header.dword_0 =
5645 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5646 lun_id, task_tag);
5647 task_req_upiup->header.dword_1 =
5648 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5649
5650
5651
5652
5653 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5654 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5655
5656 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5657
5658
5659 __set_bit(free_slot, &hba->outstanding_tasks);
5660
5661
5662 wmb();
5663
5664 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5665
5666 wmb();
5667
5668 spin_unlock_irqrestore(host->host_lock, flags);
5669
5670 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5671
5672
5673 err = wait_event_timeout(hba->tm_wq,
5674 test_bit(free_slot, &hba->tm_condition),
5675 msecs_to_jiffies(TM_CMD_TIMEOUT));
5676 if (!err) {
5677 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5678 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5679 __func__, tm_function);
5680 if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5682 __func__, free_slot);
5683 err = -ETIMEDOUT;
5684 } else {
5685 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5686 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5687 }
5688
5689 clear_bit(free_slot, &hba->tm_condition);
5690 ufshcd_put_tm_slot(hba, free_slot);
5691 wake_up(&hba->tm_tag_wq);
5692
5693 ufshcd_release(hba);
5694 return err;
5695}
5696
5697
5698
5699
5700
5701
5702
5703
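/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                  scsi layer
 * @cmd: SCSI command pointer
 *
 * Issues a LOGICAL UNIT RESET task management request to the LUN of @cmd
 * and clears any commands still outstanding on that LUN.
 *
 * Returns SUCCESS/FAILED.
 */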
5704static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5705{
5706 struct Scsi_Host *host;
5707 struct ufs_hba *hba;
5708 unsigned int tag;
5709 u32 pos;
5710 int err;
5711 u8 resp = 0xF;
5712 struct ufshcd_lrb *lrbp;
5713 unsigned long flags;
5714
5715 host = cmd->device->host;
5716 hba = shost_priv(host);
5717 tag = cmd->request->tag;
5718
5719 lrbp = &hba->lrb[tag];
5720 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5721 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5722 if (!err)
5723 err = resp;
5724 goto out;
5725 }
5726
5727
5728 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5729 if (hba->lrb[pos].lun == lrbp->lun) {
5730 err = ufshcd_clear_cmd(hba, pos);
5731 if (err)
5732 break;
5733 }
5734 }
5735 spin_lock_irqsave(host->host_lock, flags);
5736 ufshcd_transfer_req_compl(hba);
5737 spin_unlock_irqrestore(host->host_lock, flags);
5738
5739out:
5740 hba->req_abort_count = 0;
5741 if (!err) {
5742 err = SUCCESS;
5743 } else {
5744 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5745 err = FAILED;
5746 }
5747 return err;
5748}
5749
5750static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5751{
5752 struct ufshcd_lrb *lrbp;
5753 int tag;
5754
5755 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5756 lrbp = &hba->lrb[tag];
5757 lrbp->req_abort_skip = true;
5758 }
5759}
5772
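/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in the device by sending UFS_ABORT_TASK task
 * management command, and in the host controller by clearing the door-bell
 * register after physically clearing the transfer request slot. The tag is
 * then released so the block layer can reuse it.
 *
 * Returns SUCCESS/FAILED.
 */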
5773static int ufshcd_abort(struct scsi_cmnd *cmd)
5774{
5775 struct Scsi_Host *host;
5776 struct ufs_hba *hba;
5777 unsigned long flags;
5778 unsigned int tag;
5779 int err = 0;
5780 int poll_cnt;
5781 u8 resp = 0xF;
5782 struct ufshcd_lrb *lrbp;
5783 u32 reg;
5784
5785 host = cmd->device->host;
5786 hba = shost_priv(host);
5787 tag = cmd->request->tag;
5788 lrbp = &hba->lrb[tag];
5789 if (!ufshcd_valid_tag(hba, tag)) {
5790 dev_err(hba->dev,
5791 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5792 __func__, tag, cmd, cmd->request);
5793 BUG();
5794 }
5795
5796
5797
5798
5799
5800
5801
5802
5803 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5804 return ufshcd_eh_host_reset_handler(cmd);
5805
5806 ufshcd_hold(hba, false);
5807 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5808
5809 if (!(test_bit(tag, &hba->outstanding_reqs))) {
5810 dev_err(hba->dev,
5811 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5812 __func__, tag, hba->outstanding_reqs, reg);
5813 goto out;
5814 }
5815
5816 if (!(reg & (1 << tag))) {
5817 dev_err(hba->dev,
5818 "%s: cmd was completed, but without a notifying intr, tag = %d",
5819 __func__, tag);
5820 }
5821
5822
5823 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5824
5825
5826
5827
5828
5829
5830
5831
5832 scsi_print_command(hba->lrb[tag].cmd);
5833 if (!hba->req_abort_count) {
5834 ufshcd_print_host_regs(hba);
5835 ufshcd_print_host_state(hba);
5836 ufshcd_print_pwr_info(hba);
5837 ufshcd_print_trs(hba, 1 << tag, true);
5838 } else {
5839 ufshcd_print_trs(hba, 1 << tag, false);
5840 }
5841 hba->req_abort_count++;
5842
5843
5844 if (lrbp->req_abort_skip) {
5845 err = -EIO;
5846 goto out;
5847 }
5848
5849 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5850 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5851 UFS_QUERY_TASK, &resp);
5852 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5853
5854 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5855 __func__, tag);
5856 break;
5857 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5858
5859
5860
5861
5862 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5863 __func__, tag);
5864 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5865 if (reg & (1 << tag)) {
5866
5867 usleep_range(100, 200);
5868 continue;
5869 }
5870
5871 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5872 __func__, tag);
5873 goto out;
5874 } else {
5875 dev_err(hba->dev,
5876 "%s: no response from device. tag = %d, err %d\n",
5877 __func__, tag, err);
5878 if (!err)
5879 err = resp;
5880 goto out;
5881 }
5882 }
5883
5884 if (!poll_cnt) {
5885 err = -EBUSY;
5886 goto out;
5887 }
5888
5889 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5890 UFS_ABORT_TASK, &resp);
5891 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5892 if (!err) {
5893 err = resp;
			dev_err(hba->dev, "%s: abort task TM request failed. tag = %d, err %d\n",
5895 __func__, tag, err);
5896 }
5897 goto out;
5898 }
5899
5900 err = ufshcd_clear_cmd(hba, tag);
5901 if (err) {
5902 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5903 __func__, tag, err);
5904 goto out;
5905 }
5906
5907 scsi_dma_unmap(cmd);
5908
5909 spin_lock_irqsave(host->host_lock, flags);
5910 ufshcd_outstanding_req_clear(hba, tag);
5911 hba->lrb[tag].cmd = NULL;
5912 spin_unlock_irqrestore(host->host_lock, flags);
5913
5914 clear_bit_unlock(tag, &hba->lrb_in_use);
5915 wake_up(&hba->dev_cmd.tag_wq);
5916
5917out:
5918 if (!err) {
5919 err = SUCCESS;
5920 } else {
5921 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5922 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
5923 err = FAILED;
5924 }
5925
5926
5927
5928
5929
5930 ufshcd_release(hba);
5931 return err;
5932}
5943
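/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that a host controller reset may issue DME_RESET to the local and
 * remote (device) Uni-Pro stack, so the link attributes are reset to their
 * default power-on values.
 *
 * Returns zero on success, non-zero on failure.
 */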
5944static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5945{
5946 int err;
5947 unsigned long flags;
5948
5949
5950 spin_lock_irqsave(hba->host->host_lock, flags);
5951 ufshcd_hba_stop(hba, false);
5952 spin_unlock_irqrestore(hba->host->host_lock, flags);
5953
5954
5955 ufshcd_scale_clks(hba, true);
5956
5957 err = ufshcd_hba_enable(hba);
5958 if (err)
5959 goto out;
5960
5961
5962 err = ufshcd_probe_hba(hba);
5963
5964 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
5965 err = -EIO;
5966out:
5967 if (err)
5968 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5969
5970 return err;
5971}
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
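/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover the device and the host, and re-establish the link.
 * This is helpful to recover communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure.
 */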
5982static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5983{
5984 int err = 0;
5985 unsigned long flags;
5986 int retries = MAX_HOST_RESET_RETRIES;
5987
5988 do {
5989 err = ufshcd_host_reset_and_restore(hba);
5990 } while (err && --retries);
5991
5992
5993
5994
5995
5996 spin_lock_irqsave(hba->host->host_lock, flags);
5997 ufshcd_transfer_req_compl(hba);
5998 ufshcd_tmc_handler(hba);
5999 spin_unlock_irqrestore(hba->host->host_lock, flags);
6000
6001 return err;
6002}
6003
6004
6005
6006
6007
6008
6009
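/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED.
 */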
6010static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6011{
6012 int err;
6013 unsigned long flags;
6014 struct ufs_hba *hba;
6015
6016 hba = shost_priv(cmd->device->host);
6017
6018 ufshcd_hold(hba, false);
6019
6020
6021
6022
6023
6024
6025 do {
6026 spin_lock_irqsave(hba->host->host_lock, flags);
6027 if (!(work_pending(&hba->eh_work) ||
6028 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6029 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6030 break;
6031 spin_unlock_irqrestore(hba->host->host_lock, flags);
6032 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6033 flush_work(&hba->eh_work);
6034 } while (1);
6035
6036 hba->ufshcd_state = UFSHCD_STATE_RESET;
6037 ufshcd_set_eh_in_progress(hba);
6038 spin_unlock_irqrestore(hba->host->host_lock, flags);
6039
6040 err = ufshcd_reset_and_restore(hba);
6041
6042 spin_lock_irqsave(hba->host->host_lock, flags);
6043 if (!err) {
6044 err = SUCCESS;
6045 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6046 } else {
6047 err = FAILED;
6048 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6049 }
6050 ufshcd_clear_eh_in_progress(hba);
6051 spin_unlock_irqrestore(hba->host->host_lock, flags);
6052
6053 ufshcd_release(hba);
6054 return err;
6055}
6056
6057
6058
6059
6060
6061
6062
6063
6064
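/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row of the descriptor table to start the scan from
 * @buff: power descriptor buffer
 *
 * Returns the highest ICC level (row index) whose current requirement does
 * not exceed @sup_curr_uA, or 0 if no valid level was found.
 */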
6065static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6066{
6067 int i;
6068 int curr_uA;
6069 u16 data;
6070 u16 unit;
6071
6072 for (i = start_scan; i >= 0; i--) {
6073 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6074 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6075 ATTR_ICC_LVL_UNIT_OFFSET;
6076 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6077 switch (unit) {
6078 case UFSHCD_NANO_AMP:
6079 curr_uA = curr_uA / 1000;
6080 break;
6081 case UFSHCD_MILI_AMP:
6082 curr_uA = curr_uA * 1000;
6083 break;
6084 case UFSHCD_AMP:
6085 curr_uA = curr_uA * 1000 * 1000;
6086 break;
6087 case UFSHCD_MICRO_AMP:
6088 default:
6089 break;
6090 }
6091 if (sup_curr_uA >= curr_uA)
6092 break;
6093 }
	if (i < 0) {
		pr_err("%s: Couldn't find valid icc_level, using 0\n", __func__);
		i = 0;
6097 }
6098
6099 return (u32)i;
6100}
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
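/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max active ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from
 * @len: length of @desc_buf
 *
 * Returns the calculated ICC level supported by all configured regulators,
 * or 0 if the regulator capabilities were not provided.
 */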
6111static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6112 u8 *desc_buf, int len)
6113{
6114 u32 icc_level = 0;
6115
6116 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6117 !hba->vreg_info.vccq2) {
6118 dev_err(hba->dev,
6119 "%s: Regulator capability was not set, actvIccLevel=%d",
6120 __func__, icc_level);
6121 goto out;
6122 }
6123
6124 if (hba->vreg_info.vcc)
6125 icc_level = ufshcd_get_max_icc_level(
6126 hba->vreg_info.vcc->max_uA,
6127 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6128 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6129
6130 if (hba->vreg_info.vccq)
6131 icc_level = ufshcd_get_max_icc_level(
6132 hba->vreg_info.vccq->max_uA,
6133 icc_level,
6134 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6135
6136 if (hba->vreg_info.vccq2)
6137 icc_level = ufshcd_get_max_icc_level(
6138 hba->vreg_info.vccq2->max_uA,
6139 icc_level,
6140 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6141out:
6142 return icc_level;
6143}
6144
6145static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6146{
6147 int ret;
6148 int buff_len = hba->desc_size.pwr_desc;
6149 u8 *desc_buf;
6150
6151 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6152 if (!desc_buf)
6153 return;
6154
6155 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6156 if (ret) {
6157 dev_err(hba->dev,
6158 "%s: Failed reading power descriptor.len = %d ret = %d",
6159 __func__, buff_len, ret);
6160 goto out;
6161 }
6162
6163 hba->init_prefetch_data.icc_level =
6164 ufshcd_find_max_sup_active_icc_level(hba,
6165 desc_buf, buff_len);
6166 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6167 __func__, hba->init_prefetch_data.icc_level);
6168
6169 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6170 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6171 &hba->init_prefetch_data.icc_level);
6172
6173 if (ret)
6174 dev_err(hba->dev,
6175 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
6177
6178out:
6179 kfree(desc_buf);
6180}
6207
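/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device power management is driven by the START STOP UNIT command sent
 * to the "UFS Device" well known logical unit, so a scsi_device instance is
 * needed for it. scsi_device instances are also added for the RPMB W-LU (so
 * user space can control it) and, if present, the BOOT W-LU.
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if it failed to add any required W-LU).
 */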
6208static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6209{
6210 int ret = 0;
6211 struct scsi_device *sdev_rpmb;
6212 struct scsi_device *sdev_boot;
6213
6214 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6215 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6216 if (IS_ERR(hba->sdev_ufs_device)) {
6217 ret = PTR_ERR(hba->sdev_ufs_device);
6218 hba->sdev_ufs_device = NULL;
6219 goto out;
6220 }
6221 scsi_device_put(hba->sdev_ufs_device);
6222
6223 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6224 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6225 if (IS_ERR(sdev_rpmb)) {
6226 ret = PTR_ERR(sdev_rpmb);
6227 goto remove_sdev_ufs_device;
6228 }
6229 scsi_device_put(sdev_rpmb);
6230
6231 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6232 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6233 if (IS_ERR(sdev_boot))
6234 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6235 else
6236 scsi_device_put(sdev_boot);
6237 goto out;
6238
6239remove_sdev_ufs_device:
6240 scsi_remove_device(hba->sdev_ufs_device);
6241out:
6242 return ret;
6243}
6244
6245static int ufs_get_device_desc(struct ufs_hba *hba,
6246 struct ufs_dev_desc *dev_desc)
6247{
6248 int err;
6249 size_t buff_len;
6250 u8 model_index;
6251 u8 *desc_buf;
6252
6253 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6254 QUERY_DESC_MAX_SIZE + 1);
6255 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6256 if (!desc_buf) {
6257 err = -ENOMEM;
6258 goto out;
6259 }
6260
6261 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6262 if (err) {
6263 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6264 __func__, err);
6265 goto out;
6266 }
6267
6268
6269
6270
6271
6272 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6273 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6274
6275 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6276
6277
6278 memset(desc_buf, 0, buff_len);
6279
6280 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6281 QUERY_DESC_MAX_SIZE, true);
6282 if (err) {
6283 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6284 __func__, err);
6285 goto out;
6286 }
6287
6288 desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6289 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6290 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6291 MAX_MODEL_LEN));
6292
6293
6294 dev_desc->model[MAX_MODEL_LEN] = '\0';
6295
6296out:
6297 kfree(desc_buf);
6298 return err;
6299}
6300
6301static void ufs_fixup_device_setup(struct ufs_hba *hba,
6302 struct ufs_dev_desc *dev_desc)
6303{
6304 struct ufs_dev_fix *f;
6305
6306 for (f = ufs_fixups; f->quirk; f++) {
6307 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6308 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6309 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6310 !strcmp(f->card.model, UFS_ANY_MODEL)))
6311 hba->dev_quirks |= f->quirk;
6312 }
6313}
6325
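/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate is derived from the peer M-PHY's RX_MIN_ACTIVATETIME
 * capability so that hibern8 exit latency is kept as low as the peer allows.
 *
 * Returns zero on success, non-zero error value on failure.
 */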
6326static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6327{
6328 int ret = 0;
6329 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6330
6331 ret = ufshcd_dme_peer_get(hba,
6332 UIC_ARG_MIB_SEL(
6333 RX_MIN_ACTIVATETIME_CAPABILITY,
6334 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6335 &peer_rx_min_activatetime);
6336 if (ret)
6337 goto out;
6338
6339
6340 tuned_pa_tactivate =
6341 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6342 / PA_TACTIVATE_TIME_UNIT_US);
6343 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6344 tuned_pa_tactivate);
6345
6346out:
6347 return ret;
6348}
6360
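/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time is set from the larger of the local TX_HIBERN8TIME
 * capability and the peer RX_HIBERN8TIME capability.
 *
 * Returns zero on success, non-zero error value on failure.
 */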
6361static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6362{
6363 int ret = 0;
6364 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6365 u32 max_hibern8_time, tuned_pa_hibern8time;
6366
6367 ret = ufshcd_dme_get(hba,
6368 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6369 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6370 &local_tx_hibern8_time_cap);
6371 if (ret)
6372 goto out;
6373
6374 ret = ufshcd_dme_peer_get(hba,
6375 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6376 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6377 &peer_rx_hibern8_time_cap);
6378 if (ret)
6379 goto out;
6380
6381 max_hibern8_time = max(local_tx_hibern8_time_cap,
6382 peer_rx_hibern8_time_cap);
6383
6384 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6385 / PA_HIBERN8_TIME_UNIT_US);
6386 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6387 tuned_pa_hibern8time);
6388out:
6389 return ret;
6390}
6402
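/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensure device PA_TACTIVATE is not
 *	lower than host PA_TACTIVATE
 * @hba: per-adapter instance
 *
 * Both values are compared in microseconds, taking the respective
 * PA_GRANULARITY settings into account; if the host value is larger, the
 * device PA_TACTIVATE is raised accordingly.
 *
 * Returns zero on success, non-zero error value on failure.
 */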
6403static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6404{
6405 int ret = 0;
6406 u32 granularity, peer_granularity;
6407 u32 pa_tactivate, peer_pa_tactivate;
6408 u32 pa_tactivate_us, peer_pa_tactivate_us;
6409 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6410
6411 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6412 &granularity);
6413 if (ret)
6414 goto out;
6415
6416 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6417 &peer_granularity);
6418 if (ret)
6419 goto out;
6420
6421 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6422 (granularity > PA_GRANULARITY_MAX_VAL)) {
6423 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6424 __func__, granularity);
6425 return -EINVAL;
6426 }
6427
6428 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6429 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6430 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6431 __func__, peer_granularity);
6432 return -EINVAL;
6433 }
6434
6435 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6436 if (ret)
6437 goto out;
6438
6439 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6440 &peer_pa_tactivate);
6441 if (ret)
6442 goto out;
6443
6444 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6445 peer_pa_tactivate_us = peer_pa_tactivate *
6446 gran_to_us_table[peer_granularity - 1];
6447
6448 if (pa_tactivate_us > peer_pa_tactivate_us) {
6449 u32 new_peer_pa_tactivate;
6450
6451 new_peer_pa_tactivate = pa_tactivate_us /
6452 gran_to_us_table[peer_granularity - 1];
6453 new_peer_pa_tactivate++;
6454 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6455 new_peer_pa_tactivate);
6456 }
6457
6458out:
6459 return ret;
6460}
6461
6462static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6463{
6464 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6465 ufshcd_tune_pa_tactivate(hba);
6466 ufshcd_tune_pa_hibern8time(hba);
6467 }
6468
6469 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6470
6471 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6472
6473 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6474 ufshcd_quirk_tune_host_pa_tactivate(hba);
6475
6476 ufshcd_vops_apply_dev_quirks(hba);
6477}
6478
6479static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6480{
6481 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6482
6483 hba->ufs_stats.hibern8_exit_cnt = 0;
6484 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6485
6486 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6487 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6488 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6489 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6490 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6491
6492 hba->req_abort_count = 0;
6493}
6494
6495static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6496{
6497 int err;
6498
6499 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6500 &hba->desc_size.dev_desc);
6501 if (err)
6502 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6503
6504 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6505 &hba->desc_size.pwr_desc);
6506 if (err)
6507 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6508
6509 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6510 &hba->desc_size.interc_desc);
6511 if (err)
6512 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6513
6514 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6515 &hba->desc_size.conf_desc);
6516 if (err)
6517 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6518
6519 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6520 &hba->desc_size.unit_desc);
6521 if (err)
6522 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6523
6524 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6525 &hba->desc_size.geom_desc);
6526 if (err)
6527 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6528 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6529 &hba->desc_size.hlth_desc);
6530 if (err)
6531 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6532}
6533
6534static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6535{
6536 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6537 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6538 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6539 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6540 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6541 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6542 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6543}
6544
6545
6546
6547
6548
6549
6550
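/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization.
 */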
6551static int ufshcd_probe_hba(struct ufs_hba *hba)
6552{
6553 struct ufs_dev_desc card = {0};
6554 int ret;
6555 ktime_t start = ktime_get();
6556
6557 ret = ufshcd_link_startup(hba);
6558 if (ret)
6559 goto out;
6560
6561
6562 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6563 hba->is_urgent_bkops_lvl_checked = false;
6564
6565
6566 ufshcd_clear_dbg_ufs_stats(hba);
6567
6568
6569 ufshcd_set_link_active(hba);
6570
6571
6572 ufshcd_auto_hibern8_enable(hba);
6573
6574 ret = ufshcd_verify_dev_init(hba);
6575 if (ret)
6576 goto out;
6577
6578 ret = ufshcd_complete_dev_init(hba);
6579 if (ret)
6580 goto out;
6581
6582
6583 ufshcd_init_desc_sizes(hba);
6584
6585 ret = ufs_get_device_desc(hba, &card);
6586 if (ret) {
6587 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6588 __func__, ret);
6589 goto out;
6590 }
6591
6592 ufs_fixup_device_setup(hba, &card);
6593 ufshcd_tune_unipro_params(hba);
6594
6595 ret = ufshcd_set_vccq_rail_unused(hba,
6596 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6597 if (ret)
6598 goto out;
6599
6600
6601 ufshcd_set_ufs_dev_active(hba);
6602 ufshcd_force_reset_auto_bkops(hba);
6603 hba->wlun_dev_clr_ua = true;
6604
6605 if (ufshcd_get_max_pwr_mode(hba)) {
6606 dev_err(hba->dev,
6607 "%s: Failed getting max supported power mode\n",
6608 __func__);
6609 } else {
6610 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6611 if (ret) {
6612 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6613 __func__, ret);
6614 goto out;
6615 }
6616 }
6617
6618
6619 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6620
6621
6622
6623
6624
6625 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6626 bool flag;
6627
6628
6629 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6630 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6631 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6632 hba->dev_info.f_power_on_wp_en = flag;
6633
6634 if (!hba->is_init_prefetch)
6635 ufshcd_init_icc_levels(hba);
6636
6637
6638 if (ufshcd_scsi_add_wlus(hba))
6639 goto out;
6640
6641
6642 if (ufshcd_is_clkscaling_supported(hba)) {
6643 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6644 &hba->pwr_info,
6645 sizeof(struct ufs_pa_layer_attr));
6646 hba->clk_scaling.saved_pwr_info.is_valid = true;
6647 if (!hba->devfreq) {
6648 ret = ufshcd_devfreq_init(hba);
6649 if (ret)
6650 goto out;
6651 }
6652 hba->clk_scaling.is_allowed = true;
6653 }
6654
6655 scsi_scan_host(hba->host);
6656 pm_runtime_put_sync(hba->dev);
6657 }
6658
6659 if (!hba->is_init_prefetch)
6660 hba->is_init_prefetch = true;
6661
6662out:
6663
6664
6665
6666
6667 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6668 pm_runtime_put_sync(hba->dev);
6669 ufshcd_hba_exit(hba);
6670 }
6671
6672 trace_ufshcd_init(dev_name(hba->dev), ret,
6673 ktime_to_us(ktime_sub(ktime_get(), start)),
6674 hba->curr_dev_pwr_mode, hba->uic_link_state);
6675 return ret;
6676}
6677
6678
6679
6680
6681
6682
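/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */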
6683static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6684{
6685 struct ufs_hba *hba = (struct ufs_hba *)data;
6686
6687 ufshcd_probe_hba(hba);
6688}
6689
6690static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6691{
6692 unsigned long flags;
6693 struct Scsi_Host *host;
6694 struct ufs_hba *hba;
6695 int index;
6696 bool found = false;
6697
6698 if (!scmd || !scmd->device || !scmd->device->host)
6699 return BLK_EH_DONE;
6700
6701 host = scmd->device->host;
6702 hba = shost_priv(host);
6703 if (!hba)
6704 return BLK_EH_DONE;
6705
6706 spin_lock_irqsave(host->host_lock, flags);
6707
6708 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6709 if (hba->lrb[index].cmd == scmd) {
6710 found = true;
6711 break;
6712 }
6713 }
6714
6715 spin_unlock_irqrestore(host->host_lock, flags);
6716
6717
6718
6719
6720
6721
6722 return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
6723}
6724
6725static const struct attribute_group *ufshcd_driver_groups[] = {
6726 &ufs_sysfs_unit_descriptor_group,
6727 &ufs_sysfs_lun_attributes_group,
6728 NULL,
6729};
6730
6731static struct scsi_host_template ufshcd_driver_template = {
6732 .module = THIS_MODULE,
6733 .name = UFSHCD,
6734 .proc_name = UFSHCD,
6735 .queuecommand = ufshcd_queuecommand,
6736 .slave_alloc = ufshcd_slave_alloc,
6737 .slave_configure = ufshcd_slave_configure,
6738 .slave_destroy = ufshcd_slave_destroy,
6739 .change_queue_depth = ufshcd_change_queue_depth,
6740 .eh_abort_handler = ufshcd_abort,
6741 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6742 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6743 .eh_timed_out = ufshcd_eh_timed_out,
6744 .this_id = -1,
6745 .sg_tablesize = SG_ALL,
6746 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
6747 .can_queue = UFSHCD_CAN_QUEUE,
6748 .max_host_blocked = 1,
6749 .track_queue_depth = 1,
6750 .sdev_groups = ufshcd_driver_groups,
6751};
6752
6753static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6754 int ua)
6755{
6756 int ret;
6757
6758 if (!vreg)
6759 return 0;
6760
6761 ret = regulator_set_load(vreg->reg, ua);
6762 if (ret < 0) {
6763 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6764 __func__, vreg->name, ua, ret);
6765 }
6766
6767 return ret;
6768}
6769
6770static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6771 struct ufs_vreg *vreg)
6772{
6773 if (!vreg)
6774 return 0;
6775 else if (vreg->unused)
6776 return 0;
6777 else
6778 return ufshcd_config_vreg_load(hba->dev, vreg,
6779 UFS_VREG_LPM_LOAD_UA);
6780}
6781
6782static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6783 struct ufs_vreg *vreg)
6784{
6785 if (!vreg)
6786 return 0;
6787 else if (vreg->unused)
6788 return 0;
6789 else
6790 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
6791}
6792
6793static int ufshcd_config_vreg(struct device *dev,
6794 struct ufs_vreg *vreg, bool on)
6795{
6796 int ret = 0;
6797 struct regulator *reg;
6798 const char *name;
6799 int min_uV, uA_load;
6800
6801 BUG_ON(!vreg);
6802
6803 reg = vreg->reg;
6804 name = vreg->name;
6805
6806 if (regulator_count_voltages(reg) > 0) {
6807 min_uV = on ? vreg->min_uV : 0;
6808 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6809 if (ret) {
6810 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6811 __func__, name, ret);
6812 goto out;
6813 }
6814
6815 uA_load = on ? vreg->max_uA : 0;
6816 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6817 if (ret)
6818 goto out;
6819 }
6820out:
6821 return ret;
6822}
6823
6824static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6825{
6826 int ret = 0;
6827
6828 if (!vreg)
6829 goto out;
6830 else if (vreg->enabled || vreg->unused)
6831 goto out;
6832
6833 ret = ufshcd_config_vreg(dev, vreg, true);
6834 if (!ret)
6835 ret = regulator_enable(vreg->reg);
6836
6837 if (!ret)
6838 vreg->enabled = true;
6839 else
6840 dev_err(dev, "%s: %s enable failed, err=%d\n",
6841 __func__, vreg->name, ret);
6842out:
6843 return ret;
6844}
6845
6846static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6847{
6848 int ret = 0;
6849
6850 if (!vreg)
6851 goto out;
6852 else if (!vreg->enabled || vreg->unused)
6853 goto out;
6854
6855 ret = regulator_disable(vreg->reg);
6856
6857 if (!ret) {
6858
6859 ufshcd_config_vreg(dev, vreg, false);
6860 vreg->enabled = false;
6861 } else {
6862 dev_err(dev, "%s: %s disable failed, err=%d\n",
6863 __func__, vreg->name, ret);
6864 }
6865out:
6866 return ret;
6867}
6868
6869static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6870{
6871 int ret = 0;
6872 struct device *dev = hba->dev;
6873 struct ufs_vreg_info *info = &hba->vreg_info;
6874
6875 if (!info)
6876 goto out;
6877
6878 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6879 if (ret)
6880 goto out;
6881
6882 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6883 if (ret)
6884 goto out;
6885
6886 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6887 if (ret)
6888 goto out;
6889
6890out:
6891 if (ret) {
6892 ufshcd_toggle_vreg(dev, info->vccq2, false);
6893 ufshcd_toggle_vreg(dev, info->vccq, false);
6894 ufshcd_toggle_vreg(dev, info->vcc, false);
6895 }
6896 return ret;
6897}
6898
6899static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6900{
6901 struct ufs_vreg_info *info = &hba->vreg_info;
6902
6903 if (info)
6904 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6905
6906 return 0;
6907}
6908
6909static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6910{
6911 int ret = 0;
6912
6913 if (!vreg)
6914 goto out;
6915
6916 vreg->reg = devm_regulator_get(dev, vreg->name);
6917 if (IS_ERR(vreg->reg)) {
6918 ret = PTR_ERR(vreg->reg);
6919 dev_err(dev, "%s: %s get failed, err=%d\n",
6920 __func__, vreg->name, ret);
6921 }
6922out:
6923 return ret;
6924}
6925
6926static int ufshcd_init_vreg(struct ufs_hba *hba)
6927{
6928 int ret = 0;
6929 struct device *dev = hba->dev;
6930 struct ufs_vreg_info *info = &hba->vreg_info;
6931
6932 if (!info)
6933 goto out;
6934
6935 ret = ufshcd_get_vreg(dev, info->vcc);
6936 if (ret)
6937 goto out;
6938
6939 ret = ufshcd_get_vreg(dev, info->vccq);
6940 if (ret)
6941 goto out;
6942
6943 ret = ufshcd_get_vreg(dev, info->vccq2);
6944out:
6945 return ret;
6946}
6947
6948static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6949{
6950 struct ufs_vreg_info *info = &hba->vreg_info;
6951
6952 if (info)
6953 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6954
6955 return 0;
6956}
6957
6958static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6959{
6960 int ret = 0;
6961 struct ufs_vreg_info *info = &hba->vreg_info;
6962
6963 if (!info)
6964 goto out;
6965 else if (!info->vccq)
6966 goto out;
6967
6968 if (unused) {
6969
6970 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
6971
6972
6973
6974
6975 if (!ret)
6976 info->vccq->unused = true;
6977 } else {
6978
6979
6980
6981
6982 info->vccq->unused = false;
6983 }
6984out:
6985 return ret;
6986}
6987
6988static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6989 bool skip_ref_clk)
6990{
6991 int ret = 0;
6992 struct ufs_clk_info *clki;
6993 struct list_head *head = &hba->clk_list_head;
6994 unsigned long flags;
6995 ktime_t start = ktime_get();
6996 bool clk_state_changed = false;
6997
6998 if (list_empty(head))
6999 goto out;
7000
7001
7002
7003
7004
7005
7006 if (!on) {
7007 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7008 if (ret)
7009 return ret;
7010 }
7011
7012 list_for_each_entry(clki, head, list) {
7013 if (!IS_ERR_OR_NULL(clki->clk)) {
7014 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7015 continue;
7016
7017 clk_state_changed = on ^ clki->enabled;
7018 if (on && !clki->enabled) {
7019 ret = clk_prepare_enable(clki->clk);
7020 if (ret) {
7021 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7022 __func__, clki->name, ret);
7023 goto out;
7024 }
7025 } else if (!on && clki->enabled) {
7026 clk_disable_unprepare(clki->clk);
7027 }
7028 clki->enabled = on;
7029 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7030 clki->name, on ? "en" : "dis");
7031 }
7032 }
7033
7034
7035
7036
7037
7038
7039 if (on) {
7040 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7041 if (ret)
7042 return ret;
7043 }
7044
7045out:
7046 if (ret) {
7047 list_for_each_entry(clki, head, list) {
7048 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7049 clk_disable_unprepare(clki->clk);
7050 }
7051 } else if (!ret && on) {
7052 spin_lock_irqsave(hba->host->host_lock, flags);
7053 hba->clk_gating.state = CLKS_ON;
7054 trace_ufshcd_clk_gating(dev_name(hba->dev),
7055 hba->clk_gating.state);
7056 spin_unlock_irqrestore(hba->host->host_lock, flags);
7057 }
7058
7059 if (clk_state_changed)
7060 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7061 (on ? "on" : "off"),
7062 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7063 return ret;
7064}
7065
7066static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7067{
7068 return __ufshcd_setup_clocks(hba, on, false);
7069}
7070
7071static int ufshcd_init_clocks(struct ufs_hba *hba)
7072{
7073 int ret = 0;
7074 struct ufs_clk_info *clki;
7075 struct device *dev = hba->dev;
7076 struct list_head *head = &hba->clk_list_head;
7077
7078 if (list_empty(head))
7079 goto out;
7080
7081 list_for_each_entry(clki, head, list) {
7082 if (!clki->name)
7083 continue;
7084
7085 clki->clk = devm_clk_get(dev, clki->name);
7086 if (IS_ERR(clki->clk)) {
7087 ret = PTR_ERR(clki->clk);
7088 dev_err(dev, "%s: %s clk get failed, %d\n",
7089 __func__, clki->name, ret);
7090 goto out;
7091 }
7092
7093 if (clki->max_freq) {
7094 ret = clk_set_rate(clki->clk, clki->max_freq);
7095 if (ret) {
7096 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7097 __func__, clki->name,
7098 clki->max_freq, ret);
7099 goto out;
7100 }
7101 clki->curr_freq = clki->max_freq;
7102 }
7103 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7104 clki->name, clk_get_rate(clki->clk));
7105 }
7106out:
7107 return ret;
7108}
7109
7110static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7111{
7112 int err = 0;
7113
7114 if (!hba->vops)
7115 goto out;
7116
7117 err = ufshcd_vops_init(hba);
7118 if (err)
7119 goto out;
7120
7121 err = ufshcd_vops_setup_regulators(hba, true);
7122 if (err)
7123 goto out_exit;
7124
7125 goto out;
7126
7127out_exit:
7128 ufshcd_vops_exit(hba);
7129out:
7130 if (err)
7131 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7132 __func__, ufshcd_get_var_name(hba), err);
7133 return err;
7134}
7135
7136static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7137{
7138 if (!hba->vops)
7139 return;
7140
7141 ufshcd_vops_setup_regulators(hba, false);
7142
7143 ufshcd_vops_exit(hba);
7144}
7145
7146static int ufshcd_hba_init(struct ufs_hba *hba)
7147{
7148 int err;
7149
7150
7151
7152
7153
7154
7155
7156
7157 err = ufshcd_init_hba_vreg(hba);
7158 if (err)
7159 goto out;
7160
7161 err = ufshcd_setup_hba_vreg(hba, true);
7162 if (err)
7163 goto out;
7164
7165 err = ufshcd_init_clocks(hba);
7166 if (err)
7167 goto out_disable_hba_vreg;
7168
7169 err = ufshcd_setup_clocks(hba, true);
7170 if (err)
7171 goto out_disable_hba_vreg;
7172
7173 err = ufshcd_init_vreg(hba);
7174 if (err)
7175 goto out_disable_clks;
7176
7177 err = ufshcd_setup_vreg(hba, true);
7178 if (err)
7179 goto out_disable_clks;
7180
7181 err = ufshcd_variant_hba_init(hba);
7182 if (err)
7183 goto out_disable_vreg;
7184
7185 hba->is_powered = true;
7186 goto out;
7187
7188out_disable_vreg:
7189 ufshcd_setup_vreg(hba, false);
7190out_disable_clks:
7191 ufshcd_setup_clocks(hba, false);
7192out_disable_hba_vreg:
7193 ufshcd_setup_hba_vreg(hba, false);
7194out:
7195 return err;
7196}
7197
7198static void ufshcd_hba_exit(struct ufs_hba *hba)
7199{
7200 if (hba->is_powered) {
7201 ufshcd_variant_hba_exit(hba);
7202 ufshcd_setup_vreg(hba, false);
7203 ufshcd_suspend_clkscaling(hba);
7204 if (ufshcd_is_clkscaling_supported(hba)) {
7205 if (hba->devfreq)
7206 ufshcd_suspend_clkscaling(hba);
7207 destroy_workqueue(hba->clk_scaling.workq);
7208 ufshcd_devfreq_remove(hba);
7209 }
7210 ufshcd_setup_clocks(hba, false);
7211 ufshcd_setup_hba_vreg(hba, false);
7212 hba->is_powered = false;
7213 }
7214}
7215
7216static int
7217ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7218{
7219 unsigned char cmd[6] = {REQUEST_SENSE,
7220 0,
7221 0,
7222 0,
7223 UFSHCD_REQ_SENSE_SIZE,
7224 0};
7225 char *buffer;
7226 int ret;
7227
7228 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7229 if (!buffer) {
7230 ret = -ENOMEM;
7231 goto out;
7232 }
7233
7234 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7235 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7236 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7237 if (ret)
7238 pr_err("%s: failed with err %d\n", __func__, ret);
7239
7240 kfree(buffer);
7241out:
7242 return ret;
7243}
7244
7245
7246
7247
7248
7249
7250
7251
7252
7253
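/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode is set successfully,
 * non-zero if it failed to set the requested power mode.
 */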
7254static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7255 enum ufs_dev_pwr_mode pwr_mode)
7256{
7257 unsigned char cmd[6] = { START_STOP };
7258 struct scsi_sense_hdr sshdr;
7259 struct scsi_device *sdp;
7260 unsigned long flags;
7261 int ret;
7262
7263 spin_lock_irqsave(hba->host->host_lock, flags);
7264 sdp = hba->sdev_ufs_device;
7265 if (sdp) {
7266 ret = scsi_device_get(sdp);
7267 if (!ret && !scsi_device_online(sdp)) {
7268 ret = -ENODEV;
7269 scsi_device_put(sdp);
7270 }
7271 } else {
7272 ret = -ENODEV;
7273 }
7274 spin_unlock_irqrestore(hba->host->host_lock, flags);
7275
7276 if (ret)
7277 return ret;
7278
7279 /*
7280  * If SCSI commands fail, the SCSI midlayer schedules error handling,
7281  * which would wait for the host to be resumed. Since we know we are
7282  * functional while we are here, skip host resume in the error handling
7283  * context.
7284  */
7285 hba->host->eh_noresume = 1;
7286 if (hba->wlun_dev_clr_ua) {
7287 ret = ufshcd_send_request_sense(hba, sdp);
7288 if (ret)
7289 goto out;
7290
7291 hba->wlun_dev_clr_ua = false;
7292 }
7293
7294 cmd[4] = pwr_mode << 4;
7295
7296 /*
7297  * This function is generally called from the power management
7298  * callbacks, hence set the RQF_PM flag so that it doesn't resume the
7299  * already suspended children.
7300  */
7301 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7302 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7303 if (ret) {
7304 sdev_printk(KERN_WARNING, sdp,
7305 "START_STOP failed for power mode: %d, result %x\n",
7306 pwr_mode, ret);
7307 if (driver_byte(ret) == DRIVER_SENSE)
7308 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7309 }
7310
7311 if (!ret)
7312 hba->curr_dev_pwr_mode = pwr_mode;
7313out:
7314 scsi_device_put(sdp);
7315 hba->host->eh_noresume = 0;
7316 return ret;
7317}
7318
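/*
 * ufshcd_link_state_transition - move the UniPro link to the requested low
 * power state (Hibern8 or off). When @check_for_bkops is set, the link is
 * only turned off if auto bkops is not enabled.
 */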
7319static int ufshcd_link_state_transition(struct ufs_hba *hba,
7320 enum uic_link_state req_link_state,
7321 int check_for_bkops)
7322{
7323 int ret = 0;
7324
7325 if (req_link_state == hba->uic_link_state)
7326 return 0;
7327
7328 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7329 ret = ufshcd_uic_hibern8_enter(hba);
7330 if (!ret)
7331 ufshcd_set_link_hibern8(hba);
7332 else
7333 goto out;
7334 }
7335
7336 /*
7337  * If auto bkops is enabled, don't turn the link off: that would also disable bkops.
7338  */
7339 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7340 (!check_for_bkops || (check_for_bkops &&
7341 !hba->auto_bkops_enabled))) {
7342 /*
7343  * Make sure the link is in a low power mode before turning it off.
7344  * Currently this is done by putting the link in Hibern8. The
7345  * alternative is to send a DME end point reset to the device and then
7346  * a DME reset to the local UniPro, but putting the link in Hibern8 is
7347  * much faster.
7348  */
7349 ret = ufshcd_uic_hibern8_enter(hba);
7350 if (ret)
7351 goto out;
7352
7353 /*
7354  * Change the controller state to "reset state", which also puts the link in off/reset state.
7355  */
7356 ufshcd_hba_stop(hba, true);
7357
7358 /*
7359  * TODO: check whether a delay is needed to make sure the controller is reset.
7360  */
7361 ufshcd_set_link_off(hba);
7362 }
7363
7364out:
7365 return ret;
7366}
7367
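/*
 * ufshcd_vreg_set_lpm - put the UFS device regulators into the lowest power
 * state allowed by the current device power mode and link state.
 */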
7368static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7369{
7370 /*
7371  * Some UFS devices may keep drawing more than sleep current (at least
7372  * for 500us) from the UFS rails (especially from the VCCQ rail) after
7373  * the link is put in a low power state. To avoid this, add a ~2ms
7374  * delay before putting these UFS rails in LPM mode.
7375  */
7376 if (!ufshcd_is_link_active(hba) &&
7377 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7378 usleep_range(2000, 2100);
7379
7380 /*
7381  * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
7382  * some power.
7383  *
7384  * If the UFS device and link are in OFF state, all power supplies
7385  * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is
7386  * not required. If the UFS link is inactive (Hibern8 or OFF state)
7387  * and the device is in sleep state, put the rails in low power mode.
7388  *
7389  * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7390  * anyway in a low power state, which saves some power.
7391  */
7392 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7393 !hba->dev_info.is_lu_power_on_wp) {
7394 ufshcd_setup_vreg(hba, false);
7395 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7396 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7397 if (!ufshcd_is_link_active(hba)) {
7398 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7399 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7400 }
7401 }
7402}
7403
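/*
 * ufshcd_vreg_set_hpm - counterpart of ufshcd_vreg_set_lpm(): bring the UFS
 * device regulators back to their active (high power) state.
 */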
7404static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7405{
7406 int ret = 0;
7407
7408 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7409 !hba->dev_info.is_lu_power_on_wp) {
7410 ret = ufshcd_setup_vreg(hba, true);
7411 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7412 if (!ret && !ufshcd_is_link_active(hba)) {
7413 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7414 if (ret)
7415 goto vcc_disable;
7416 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7417 if (ret)
7418 goto vccq_lpm;
7419 }
7420 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7421 }
7422 goto out;
7423
7424vccq_lpm:
7425 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7426vcc_disable:
7427 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7428out:
7429 return ret;
7430}
7431
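/* The host controller supply is only toggled when the link is completely off */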
7432static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7433{
7434 if (ufshcd_is_link_off(hba))
7435 ufshcd_setup_hba_vreg(hba, false);
7436}
7437
7438static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7439{
7440 if (ufshcd_is_link_off(hba))
7441 ufshcd_setup_hba_vreg(hba, true);
7442}
7443
7444 /**
7445  * ufshcd_suspend - helper function for suspend operations
7446  * @hba: per adapter instance
7447  * @pm_op: desired low power operation type
7448  *
7449  * This function will try to put the UFS device and link into low power
7450  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7451  * (System PM level).
7452  *
7453  * If this function is called during shutdown, it will make sure that
7454  * both UFS device and UFS link are powered off.
7455  *
7456  * NOTE: UFS device & link must be active before we enter this function.
7457  *
7458  * Returns 0 for success and non-zero for failure.
7459  */
7460static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7461{
7462 int ret = 0;
7463 enum ufs_pm_level pm_lvl;
7464 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7465 enum uic_link_state req_link_state;
7466
7467 hba->pm_op_in_progress = 1;
7468 if (!ufshcd_is_shutdown_pm(pm_op)) {
7469 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7470 hba->rpm_lvl : hba->spm_lvl;
7471 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7472 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7473 } else {
7474 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7475 req_link_state = UIC_LINK_OFF_STATE;
7476 }
7477
7478 /*
7479  * If we can't transition into any of the low power modes,
7480  * just gate the clocks.
7481  */
7482 ufshcd_hold(hba, false);
7483 hba->clk_gating.is_suspended = true;
7484
7485 if (hba->clk_scaling.is_allowed) {
7486 cancel_work_sync(&hba->clk_scaling.suspend_work);
7487 cancel_work_sync(&hba->clk_scaling.resume_work);
7488 ufshcd_suspend_clkscaling(hba);
7489 }
7490
7491 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7492 req_link_state == UIC_LINK_ACTIVE_STATE) {
7493 goto disable_clks;
7494 }
7495
7496 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7497 (req_link_state == hba->uic_link_state))
7498 goto enable_gating;
7499
7500 /* UFS device & link must be active before we enter this function */
7501 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7502 ret = -EINVAL;
7503 goto enable_gating;
7504 }
7505
7506 if (ufshcd_is_runtime_pm(pm_op)) {
7507 if (ufshcd_can_autobkops_during_suspend(hba)) {
7508 /*
7509  * The device is idle with no requests in the queue;
7510  * allow background operations if the bkops status shows
7511  * that performance might otherwise be impacted.
7512  */
7513 ret = ufshcd_urgent_bkops(hba);
7514 if (ret)
7515 goto enable_gating;
7516 } else {
7517 /* make sure that auto bkops is disabled */
7518 ufshcd_disable_auto_bkops(hba);
7519 }
7520 }
7521
7522 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7523 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7524 !ufshcd_is_runtime_pm(pm_op))) {
7525 /* ensure that bkops is disabled */
7526 ufshcd_disable_auto_bkops(hba);
7527 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7528 if (ret)
7529 goto enable_gating;
7530 }
7531
7532 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7533 if (ret)
7534 goto set_dev_active;
7535
7536 ufshcd_vreg_set_lpm(hba);
7537
7538disable_clks:
7539 /*
7540  * Call the vendor specific suspend callback. As these callbacks may
7541  * access vendor specific host controller register space, call them
7542  * before the host clocks are turned off.
7543  */
7544 ret = ufshcd_vops_suspend(hba, pm_op);
7545 if (ret)
7546 goto set_link_active;
7547
7548 if (!ufshcd_is_link_active(hba))
7549 ufshcd_setup_clocks(hba, false);
7550 else
7551 /* If the link is active, the device ref_clk can't be switched off */
7552 __ufshcd_setup_clocks(hba, false, true);
7553
7554 hba->clk_gating.state = CLKS_OFF;
7555 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7556
7557 /*
7558  * Disable the host irq; no host controller transactions are expected until resume.
7559  */
7560 ufshcd_disable_irq(hba);
7561
7562 ufshcd_hba_vreg_set_lpm(hba);
7563 goto out;
7564
7565set_link_active:
7566 if (hba->clk_scaling.is_allowed)
7567 ufshcd_resume_clkscaling(hba);
7568 ufshcd_vreg_set_hpm(hba);
7569 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7570 ufshcd_set_link_active(hba);
7571 else if (ufshcd_is_link_off(hba))
7572 ufshcd_host_reset_and_restore(hba);
7573set_dev_active:
7574 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7575 ufshcd_disable_auto_bkops(hba);
7576enable_gating:
7577 if (hba->clk_scaling.is_allowed)
7578 ufshcd_resume_clkscaling(hba);
7579 hba->clk_gating.is_suspended = false;
7580 ufshcd_release(hba);
7581out:
7582 hba->pm_op_in_progress = 0;
7583 return ret;
7584}
7585
7586 /**
7587  * ufshcd_resume - helper function for resume operations
7588  * @hba: per adapter instance
7589  * @pm_op: runtime PM or system PM
7590  *
7591  * This function basically brings the UFS device, UniPro link and controller
7592  * to active state.
7593  *
7594  * Returns 0 for success and non-zero for failure.
7595  */
7596static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7597{
7598 int ret;
7599 enum uic_link_state old_link_state;
7600
7601 hba->pm_op_in_progress = 1;
7602 old_link_state = hba->uic_link_state;
7603
7604 ufshcd_hba_vreg_set_hpm(hba);
7605
7606 ret = ufshcd_setup_clocks(hba, true);
7607 if (ret)
7608 goto out;
7609
7610 /* enable the host irq as the host controller would be active soon */
7611 ret = ufshcd_enable_irq(hba);
7612 if (ret)
7613 goto disable_irq_and_vops_clks;
7614
7615 ret = ufshcd_vreg_set_hpm(hba);
7616 if (ret)
7617 goto disable_irq_and_vops_clks;
7618
7619 /*
7620  * Call the vendor specific resume callback. As these callbacks may
7621  * access vendor specific host controller register space, call them
7622  * when the host clocks are ON.
7623  */
7624 ret = ufshcd_vops_resume(hba, pm_op);
7625 if (ret)
7626 goto disable_vreg;
7627
7628 if (ufshcd_is_link_hibern8(hba)) {
7629 ret = ufshcd_uic_hibern8_exit(hba);
7630 if (!ret)
7631 ufshcd_set_link_active(hba);
7632 else
7633 goto vendor_suspend;
7634 } else if (ufshcd_is_link_off(hba)) {
7635 ret = ufshcd_host_reset_and_restore(hba);
7636 /*
7637  * A full initialization of the host and the device is required
7638  * since the link was put to off during suspend.
7639  */
7640 if (ret || !ufshcd_is_link_active(hba))
7641 goto vendor_suspend;
7642 }
7643
7644 if (!ufshcd_is_ufs_dev_active(hba)) {
7645 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7646 if (ret)
7647 goto set_old_link_state;
7648 }
7649
7650 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7651 ufshcd_enable_auto_bkops(hba);
7652 else
7653 /*
7654  * If BKOPs operations are urgently needed at this moment then
7655  * keep auto-bkops enabled, or else disable it.
7656  */
7657 ufshcd_urgent_bkops(hba);
7658
7659 hba->clk_gating.is_suspended = false;
7660
7661 if (hba->clk_scaling.is_allowed)
7662 ufshcd_resume_clkscaling(hba);
7663
7664 /* Schedule clock gating in case of no access to the UFS device yet */
7665 ufshcd_release(hba);
7666
7667 /* Enable Auto-Hibernate if configured */
7668 ufshcd_auto_hibern8_enable(hba);
7669
7670 goto out;
7671
7672set_old_link_state:
7673 ufshcd_link_state_transition(hba, old_link_state, 0);
7674vendor_suspend:
7675 ufshcd_vops_suspend(hba, pm_op);
7676disable_vreg:
7677 ufshcd_vreg_set_lpm(hba);
7678disable_irq_and_vops_clks:
7679 ufshcd_disable_irq(hba);
7680 if (hba->clk_scaling.is_allowed)
7681 ufshcd_suspend_clkscaling(hba);
7682 ufshcd_setup_clocks(hba, false);
7683out:
7684 hba->pm_op_in_progress = 0;
7685 return ret;
7686}
7687
7688 /**
7689  * ufshcd_system_suspend - system suspend routine
7690  * @hba: per adapter instance
7691  *
7692  * Check the description of ufshcd_suspend() function for more details.
7693  *
7694  * Returns 0 for success and non-zero for failure.
7695  */
7696int ufshcd_system_suspend(struct ufs_hba *hba)
7697{
7698 int ret = 0;
7699 ktime_t start = ktime_get();
7700
7701 if (!hba || !hba->is_powered)
7702 return 0;
7703
7704 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7705 hba->curr_dev_pwr_mode) &&
7706 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7707 hba->uic_link_state))
7708 goto out;
7709
7710 if (pm_runtime_suspended(hba->dev)) {
7711 /*
7712  * UFS device and/or UFS link low power states during runtime
7713  * suspend seem to be different than what is expected during
7714  * system suspend. Hence runtime resume the device & link and
7715  * let the system suspend low power states take effect.
7716  * TODO: If resume takes a long time, this could be optimized in
7717  * the future by not resuming everything if possible.
7718  */
7719 ret = ufshcd_runtime_resume(hba);
7720 if (ret)
7721 goto out;
7722 }
7723
7724 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7725out:
7726 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7727 ktime_to_us(ktime_sub(ktime_get(), start)),
7728 hba->curr_dev_pwr_mode, hba->uic_link_state);
7729 if (!ret)
7730 hba->is_sys_suspended = true;
7731 return ret;
7732}
7733EXPORT_SYMBOL(ufshcd_system_suspend);
7734
7735 /**
7736  * ufshcd_system_resume - system resume routine
7737  * @hba: per adapter instance
7738  *
7739  * Brings the UFS device, link and controller back to active state.
7740  * Returns 0 for success and non-zero for failure.
7741  */
7742int ufshcd_system_resume(struct ufs_hba *hba)
7743{
7744 int ret = 0;
7745 ktime_t start = ktime_get();
7746
7747 if (!hba)
7748 return -EINVAL;
7749
7750 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7751 /*
7752  * Let the runtime resume take care of resuming
7753  * if runtime suspended.
7754  */
7755 goto out;
7756 else
7757 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7758out:
7759 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7760 ktime_to_us(ktime_sub(ktime_get(), start)),
7761 hba->curr_dev_pwr_mode, hba->uic_link_state);
7762 return ret;
7763}
7764EXPORT_SYMBOL(ufshcd_system_resume);
7765
7766 /**
7767  * ufshcd_runtime_suspend - runtime suspend routine
7768  * @hba: per adapter instance
7769  *
7770  * Check the description of ufshcd_suspend() function for more details.
7771  *
7772  * Returns 0 for success and non-zero for failure.
7773  */
7774int ufshcd_runtime_suspend(struct ufs_hba *hba)
7775{
7776 int ret = 0;
7777 ktime_t start = ktime_get();
7778
7779 if (!hba)
7780 return -EINVAL;
7781
7782 if (!hba->is_powered)
7783 goto out;
7784 else
7785 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7786out:
7787 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7788 ktime_to_us(ktime_sub(ktime_get(), start)),
7789 hba->curr_dev_pwr_mode, hba->uic_link_state);
7790 return ret;
7791}
7792EXPORT_SYMBOL(ufshcd_runtime_suspend);
7793
7794 /**
7795  * ufshcd_runtime_resume - runtime resume routine
7796  * @hba: per adapter instance
7797  *
7798  * This function basically brings the UFS device, UniPro link and controller
7799  * to active state. The following operations are done in this function:
7800  *
7801  * 1. Turn on all the controller related clocks
7802  * 2. Bring the UniPro link out of Hibernate state
7803  * 3. If the UFS device is in sleep state, turn ON the VCC rail and bring
7804  *    the UFS device to active state.
7805  * 4. If auto-bkops is enabled on the device, disable it.
7806  *
7807  * So the following would be the possible power state after this function
7808  * returns successfully:
7809  *	S1: UFS device in Active state with VCC rail ON
7810  *	    UniPro link in Active state
7811  *	    All the UFS/UniPro controller clocks are ON
7812  *
7813  * Returns 0 for success and non-zero for failure.
7814  */
7815int ufshcd_runtime_resume(struct ufs_hba *hba)
7816{
7817 int ret = 0;
7818 ktime_t start = ktime_get();
7819
7820 if (!hba)
7821 return -EINVAL;
7822
7823 if (!hba->is_powered)
7824 goto out;
7825 else
7826 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7827out:
7828 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7829 ktime_to_us(ktime_sub(ktime_get(), start)),
7830 hba->curr_dev_pwr_mode, hba->uic_link_state);
7831 return ret;
7832}
7833EXPORT_SYMBOL(ufshcd_runtime_resume);
7834
7835int ufshcd_runtime_idle(struct ufs_hba *hba)
7836{
7837 return 0;
7838}
7839EXPORT_SYMBOL(ufshcd_runtime_idle);
7840
7841 /**
7842  * ufshcd_shutdown - shutdown routine
7843  * @hba: per adapter instance
7844  *
7845  * This function would power off both UFS device and UFS link.
7846  *
7847  * Returns 0 always to allow force shutdown even in case of errors.
7848  */
7849int ufshcd_shutdown(struct ufs_hba *hba)
7850{
7851 int ret = 0;
7852
7853 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7854 goto out;
7855
7856 if (pm_runtime_suspended(hba->dev)) {
7857 ret = ufshcd_runtime_resume(hba);
7858 if (ret)
7859 goto out;
7860 }
7861
7862 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7863out:
7864 if (ret)
7865 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7866
7867 return 0;
7868}
7869EXPORT_SYMBOL(ufshcd_shutdown);
7870
7871 /**
7872  * ufshcd_remove - remove the SCSI host and free host controller
7873  *		   resources
7874  * @hba: per adapter instance
7875  */
7876void ufshcd_remove(struct ufs_hba *hba)
7877{
7878 ufs_sysfs_remove_nodes(hba->dev);
7879 scsi_remove_host(hba->host);
7880 /* disable interrupts */
7881 ufshcd_disable_intr(hba, hba->intr_mask);
7882 ufshcd_hba_stop(hba, true);
7883
7884 ufshcd_exit_clk_gating(hba);
7885 if (ufshcd_is_clkscaling_supported(hba))
7886 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
7887 ufshcd_hba_exit(hba);
7888}
7889EXPORT_SYMBOL_GPL(ufshcd_remove);
7890
7891 /**
7892  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
7893  * @hba: pointer to Host Bus Adapter (HBA)
7894  */
7895void ufshcd_dealloc_host(struct ufs_hba *hba)
7896{
7897 scsi_host_put(hba->host);
7898}
7899EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7900
7901 /**
7902  * ufshcd_set_dma_mask - Set dma mask based on the controller
7903  *			 addressing capability
7904  * @hba: per adapter instance
7905  *
7906  * Returns 0 for success, non-zero for failure.
7907  */
7908static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7909{
7910 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7911 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7912 return 0;
7913 }
7914 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7915}
7916
7917 /**
7918  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
7919  * @dev: pointer to device handle
7920  * @hba_handle: driver private handle
7921  * Returns 0 on success, non-zero value on failure.
7922  */
7923int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7924{
7925 struct Scsi_Host *host;
7926 struct ufs_hba *hba;
7927 int err = 0;
7928
7929 if (!dev) {
7930 dev_err(dev,
7931 "Invalid memory reference for dev is NULL\n");
7932 err = -ENODEV;
7933 goto out_error;
7934 }
7935
7936 host = scsi_host_alloc(&ufshcd_driver_template,
7937 sizeof(struct ufs_hba));
7938 if (!host) {
7939 dev_err(dev, "scsi_host_alloc failed\n");
7940 err = -ENOMEM;
7941 goto out_error;
7942 }
7943
7944 /*
7945  * Do not use blk-mq at this time because blk-mq does not support
7946  * runtime pm.
7947  */
7948 host->use_blk_mq = false;
7949
7950 hba = shost_priv(host);
7951 hba->host = host;
7952 hba->dev = dev;
7953 *hba_handle = hba;
7954
7955 INIT_LIST_HEAD(&hba->clk_list_head);
7956
7957out_error:
7958 return err;
7959}
7960EXPORT_SYMBOL(ufshcd_alloc_host);
7961
7962 /**
7963  * ufshcd_init - Driver initialization routine
7964  * @hba: per-adapter instance
7965  * @mmio_base: base register address
7966  * @irq: Interrupt line of device
7967  * Returns 0 on success, non-zero value on failure.
7968  */
7969int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7970{
7971 int err;
7972 struct Scsi_Host *host = hba->host;
7973 struct device *dev = hba->dev;
7974
7975 if (!mmio_base) {
7976 dev_err(hba->dev,
7977 "Invalid memory reference for mmio_base is NULL\n");
7978 err = -ENODEV;
7979 goto out_error;
7980 }
7981
7982 hba->mmio_base = mmio_base;
7983 hba->irq = irq;
7984
7985 /* Set descriptor lengths to the specification defaults */
7986 ufshcd_def_desc_sizes(hba);
7987
7988 err = ufshcd_hba_init(hba);
7989 if (err)
7990 goto out_error;
7991
7992 /* Read capabilities registers */
7993 ufshcd_hba_capabilities(hba);
7994
7995 /* Get the UFS version supported by the controller */
7996 hba->ufs_version = ufshcd_get_ufs_version(hba);
7997
7998 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7999 (hba->ufs_version != UFSHCI_VERSION_11) &&
8000 (hba->ufs_version != UFSHCI_VERSION_20) &&
8001 (hba->ufs_version != UFSHCI_VERSION_21))
8002 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8003 hba->ufs_version);
8004
8005 /* Get the interrupt bit mask per version */
8006 hba->intr_mask = ufshcd_get_intr_mask(hba);
8007
8008 err = ufshcd_set_dma_mask(hba);
8009 if (err) {
8010 dev_err(hba->dev, "set dma mask failed\n");
8011 goto out_disable;
8012 }
8013
8014 /* Allocate memory for host memory space */
8015 err = ufshcd_memory_alloc(hba);
8016 if (err) {
8017 dev_err(hba->dev, "Memory allocation failed\n");
8018 goto out_disable;
8019 }
8020
8021 /* Configure the local reference blocks (LRBs) */
8022 ufshcd_host_memory_configure(hba);
8023
8024 host->can_queue = hba->nutrs;
8025 host->cmd_per_lun = hba->nutrs;
8026 host->max_id = UFSHCD_MAX_ID;
8027 host->max_lun = UFS_MAX_LUNS;
8028 host->max_channel = UFSHCD_MAX_CHANNEL;
8029 host->unique_id = host->host_no;
8030 host->max_cmd_len = MAX_CDB_SIZE;
8031
8032 hba->max_pwr_info.is_valid = false;
8033
8034 /* Initialize wait queues for task management */
8035 init_waitqueue_head(&hba->tm_wq);
8036 init_waitqueue_head(&hba->tm_tag_wq);
8037
8038 /* Initialize error handling and exception event work */
8039 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8040 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8041
8042 /* Initialize UIC command mutex */
8043 mutex_init(&hba->uic_cmd_mutex);
8044
8045 /* Initialize mutex for device management commands */
8046 mutex_init(&hba->dev_cmd.lock);
8047
8048 init_rwsem(&hba->clk_scaling_lock);
8049
8050 /* Initialize device management tag acquire wait queue */
8051 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8052
8053 ufshcd_init_clk_gating(hba);
8054
8055 /*
8056  * In order to avoid any spurious interrupt immediately after
8057  * registering the UFS controller interrupt handler, clear any pending
8058  * UFS interrupt status and disable all the UFS interrupts.
8059  */
8060 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8061 REG_INTERRUPT_STATUS);
8062 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8063
8064 /*
8065  * Ensure the interrupt disable/clear writes above complete before registering the handler.
8066  */
8067 mb();
8068
8069 /* IRQ registration */
8070 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8071 if (err) {
8072 dev_err(hba->dev, "request irq failed\n");
8073 goto exit_gating;
8074 } else {
8075 hba->is_irq_enabled = true;
8076 }
8077
8078 err = scsi_add_host(host, hba->dev);
8079 if (err) {
8080 dev_err(hba->dev, "scsi_add_host failed\n");
8081 goto exit_gating;
8082 }
8083
8084 /* Host controller enable */
8085 err = ufshcd_hba_enable(hba);
8086 if (err) {
8087 dev_err(hba->dev, "Host controller enable failed\n");
8088 ufshcd_print_host_regs(hba);
8089 ufshcd_print_host_state(hba);
8090 goto out_remove_scsi_host;
8091 }
8092
8093 if (ufshcd_is_clkscaling_supported(hba)) {
8094 char wq_name[sizeof("ufs_clkscaling_00")];
8095
8096 INIT_WORK(&hba->clk_scaling.suspend_work,
8097 ufshcd_clk_scaling_suspend_work);
8098 INIT_WORK(&hba->clk_scaling.resume_work,
8099 ufshcd_clk_scaling_resume_work);
8100
8101 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8102 host->host_no);
8103 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8104
8105 ufshcd_clkscaling_init_sysfs(hba);
8106 }
8107
8108 /*
8109  * Set the default power management level for runtime and system PM.
8110  * The default power saving mode is to keep the UFS link in Hibern8
8111  * state and the UFS device in sleep state.
8112  */
8113 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8114 UFS_SLEEP_PWR_MODE,
8115 UIC_LINK_HIBERN8_STATE);
8116 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8117 UFS_SLEEP_PWR_MODE,
8118 UIC_LINK_HIBERN8_STATE);
8119
8120 /* Set the default auto-hibern8 idle timer value to 150 ms */
8121 if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
8122 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8123 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8124 }
8125
8126 /* Hold auto suspend until the async scan completes */
8127 pm_runtime_get_sync(dev);
8128 atomic_set(&hba->scsi_block_reqs_cnt, 0);
8129
8130 /*
8131  * We are assuming that the device wasn't put in sleep/power-down state
8132  * exclusively during the boot stage before the kernel. This assumption
8133  * helps avoid doing link startup twice during ufshcd_probe_hba().
8134  */
8135 ufshcd_set_ufs_dev_active(hba);
8136
8137 async_schedule(ufshcd_async_scan, hba);
8138 ufs_sysfs_add_nodes(hba->dev);
8139
8140 return 0;
8141
8142out_remove_scsi_host:
8143 scsi_remove_host(hba->host);
8144exit_gating:
8145 ufshcd_exit_clk_gating(hba);
8146out_disable:
8147 hba->is_irq_enabled = false;
8148 ufshcd_hba_exit(hba);
8149out_error:
8150 return err;
8151}
8152EXPORT_SYMBOL_GPL(ufshcd_init);
8153
8154MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8155MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8156MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8157MODULE_LICENSE("GPL");
8158MODULE_VERSION(UFSHCD_DRIVER_VERSION);
8159