1
2
3
4
5
6
7
8
9
10
11
12#include <linux/async.h>
13#include <linux/devfreq.h>
14#include <linux/nls.h>
15#include <linux/of.h>
16#include <linux/bitfield.h>
17#include <linux/blk-pm.h>
18#include <linux/blkdev.h>
19#include <scsi/scsi_driver.h>
20#include "ufshcd.h"
21#include "ufs_quirks.h"
22#include "unipro.h"
23#include "ufs-sysfs.h"
24#include "ufs-debugfs.h"
25#include "ufs_bsg.h"
26#include "ufshcd-crypto.h"
27#include <asm/unaligned.h>
28#include "../sd.h"
29
30#define CREATE_TRACE_POINTS
31#include <trace/events/ufs.h>
32
33#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
34 UTP_TASK_REQ_COMPL |\
35 UFSHCD_ERROR_MASK)
36
37#define UIC_CMD_TIMEOUT 500
38
39
40#define NOP_OUT_RETRIES 10
41
42#define NOP_OUT_TIMEOUT 50
43
44
45#define QUERY_REQ_RETRIES 3
46
47#define QUERY_REQ_TIMEOUT 1500
48
49
50#define TM_CMD_TIMEOUT 100
51
52
53#define UFS_UIC_COMMAND_RETRIES 3
54
55
56#define DME_LINKSTARTUP_RETRIES 3
57
58
59#define UIC_HIBERN8_ENTER_RETRIES 3
60
61
62#define MAX_HOST_RESET_RETRIES 5
63
64
65#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
66
67
68#define INT_AGGR_DEF_TO 0x02
69
70
71#define RPM_AUTOSUSPEND_DELAY_MS 2000
72
73
74#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
75
76
77#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF
78
79
80#define FDEVICEINIT_COMPL_TIMEOUT 1500
81
82#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
83
84#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
85 ({ \
86 int _ret; \
87 if (_on) \
88 _ret = ufshcd_enable_vreg(_dev, _vreg); \
89 else \
90 _ret = ufshcd_disable_vreg(_dev, _vreg); \
91 _ret; \
92 })
93
94#define ufshcd_hex_dump(prefix_str, buf, len) do { \
95 size_t __len = (len); \
96 print_hex_dump(KERN_ERR, prefix_str, \
97 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
98 16, 4, buf, __len, false); \
99} while (0)
100
101int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
102 const char *prefix)
103{
104 u32 *regs;
105 size_t pos;
106
107 if (offset % 4 != 0 || len % 4 != 0)
108 return -EINVAL;
109
110 regs = kzalloc(len, GFP_ATOMIC);
111 if (!regs)
112 return -ENOMEM;
113
114 for (pos = 0; pos < len; pos += 4)
115 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
116
117 ufshcd_hex_dump(prefix, regs, len);
118 kfree(regs);
119
120 return 0;
121}
122EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
123
124enum {
125 UFSHCD_MAX_CHANNEL = 0,
126 UFSHCD_MAX_ID = 1,
127 UFSHCD_CMD_PER_LUN = 32,
128 UFSHCD_CAN_QUEUE = 32,
129};
130
131
132enum {
133 UFSHCD_STATE_RESET,
134 UFSHCD_STATE_ERROR,
135 UFSHCD_STATE_OPERATIONAL,
136 UFSHCD_STATE_EH_SCHEDULED_FATAL,
137 UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
138};
139
140
141enum {
142 UFSHCD_EH_IN_PROGRESS = (1 << 0),
143};
144
145
146enum {
147 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0),
148 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1),
149 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2),
150 UFSHCD_UIC_NL_ERROR = (1 << 3),
151 UFSHCD_UIC_TL_ERROR = (1 << 4),
152 UFSHCD_UIC_DME_ERROR = (1 << 5),
153 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6),
154};
155
156#define ufshcd_set_eh_in_progress(h) \
157 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
158#define ufshcd_eh_in_progress(h) \
159 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
160#define ufshcd_clear_eh_in_progress(h) \
161 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
162
163struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
164 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
165 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
166 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
167 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
168 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
169 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
170
171
172
173
174 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
175};
176
177static inline enum ufs_dev_pwr_mode
178ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
179{
180 return ufs_pm_lvl_states[lvl].dev_state;
181}
182
183static inline enum uic_link_state
184ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
185{
186 return ufs_pm_lvl_states[lvl].link_state;
187}
188
189static inline enum ufs_pm_level
190ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
191 enum uic_link_state link_state)
192{
193 enum ufs_pm_level lvl;
194
195 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
196 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
197 (ufs_pm_lvl_states[lvl].link_state == link_state))
198 return lvl;
199 }
200
201
202 return UFS_PM_LVL_0;
203}
204
205static struct ufs_dev_fix ufs_fixups[] = {
206
207 UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
208 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
209 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
210 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
211 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
212 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
213 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
214 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
215 UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" ,
216 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
217 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
218 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
219 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
220 UFS_DEVICE_QUIRK_PA_TACTIVATE),
221 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
222 UFS_DEVICE_QUIRK_PA_TACTIVATE),
223 END_FIX
224};
225
226static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
227static void ufshcd_async_scan(void *data, async_cookie_t cookie);
228static int ufshcd_reset_and_restore(struct ufs_hba *hba);
229static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
230static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
231static void ufshcd_hba_exit(struct ufs_hba *hba);
232static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
233static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
234static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
235static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
236static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
237static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
238static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
239static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
240static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
241static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
242static irqreturn_t ufshcd_intr(int irq, void *__hba);
243static int ufshcd_change_power_mode(struct ufs_hba *hba,
244 struct ufs_pa_layer_attr *pwr_mode);
245static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
246static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
247static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
248static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
249 struct ufs_vreg *vreg);
250static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
251static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
252static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
253static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
254static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
255
256static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
257{
258 return tag >= 0 && tag < hba->nutrs;
259}
260
261static inline void ufshcd_enable_irq(struct ufs_hba *hba)
262{
263 if (!hba->is_irq_enabled) {
264 enable_irq(hba->irq);
265 hba->is_irq_enabled = true;
266 }
267}
268
269static inline void ufshcd_disable_irq(struct ufs_hba *hba)
270{
271 if (hba->is_irq_enabled) {
272 disable_irq(hba->irq);
273 hba->is_irq_enabled = false;
274 }
275}
276
277static inline void ufshcd_wb_config(struct ufs_hba *hba)
278{
279 if (!ufshcd_is_wb_allowed(hba))
280 return;
281
282 ufshcd_wb_toggle(hba, true);
283
284 ufshcd_wb_toggle_flush_during_h8(hba, true);
285 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
286 ufshcd_wb_toggle_flush(hba, true);
287}
288
289static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
290{
291 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
292 scsi_unblock_requests(hba->host);
293}
294
295static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
296{
297 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
298 scsi_block_requests(hba->host);
299}
300
301static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
302 enum ufs_trace_str_t str_t)
303{
304 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
305 struct utp_upiu_header *header;
306
307 if (!trace_ufshcd_upiu_enabled())
308 return;
309
310 if (str_t == UFS_CMD_SEND)
311 header = &rq->header;
312 else
313 header = &hba->lrb[tag].ucd_rsp_ptr->header;
314
315 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
316 UFS_TSF_CDB);
317}
318
319static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
320 enum ufs_trace_str_t str_t,
321 struct utp_upiu_req *rq_rsp)
322{
323 if (!trace_ufshcd_upiu_enabled())
324 return;
325
326 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
327 &rq_rsp->qr, UFS_TSF_OSF);
328}
329
330static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
331 enum ufs_trace_str_t str_t)
332{
333 int off = (int)tag - hba->nutrs;
334 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
335
336 if (!trace_ufshcd_upiu_enabled())
337 return;
338
339 if (str_t == UFS_TM_SEND)
340 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
341 &descp->upiu_req.req_header,
342 &descp->upiu_req.input_param1,
343 UFS_TSF_TM_INPUT);
344 else
345 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
346 &descp->upiu_rsp.rsp_header,
347 &descp->upiu_rsp.output_param1,
348 UFS_TSF_TM_OUTPUT);
349}
350
351static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
352 struct uic_command *ucmd,
353 enum ufs_trace_str_t str_t)
354{
355 u32 cmd;
356
357 if (!trace_ufshcd_uic_command_enabled())
358 return;
359
360 if (str_t == UFS_CMD_SEND)
361 cmd = ucmd->command;
362 else
363 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
364
365 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
366 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
367 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
368 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
369}
370
371static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
372 enum ufs_trace_str_t str_t)
373{
374 u64 lba = -1;
375 u8 opcode = 0, group_id = 0;
376 u32 intr, doorbell;
377 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
378 struct scsi_cmnd *cmd = lrbp->cmd;
379 int transfer_len = -1;
380
381 if (!cmd)
382 return;
383
384 if (!trace_ufshcd_command_enabled()) {
385
386 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
387 return;
388 }
389
390
391 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
392 opcode = cmd->cmnd[0];
393 lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
394
395 if (opcode == READ_10 || opcode == WRITE_10) {
396
397
398
399 transfer_len =
400 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
401 if (opcode == WRITE_10)
402 group_id = lrbp->cmd->cmnd[6];
403 } else if (opcode == UNMAP) {
404
405
406
407 transfer_len = blk_rq_bytes(cmd->request);
408 }
409
410 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
411 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
412 trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
413 doorbell, transfer_len, intr, lba, opcode, group_id);
414}
415
416static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
417{
418 struct ufs_clk_info *clki;
419 struct list_head *head = &hba->clk_list_head;
420
421 if (list_empty(head))
422 return;
423
424 list_for_each_entry(clki, head, list) {
425 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
426 clki->max_freq)
427 dev_err(hba->dev, "clk: %s, rate: %u\n",
428 clki->name, clki->curr_freq);
429 }
430}
431
432static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
433 char *err_name)
434{
435 int i;
436 bool found = false;
437 struct ufs_event_hist *e;
438
439 if (id >= UFS_EVT_CNT)
440 return;
441
442 e = &hba->ufs_stats.event[id];
443
444 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
445 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
446
447 if (e->tstamp[p] == 0)
448 continue;
449 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
450 e->val[p], ktime_to_us(e->tstamp[p]));
451 found = true;
452 }
453
454 if (!found)
455 dev_err(hba->dev, "No record of %s\n", err_name);
456 else
457 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
458}
459
460static void ufshcd_print_evt_hist(struct ufs_hba *hba)
461{
462 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
463
464 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
465 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
466 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
467 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
468 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
469 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
470 "auto_hibern8_err");
471 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
472 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
473 "link_startup_fail");
474 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
475 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
476 "suspend_fail");
477 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
478 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
479 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
480
481 ufshcd_vops_dbg_register_dump(hba);
482}
483
484static
485void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
486{
487 struct ufshcd_lrb *lrbp;
488 int prdt_length;
489 int tag;
490
491 for_each_set_bit(tag, &bitmap, hba->nutrs) {
492 lrbp = &hba->lrb[tag];
493
494 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
495 tag, ktime_to_us(lrbp->issue_time_stamp));
496 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
497 tag, ktime_to_us(lrbp->compl_time_stamp));
498 dev_err(hba->dev,
499 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
500 tag, (u64)lrbp->utrd_dma_addr);
501
502 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
503 sizeof(struct utp_transfer_req_desc));
504 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
505 (u64)lrbp->ucd_req_dma_addr);
506 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
507 sizeof(struct utp_upiu_req));
508 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
509 (u64)lrbp->ucd_rsp_dma_addr);
510 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
511 sizeof(struct utp_upiu_rsp));
512
513 prdt_length = le16_to_cpu(
514 lrbp->utr_descriptor_ptr->prd_table_length);
515 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
516 prdt_length /= sizeof(struct ufshcd_sg_entry);
517
518 dev_err(hba->dev,
519 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
520 tag, prdt_length,
521 (u64)lrbp->ucd_prdt_dma_addr);
522
523 if (pr_prdt)
524 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
525 sizeof(struct ufshcd_sg_entry) * prdt_length);
526 }
527}
528
529static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
530{
531 int tag;
532
533 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
534 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
535
536 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
537 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
538 }
539}
540
541static void ufshcd_print_host_state(struct ufs_hba *hba)
542{
543 struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
544
545 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
546 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
547 hba->outstanding_reqs, hba->outstanding_tasks);
548 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
549 hba->saved_err, hba->saved_uic_err);
550 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
551 hba->curr_dev_pwr_mode, hba->uic_link_state);
552 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
553 hba->pm_op_in_progress, hba->is_sys_suspended);
554 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
555 hba->auto_bkops_enabled, hba->host->host_self_blocked);
556 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
557 dev_err(hba->dev,
558 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
559 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
560 hba->ufs_stats.hibern8_exit_cnt);
561 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
562 ktime_to_us(hba->ufs_stats.last_intr_ts),
563 hba->ufs_stats.last_intr_status);
564 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
565 hba->eh_flags, hba->req_abort_count);
566 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
567 hba->ufs_version, hba->capabilities, hba->caps);
568 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
569 hba->dev_quirks);
570 if (sdev_ufs)
571 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
572 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
573
574 ufshcd_print_clk_freqs(hba);
575}
576
577
578
579
580
581
582static void ufshcd_print_pwr_info(struct ufs_hba *hba)
583{
584 static const char * const names[] = {
585 "INVALID MODE",
586 "FAST MODE",
587 "SLOW_MODE",
588 "INVALID MODE",
589 "FASTAUTO_MODE",
590 "SLOWAUTO_MODE",
591 "INVALID MODE",
592 };
593
594 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
595 __func__,
596 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
597 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
598 names[hba->pwr_info.pwr_rx],
599 names[hba->pwr_info.pwr_tx],
600 hba->pwr_info.hs_rate);
601}
602
603static void ufshcd_device_reset(struct ufs_hba *hba)
604{
605 int err;
606
607 err = ufshcd_vops_device_reset(hba);
608
609 if (!err) {
610 ufshcd_set_ufs_dev_active(hba);
611 if (ufshcd_is_wb_allowed(hba)) {
612 hba->dev_info.wb_enabled = false;
613 hba->dev_info.wb_buf_flush_enabled = false;
614 }
615 }
616 if (err != -EOPNOTSUPP)
617 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
618}
619
620void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
621{
622 if (!us)
623 return;
624
625 if (us < 10)
626 udelay(us);
627 else
628 usleep_range(us, us + tolerance);
629}
630EXPORT_SYMBOL_GPL(ufshcd_delay_us);
631
632
633
634
635
636
637
638
639
640
641
642
643
644int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
645 u32 val, unsigned long interval_us,
646 unsigned long timeout_ms)
647{
648 int err = 0;
649 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
650
651
652 val = val & mask;
653
654 while ((ufshcd_readl(hba, reg) & mask) != val) {
655 usleep_range(interval_us, interval_us + 50);
656 if (time_after(jiffies, timeout)) {
657 if ((ufshcd_readl(hba, reg) & mask) != val)
658 err = -ETIMEDOUT;
659 break;
660 }
661 }
662
663 return err;
664}
665
666
667
668
669
670
671
672static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
673{
674 if (hba->ufs_version == ufshci_version(1, 0))
675 return INTERRUPT_MASK_ALL_VER_10;
676 if (hba->ufs_version <= ufshci_version(2, 0))
677 return INTERRUPT_MASK_ALL_VER_11;
678
679 return INTERRUPT_MASK_ALL_VER_21;
680}
681
682
683
684
685
686
687
688static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
689{
690 u32 ufshci_ver;
691
692 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
693 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
694 else
695 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
696
697
698
699
700
701
702 if (ufshci_ver & 0x00010000)
703 return ufshci_version(1, ufshci_ver & 0x00000100);
704
705 return ufshci_ver;
706}
707
708
709
710
711
712
713
714
715static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
716{
717 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
718 DEVICE_PRESENT) ? true : false;
719}
720
721
722
723
724
725
726
727
728static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
729{
730 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
731}
732
733
734
735
736
737
738static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
739{
740 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
741 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
742 else
743 ufshcd_writel(hba, ~(1 << pos),
744 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
745}
746
747
748
749
750
751
752static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
753{
754 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
755 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
756 else
757 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
758}
759
760
761
762
763
764
765static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
766{
767 clear_bit(tag, &hba->outstanding_reqs);
768}
769
770
771
772
773
774
775
776static inline int ufshcd_get_lists_status(u32 reg)
777{
778 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
779}
780
781
782
783
784
785
786
787
788static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
789{
790 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
791 MASK_UIC_COMMAND_RESULT;
792}
793
794
795
796
797
798
799
800
801static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
802{
803 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
804}
805
806
807
808
809
810static inline int
811ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
812{
813 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
814}
815
816
817
818
819
820
821
822
823static inline int
824ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
825{
826 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
827}
828
829
830
831
832
833
834
835
836static inline unsigned int
837ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
838{
839 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
840 MASK_RSP_UPIU_DATA_SEG_LEN;
841}
842
843
844
845
846
847
848
849
850
851
852static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
853{
854 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
855 MASK_RSP_EXCEPTION_EVENT ? true : false;
856}
857
858
859
860
861
862static inline void
863ufshcd_reset_intr_aggr(struct ufs_hba *hba)
864{
865 ufshcd_writel(hba, INT_AGGR_ENABLE |
866 INT_AGGR_COUNTER_AND_TIMER_RESET,
867 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
868}
869
870
871
872
873
874
875
876static inline void
877ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
878{
879 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
880 INT_AGGR_COUNTER_THLD_VAL(cnt) |
881 INT_AGGR_TIMEOUT_VAL(tmout),
882 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
883}
884
885
886
887
888
889static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
890{
891 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
892}
893
894
895
896
897
898
899
900static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
901{
902 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
903 REG_UTP_TASK_REQ_LIST_RUN_STOP);
904 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
905 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
906}
907
908
909
910
911
912static inline void ufshcd_hba_start(struct ufs_hba *hba)
913{
914 u32 val = CONTROLLER_ENABLE;
915
916 if (ufshcd_crypto_enable(hba))
917 val |= CRYPTO_GENERAL_ENABLE;
918
919 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
920}
921
922
923
924
925
926
927
928static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
929{
930 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
931 ? false : true;
932}
933
934u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
935{
936
937 if (hba->ufs_version <= ufshci_version(1, 1))
938 return UFS_UNIPRO_VER_1_41;
939 else
940 return UFS_UNIPRO_VER_1_6;
941}
942EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
943
944static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
945{
946
947
948
949
950
951
952
953
954
955 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
956 return true;
957 else
958 return false;
959}
960
961
962
963
964
965
966
967
968
969static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
970{
971 int ret = 0;
972 struct ufs_clk_info *clki;
973 struct list_head *head = &hba->clk_list_head;
974
975 if (list_empty(head))
976 goto out;
977
978 list_for_each_entry(clki, head, list) {
979 if (!IS_ERR_OR_NULL(clki->clk)) {
980 if (scale_up && clki->max_freq) {
981 if (clki->curr_freq == clki->max_freq)
982 continue;
983
984 ret = clk_set_rate(clki->clk, clki->max_freq);
985 if (ret) {
986 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
987 __func__, clki->name,
988 clki->max_freq, ret);
989 break;
990 }
991 trace_ufshcd_clk_scaling(dev_name(hba->dev),
992 "scaled up", clki->name,
993 clki->curr_freq,
994 clki->max_freq);
995
996 clki->curr_freq = clki->max_freq;
997
998 } else if (!scale_up && clki->min_freq) {
999 if (clki->curr_freq == clki->min_freq)
1000 continue;
1001
1002 ret = clk_set_rate(clki->clk, clki->min_freq);
1003 if (ret) {
1004 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1005 __func__, clki->name,
1006 clki->min_freq, ret);
1007 break;
1008 }
1009 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1010 "scaled down", clki->name,
1011 clki->curr_freq,
1012 clki->min_freq);
1013 clki->curr_freq = clki->min_freq;
1014 }
1015 }
1016 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1017 clki->name, clk_get_rate(clki->clk));
1018 }
1019
1020out:
1021 return ret;
1022}
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1033{
1034 int ret = 0;
1035 ktime_t start = ktime_get();
1036
1037 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1038 if (ret)
1039 goto out;
1040
1041 ret = ufshcd_set_clk_freq(hba, scale_up);
1042 if (ret)
1043 goto out;
1044
1045 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1046 if (ret)
1047 ufshcd_set_clk_freq(hba, !scale_up);
1048
1049out:
1050 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1051 (scale_up ? "up" : "down"),
1052 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1053 return ret;
1054}
1055
1056
1057
1058
1059
1060
1061
1062
1063static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1064 bool scale_up)
1065{
1066 struct ufs_clk_info *clki;
1067 struct list_head *head = &hba->clk_list_head;
1068
1069 if (list_empty(head))
1070 return false;
1071
1072 list_for_each_entry(clki, head, list) {
1073 if (!IS_ERR_OR_NULL(clki->clk)) {
1074 if (scale_up && clki->max_freq) {
1075 if (clki->curr_freq == clki->max_freq)
1076 continue;
1077 return true;
1078 } else if (!scale_up && clki->min_freq) {
1079 if (clki->curr_freq == clki->min_freq)
1080 continue;
1081 return true;
1082 }
1083 }
1084 }
1085
1086 return false;
1087}
1088
1089static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1090 u64 wait_timeout_us)
1091{
1092 unsigned long flags;
1093 int ret = 0;
1094 u32 tm_doorbell;
1095 u32 tr_doorbell;
1096 bool timeout = false, do_last_check = false;
1097 ktime_t start;
1098
1099 ufshcd_hold(hba, false);
1100 spin_lock_irqsave(hba->host->host_lock, flags);
1101
1102
1103
1104
1105 start = ktime_get();
1106 do {
1107 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1108 ret = -EBUSY;
1109 goto out;
1110 }
1111
1112 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1113 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1114 if (!tm_doorbell && !tr_doorbell) {
1115 timeout = false;
1116 break;
1117 } else if (do_last_check) {
1118 break;
1119 }
1120
1121 spin_unlock_irqrestore(hba->host->host_lock, flags);
1122 schedule();
1123 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1124 wait_timeout_us) {
1125 timeout = true;
1126
1127
1128
1129
1130
1131 do_last_check = true;
1132 }
1133 spin_lock_irqsave(hba->host->host_lock, flags);
1134 } while (tm_doorbell || tr_doorbell);
1135
1136 if (timeout) {
1137 dev_err(hba->dev,
1138 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1139 __func__, tm_doorbell, tr_doorbell);
1140 ret = -EBUSY;
1141 }
1142out:
1143 spin_unlock_irqrestore(hba->host->host_lock, flags);
1144 ufshcd_release(hba);
1145 return ret;
1146}
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1158{
1159 int ret = 0;
1160 struct ufs_pa_layer_attr new_pwr_info;
1161
1162 if (scale_up) {
1163 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1164 sizeof(struct ufs_pa_layer_attr));
1165 } else {
1166 memcpy(&new_pwr_info, &hba->pwr_info,
1167 sizeof(struct ufs_pa_layer_attr));
1168
1169 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1170 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1171
1172 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1173 &hba->pwr_info,
1174 sizeof(struct ufs_pa_layer_attr));
1175
1176
1177 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1178 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1179 }
1180 }
1181
1182
1183 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1184 if (ret)
1185 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1186 __func__, ret,
1187 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1188 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1189
1190 return ret;
1191}
1192
1193static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1194{
1195 #define DOORBELL_CLR_TOUT_US (1000 * 1000)
1196 int ret = 0;
1197
1198
1199
1200
1201 ufshcd_scsi_block_requests(hba);
1202 down_write(&hba->clk_scaling_lock);
1203
1204 if (!hba->clk_scaling.is_allowed ||
1205 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1206 ret = -EBUSY;
1207 up_write(&hba->clk_scaling_lock);
1208 ufshcd_scsi_unblock_requests(hba);
1209 goto out;
1210 }
1211
1212
1213 ufshcd_hold(hba, false);
1214
1215out:
1216 return ret;
1217}
1218
1219static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
1220{
1221 if (writelock)
1222 up_write(&hba->clk_scaling_lock);
1223 else
1224 up_read(&hba->clk_scaling_lock);
1225 ufshcd_scsi_unblock_requests(hba);
1226 ufshcd_release(hba);
1227}
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1239{
1240 int ret = 0;
1241 bool is_writelock = true;
1242
1243 ret = ufshcd_clock_scaling_prepare(hba);
1244 if (ret)
1245 return ret;
1246
1247
1248 if (!scale_up) {
1249 ret = ufshcd_scale_gear(hba, false);
1250 if (ret)
1251 goto out_unprepare;
1252 }
1253
1254 ret = ufshcd_scale_clks(hba, scale_up);
1255 if (ret) {
1256 if (!scale_up)
1257 ufshcd_scale_gear(hba, true);
1258 goto out_unprepare;
1259 }
1260
1261
1262 if (scale_up) {
1263 ret = ufshcd_scale_gear(hba, true);
1264 if (ret) {
1265 ufshcd_scale_clks(hba, false);
1266 goto out_unprepare;
1267 }
1268 }
1269
1270
1271 downgrade_write(&hba->clk_scaling_lock);
1272 is_writelock = false;
1273 ufshcd_wb_toggle(hba, scale_up);
1274
1275out_unprepare:
1276 ufshcd_clock_scaling_unprepare(hba, is_writelock);
1277 return ret;
1278}
1279
1280static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1281{
1282 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1283 clk_scaling.suspend_work);
1284 unsigned long irq_flags;
1285
1286 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1287 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1288 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1289 return;
1290 }
1291 hba->clk_scaling.is_suspended = true;
1292 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1293
1294 __ufshcd_suspend_clkscaling(hba);
1295}
1296
1297static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1298{
1299 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1300 clk_scaling.resume_work);
1301 unsigned long irq_flags;
1302
1303 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1304 if (!hba->clk_scaling.is_suspended) {
1305 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1306 return;
1307 }
1308 hba->clk_scaling.is_suspended = false;
1309 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1310
1311 devfreq_resume_device(hba->devfreq);
1312}
1313
1314static int ufshcd_devfreq_target(struct device *dev,
1315 unsigned long *freq, u32 flags)
1316{
1317 int ret = 0;
1318 struct ufs_hba *hba = dev_get_drvdata(dev);
1319 ktime_t start;
1320 bool scale_up, sched_clk_scaling_suspend_work = false;
1321 struct list_head *clk_list = &hba->clk_list_head;
1322 struct ufs_clk_info *clki;
1323 unsigned long irq_flags;
1324
1325 if (!ufshcd_is_clkscaling_supported(hba))
1326 return -EINVAL;
1327
1328 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1329
1330 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1331 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1332 if (ufshcd_eh_in_progress(hba)) {
1333 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1334 return 0;
1335 }
1336
1337 if (!hba->clk_scaling.active_reqs)
1338 sched_clk_scaling_suspend_work = true;
1339
1340 if (list_empty(clk_list)) {
1341 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1342 goto out;
1343 }
1344
1345
1346 scale_up = (*freq == clki->max_freq) ? true : false;
1347 if (!scale_up)
1348 *freq = clki->min_freq;
1349
1350 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1351 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1352 ret = 0;
1353 goto out;
1354 }
1355 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1356
1357 start = ktime_get();
1358 ret = ufshcd_devfreq_scale(hba, scale_up);
1359
1360 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1361 (scale_up ? "up" : "down"),
1362 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1363
1364out:
1365 if (sched_clk_scaling_suspend_work)
1366 queue_work(hba->clk_scaling.workq,
1367 &hba->clk_scaling.suspend_work);
1368
1369 return ret;
1370}
1371
1372static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1373{
1374 int *busy = priv;
1375
1376 WARN_ON_ONCE(reserved);
1377 (*busy)++;
1378 return false;
1379}
1380
1381
1382static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1383{
1384 struct request_queue *q = hba->cmd_queue;
1385 int busy = 0;
1386
1387 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1388 return busy;
1389}
1390
1391static int ufshcd_devfreq_get_dev_status(struct device *dev,
1392 struct devfreq_dev_status *stat)
1393{
1394 struct ufs_hba *hba = dev_get_drvdata(dev);
1395 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1396 unsigned long flags;
1397 struct list_head *clk_list = &hba->clk_list_head;
1398 struct ufs_clk_info *clki;
1399 ktime_t curr_t;
1400
1401 if (!ufshcd_is_clkscaling_supported(hba))
1402 return -EINVAL;
1403
1404 memset(stat, 0, sizeof(*stat));
1405
1406 spin_lock_irqsave(hba->host->host_lock, flags);
1407 curr_t = ktime_get();
1408 if (!scaling->window_start_t)
1409 goto start_window;
1410
1411 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1412
1413
1414
1415
1416
1417 stat->current_frequency = clki->curr_freq;
1418 if (scaling->is_busy_started)
1419 scaling->tot_busy_t += ktime_us_delta(curr_t,
1420 scaling->busy_start_t);
1421
1422 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1423 stat->busy_time = scaling->tot_busy_t;
1424start_window:
1425 scaling->window_start_t = curr_t;
1426 scaling->tot_busy_t = 0;
1427
1428 if (hba->outstanding_reqs) {
1429 scaling->busy_start_t = curr_t;
1430 scaling->is_busy_started = true;
1431 } else {
1432 scaling->busy_start_t = 0;
1433 scaling->is_busy_started = false;
1434 }
1435 spin_unlock_irqrestore(hba->host->host_lock, flags);
1436 return 0;
1437}
1438
1439static int ufshcd_devfreq_init(struct ufs_hba *hba)
1440{
1441 struct list_head *clk_list = &hba->clk_list_head;
1442 struct ufs_clk_info *clki;
1443 struct devfreq *devfreq;
1444 int ret;
1445
1446
1447 if (list_empty(clk_list))
1448 return 0;
1449
1450 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1451 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1452 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1453
1454 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1455 &hba->vps->ondemand_data);
1456 devfreq = devfreq_add_device(hba->dev,
1457 &hba->vps->devfreq_profile,
1458 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1459 &hba->vps->ondemand_data);
1460 if (IS_ERR(devfreq)) {
1461 ret = PTR_ERR(devfreq);
1462 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1463
1464 dev_pm_opp_remove(hba->dev, clki->min_freq);
1465 dev_pm_opp_remove(hba->dev, clki->max_freq);
1466 return ret;
1467 }
1468
1469 hba->devfreq = devfreq;
1470
1471 return 0;
1472}
1473
1474static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1475{
1476 struct list_head *clk_list = &hba->clk_list_head;
1477 struct ufs_clk_info *clki;
1478
1479 if (!hba->devfreq)
1480 return;
1481
1482 devfreq_remove_device(hba->devfreq);
1483 hba->devfreq = NULL;
1484
1485 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1486 dev_pm_opp_remove(hba->dev, clki->min_freq);
1487 dev_pm_opp_remove(hba->dev, clki->max_freq);
1488}
1489
1490static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1491{
1492 unsigned long flags;
1493
1494 devfreq_suspend_device(hba->devfreq);
1495 spin_lock_irqsave(hba->host->host_lock, flags);
1496 hba->clk_scaling.window_start_t = 0;
1497 spin_unlock_irqrestore(hba->host->host_lock, flags);
1498}
1499
1500static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1501{
1502 unsigned long flags;
1503 bool suspend = false;
1504
1505 cancel_work_sync(&hba->clk_scaling.suspend_work);
1506 cancel_work_sync(&hba->clk_scaling.resume_work);
1507
1508 spin_lock_irqsave(hba->host->host_lock, flags);
1509 if (!hba->clk_scaling.is_suspended) {
1510 suspend = true;
1511 hba->clk_scaling.is_suspended = true;
1512 }
1513 spin_unlock_irqrestore(hba->host->host_lock, flags);
1514
1515 if (suspend)
1516 __ufshcd_suspend_clkscaling(hba);
1517}
1518
1519static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1520{
1521 unsigned long flags;
1522 bool resume = false;
1523
1524 spin_lock_irqsave(hba->host->host_lock, flags);
1525 if (hba->clk_scaling.is_suspended) {
1526 resume = true;
1527 hba->clk_scaling.is_suspended = false;
1528 }
1529 spin_unlock_irqrestore(hba->host->host_lock, flags);
1530
1531 if (resume)
1532 devfreq_resume_device(hba->devfreq);
1533}
1534
1535static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1536 struct device_attribute *attr, char *buf)
1537{
1538 struct ufs_hba *hba = dev_get_drvdata(dev);
1539
1540 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1541}
1542
1543static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1544 struct device_attribute *attr, const char *buf, size_t count)
1545{
1546 struct ufs_hba *hba = dev_get_drvdata(dev);
1547 u32 value;
1548 int err = 0;
1549
1550 if (kstrtou32(buf, 0, &value))
1551 return -EINVAL;
1552
1553 down(&hba->host_sem);
1554 if (!ufshcd_is_user_access_allowed(hba)) {
1555 err = -EBUSY;
1556 goto out;
1557 }
1558
1559 value = !!value;
1560 if (value == hba->clk_scaling.is_enabled)
1561 goto out;
1562
1563 ufshcd_rpm_get_sync(hba);
1564 ufshcd_hold(hba, false);
1565
1566 hba->clk_scaling.is_enabled = value;
1567
1568 if (value) {
1569 ufshcd_resume_clkscaling(hba);
1570 } else {
1571 ufshcd_suspend_clkscaling(hba);
1572 err = ufshcd_devfreq_scale(hba, true);
1573 if (err)
1574 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1575 __func__, err);
1576 }
1577
1578 ufshcd_release(hba);
1579 ufshcd_rpm_put_sync(hba);
1580out:
1581 up(&hba->host_sem);
1582 return err ? err : count;
1583}
1584
1585static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1586{
1587 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1588 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1589 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1590 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1591 hba->clk_scaling.enable_attr.attr.mode = 0644;
1592 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1593 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1594}
1595
1596static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1597{
1598 if (hba->clk_scaling.enable_attr.attr.name)
1599 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1600}
1601
1602static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1603{
1604 char wq_name[sizeof("ufs_clkscaling_00")];
1605
1606 if (!ufshcd_is_clkscaling_supported(hba))
1607 return;
1608
1609 if (!hba->clk_scaling.min_gear)
1610 hba->clk_scaling.min_gear = UFS_HS_G1;
1611
1612 INIT_WORK(&hba->clk_scaling.suspend_work,
1613 ufshcd_clk_scaling_suspend_work);
1614 INIT_WORK(&hba->clk_scaling.resume_work,
1615 ufshcd_clk_scaling_resume_work);
1616
1617 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1618 hba->host->host_no);
1619 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1620
1621 hba->clk_scaling.is_initialized = true;
1622}
1623
1624static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1625{
1626 if (!hba->clk_scaling.is_initialized)
1627 return;
1628
1629 ufshcd_remove_clk_scaling_sysfs(hba);
1630 destroy_workqueue(hba->clk_scaling.workq);
1631 ufshcd_devfreq_remove(hba);
1632 hba->clk_scaling.is_initialized = false;
1633}
1634
1635static void ufshcd_ungate_work(struct work_struct *work)
1636{
1637 int ret;
1638 unsigned long flags;
1639 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1640 clk_gating.ungate_work);
1641
1642 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1643
1644 spin_lock_irqsave(hba->host->host_lock, flags);
1645 if (hba->clk_gating.state == CLKS_ON) {
1646 spin_unlock_irqrestore(hba->host->host_lock, flags);
1647 goto unblock_reqs;
1648 }
1649
1650 spin_unlock_irqrestore(hba->host->host_lock, flags);
1651 ufshcd_hba_vreg_set_hpm(hba);
1652 ufshcd_setup_clocks(hba, true);
1653
1654 ufshcd_enable_irq(hba);
1655
1656
1657 if (ufshcd_can_hibern8_during_gating(hba)) {
1658
1659 hba->clk_gating.is_suspended = true;
1660 if (ufshcd_is_link_hibern8(hba)) {
1661 ret = ufshcd_uic_hibern8_exit(hba);
1662 if (ret)
1663 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1664 __func__, ret);
1665 else
1666 ufshcd_set_link_active(hba);
1667 }
1668 hba->clk_gating.is_suspended = false;
1669 }
1670unblock_reqs:
1671 ufshcd_scsi_unblock_requests(hba);
1672}
1673
1674
1675
1676
1677
1678
1679
1680int ufshcd_hold(struct ufs_hba *hba, bool async)
1681{
1682 int rc = 0;
1683 bool flush_result;
1684 unsigned long flags;
1685
1686 if (!ufshcd_is_clkgating_allowed(hba))
1687 goto out;
1688 spin_lock_irqsave(hba->host->host_lock, flags);
1689 hba->clk_gating.active_reqs++;
1690
1691start:
1692 switch (hba->clk_gating.state) {
1693 case CLKS_ON:
1694
1695
1696
1697
1698
1699
1700
1701
1702 if (ufshcd_can_hibern8_during_gating(hba) &&
1703 ufshcd_is_link_hibern8(hba)) {
1704 if (async) {
1705 rc = -EAGAIN;
1706 hba->clk_gating.active_reqs--;
1707 break;
1708 }
1709 spin_unlock_irqrestore(hba->host->host_lock, flags);
1710 flush_result = flush_work(&hba->clk_gating.ungate_work);
1711 if (hba->clk_gating.is_suspended && !flush_result)
1712 goto out;
1713 spin_lock_irqsave(hba->host->host_lock, flags);
1714 goto start;
1715 }
1716 break;
1717 case REQ_CLKS_OFF:
1718 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1719 hba->clk_gating.state = CLKS_ON;
1720 trace_ufshcd_clk_gating(dev_name(hba->dev),
1721 hba->clk_gating.state);
1722 break;
1723 }
1724
1725
1726
1727
1728
1729 fallthrough;
1730 case CLKS_OFF:
1731 hba->clk_gating.state = REQ_CLKS_ON;
1732 trace_ufshcd_clk_gating(dev_name(hba->dev),
1733 hba->clk_gating.state);
1734 if (queue_work(hba->clk_gating.clk_gating_workq,
1735 &hba->clk_gating.ungate_work))
1736 ufshcd_scsi_block_requests(hba);
1737
1738
1739
1740
1741 fallthrough;
1742 case REQ_CLKS_ON:
1743 if (async) {
1744 rc = -EAGAIN;
1745 hba->clk_gating.active_reqs--;
1746 break;
1747 }
1748
1749 spin_unlock_irqrestore(hba->host->host_lock, flags);
1750 flush_work(&hba->clk_gating.ungate_work);
1751
1752 spin_lock_irqsave(hba->host->host_lock, flags);
1753 goto start;
1754 default:
1755 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1756 __func__, hba->clk_gating.state);
1757 break;
1758 }
1759 spin_unlock_irqrestore(hba->host->host_lock, flags);
1760out:
1761 return rc;
1762}
1763EXPORT_SYMBOL_GPL(ufshcd_hold);
1764
1765static void ufshcd_gate_work(struct work_struct *work)
1766{
1767 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1768 clk_gating.gate_work.work);
1769 unsigned long flags;
1770 int ret;
1771
1772 spin_lock_irqsave(hba->host->host_lock, flags);
1773
1774
1775
1776
1777
1778
1779 if (hba->clk_gating.is_suspended ||
1780 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1781 hba->clk_gating.state = CLKS_ON;
1782 trace_ufshcd_clk_gating(dev_name(hba->dev),
1783 hba->clk_gating.state);
1784 goto rel_lock;
1785 }
1786
1787 if (hba->clk_gating.active_reqs
1788 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1789 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1790 || hba->active_uic_cmd || hba->uic_async_done)
1791 goto rel_lock;
1792
1793 spin_unlock_irqrestore(hba->host->host_lock, flags);
1794
1795
1796 if (ufshcd_can_hibern8_during_gating(hba)) {
1797 ret = ufshcd_uic_hibern8_enter(hba);
1798 if (ret) {
1799 hba->clk_gating.state = CLKS_ON;
1800 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1801 __func__, ret);
1802 trace_ufshcd_clk_gating(dev_name(hba->dev),
1803 hba->clk_gating.state);
1804 goto out;
1805 }
1806 ufshcd_set_link_hibern8(hba);
1807 }
1808
1809 ufshcd_disable_irq(hba);
1810
1811 ufshcd_setup_clocks(hba, false);
1812
1813
1814 ufshcd_hba_vreg_set_lpm(hba);
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824 spin_lock_irqsave(hba->host->host_lock, flags);
1825 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1826 hba->clk_gating.state = CLKS_OFF;
1827 trace_ufshcd_clk_gating(dev_name(hba->dev),
1828 hba->clk_gating.state);
1829 }
1830rel_lock:
1831 spin_unlock_irqrestore(hba->host->host_lock, flags);
1832out:
1833 return;
1834}
1835
1836
1837static void __ufshcd_release(struct ufs_hba *hba)
1838{
1839 if (!ufshcd_is_clkgating_allowed(hba))
1840 return;
1841
1842 hba->clk_gating.active_reqs--;
1843
1844 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1845 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1846 hba->outstanding_tasks ||
1847 hba->active_uic_cmd || hba->uic_async_done ||
1848 hba->clk_gating.state == CLKS_OFF)
1849 return;
1850
1851 hba->clk_gating.state = REQ_CLKS_OFF;
1852 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1853 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1854 &hba->clk_gating.gate_work,
1855 msecs_to_jiffies(hba->clk_gating.delay_ms));
1856}
1857
1858void ufshcd_release(struct ufs_hba *hba)
1859{
1860 unsigned long flags;
1861
1862 spin_lock_irqsave(hba->host->host_lock, flags);
1863 __ufshcd_release(hba);
1864 spin_unlock_irqrestore(hba->host->host_lock, flags);
1865}
1866EXPORT_SYMBOL_GPL(ufshcd_release);
1867
1868static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1869 struct device_attribute *attr, char *buf)
1870{
1871 struct ufs_hba *hba = dev_get_drvdata(dev);
1872
1873 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1874}
1875
1876static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1877 struct device_attribute *attr, const char *buf, size_t count)
1878{
1879 struct ufs_hba *hba = dev_get_drvdata(dev);
1880 unsigned long flags, value;
1881
1882 if (kstrtoul(buf, 0, &value))
1883 return -EINVAL;
1884
1885 spin_lock_irqsave(hba->host->host_lock, flags);
1886 hba->clk_gating.delay_ms = value;
1887 spin_unlock_irqrestore(hba->host->host_lock, flags);
1888 return count;
1889}
1890
1891static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1892 struct device_attribute *attr, char *buf)
1893{
1894 struct ufs_hba *hba = dev_get_drvdata(dev);
1895
1896 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1897}
1898
1899static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1900 struct device_attribute *attr, const char *buf, size_t count)
1901{
1902 struct ufs_hba *hba = dev_get_drvdata(dev);
1903 unsigned long flags;
1904 u32 value;
1905
1906 if (kstrtou32(buf, 0, &value))
1907 return -EINVAL;
1908
1909 value = !!value;
1910
1911 spin_lock_irqsave(hba->host->host_lock, flags);
1912 if (value == hba->clk_gating.is_enabled)
1913 goto out;
1914
1915 if (value)
1916 __ufshcd_release(hba);
1917 else
1918 hba->clk_gating.active_reqs++;
1919
1920 hba->clk_gating.is_enabled = value;
1921out:
1922 spin_unlock_irqrestore(hba->host->host_lock, flags);
1923 return count;
1924}
1925
1926static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1927{
1928 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1929 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1930 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1931 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1932 hba->clk_gating.delay_attr.attr.mode = 0644;
1933 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1934 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1935
1936 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1937 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1938 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1939 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1940 hba->clk_gating.enable_attr.attr.mode = 0644;
1941 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1942 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1943}
1944
1945static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1946{
1947 if (hba->clk_gating.delay_attr.attr.name)
1948 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1949 if (hba->clk_gating.enable_attr.attr.name)
1950 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1951}
1952
1953static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1954{
1955 char wq_name[sizeof("ufs_clk_gating_00")];
1956
1957 if (!ufshcd_is_clkgating_allowed(hba))
1958 return;
1959
1960 hba->clk_gating.state = CLKS_ON;
1961
1962 hba->clk_gating.delay_ms = 150;
1963 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1964 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1965
1966 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1967 hba->host->host_no);
1968 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1969 WQ_MEM_RECLAIM | WQ_HIGHPRI);
1970
1971 ufshcd_init_clk_gating_sysfs(hba);
1972
1973 hba->clk_gating.is_enabled = true;
1974 hba->clk_gating.is_initialized = true;
1975}
1976
1977static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1978{
1979 if (!hba->clk_gating.is_initialized)
1980 return;
1981 ufshcd_remove_clk_gating_sysfs(hba);
1982 cancel_work_sync(&hba->clk_gating.ungate_work);
1983 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1984 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1985 hba->clk_gating.is_initialized = false;
1986}
1987
1988
1989static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1990{
1991 bool queue_resume_work = false;
1992 ktime_t curr_t = ktime_get();
1993 unsigned long flags;
1994
1995 if (!ufshcd_is_clkscaling_supported(hba))
1996 return;
1997
1998 spin_lock_irqsave(hba->host->host_lock, flags);
1999 if (!hba->clk_scaling.active_reqs++)
2000 queue_resume_work = true;
2001
2002 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2003 spin_unlock_irqrestore(hba->host->host_lock, flags);
2004 return;
2005 }
2006
2007 if (queue_resume_work)
2008 queue_work(hba->clk_scaling.workq,
2009 &hba->clk_scaling.resume_work);
2010
2011 if (!hba->clk_scaling.window_start_t) {
2012 hba->clk_scaling.window_start_t = curr_t;
2013 hba->clk_scaling.tot_busy_t = 0;
2014 hba->clk_scaling.is_busy_started = false;
2015 }
2016
2017 if (!hba->clk_scaling.is_busy_started) {
2018 hba->clk_scaling.busy_start_t = curr_t;
2019 hba->clk_scaling.is_busy_started = true;
2020 }
2021 spin_unlock_irqrestore(hba->host->host_lock, flags);
2022}
2023
2024static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2025{
2026 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2027 unsigned long flags;
2028
2029 if (!ufshcd_is_clkscaling_supported(hba))
2030 return;
2031
2032 spin_lock_irqsave(hba->host->host_lock, flags);
2033 hba->clk_scaling.active_reqs--;
2034 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2035 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2036 scaling->busy_start_t));
2037 scaling->busy_start_t = 0;
2038 scaling->is_busy_started = false;
2039 }
2040 spin_unlock_irqrestore(hba->host->host_lock, flags);
2041}
2042
2043static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2044{
2045 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2046 return READ;
2047 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2048 return WRITE;
2049 else
2050 return -EINVAL;
2051}
2052
2053static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2054 struct ufshcd_lrb *lrbp)
2055{
2056 struct ufs_hba_monitor *m = &hba->monitor;
2057
2058 return (m->enabled && lrbp && lrbp->cmd &&
2059 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2060 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2061}
2062
2063static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2064{
2065 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2066 unsigned long flags;
2067
2068 spin_lock_irqsave(hba->host->host_lock, flags);
2069 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2070 hba->monitor.busy_start_ts[dir] = ktime_get();
2071 spin_unlock_irqrestore(hba->host->host_lock, flags);
2072}
2073
2074static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2075{
2076 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2077 unsigned long flags;
2078
2079 spin_lock_irqsave(hba->host->host_lock, flags);
2080 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2081 struct request *req = lrbp->cmd->request;
2082 struct ufs_hba_monitor *m = &hba->monitor;
2083 ktime_t now, inc, lat;
2084
2085 now = lrbp->compl_time_stamp;
2086 inc = ktime_sub(now, m->busy_start_ts[dir]);
2087 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2088 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2089
2090
2091 m->nr_req[dir]++;
2092 lat = ktime_sub(now, lrbp->issue_time_stamp);
2093 m->lat_sum[dir] += lat;
2094 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2095 m->lat_max[dir] = lat;
2096 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2097 m->lat_min[dir] = lat;
2098
2099 m->nr_queued[dir]--;
2100
2101 m->busy_start_ts[dir] = now;
2102 }
2103 spin_unlock_irqrestore(hba->host->host_lock, flags);
2104}
2105
2106
2107
2108
2109
2110
2111static inline
2112void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2113{
2114 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2115
2116 lrbp->issue_time_stamp = ktime_get();
2117 lrbp->compl_time_stamp = ktime_set(0, 0);
2118 ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
2119 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2120 ufshcd_clk_scaling_start_busy(hba);
2121 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2122 ufshcd_start_monitor(hba, lrbp);
2123 if (ufshcd_has_utrlcnr(hba)) {
2124 set_bit(task_tag, &hba->outstanding_reqs);
2125 ufshcd_writel(hba, 1 << task_tag,
2126 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2127 } else {
2128 unsigned long flags;
2129
2130 spin_lock_irqsave(hba->host->host_lock, flags);
2131 set_bit(task_tag, &hba->outstanding_reqs);
2132 ufshcd_writel(hba, 1 << task_tag,
2133 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2134 spin_unlock_irqrestore(hba->host->host_lock, flags);
2135 }
2136
2137 wmb();
2138}
2139
2140
2141
2142
2143
2144static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2145{
2146 int len;
2147 if (lrbp->sense_buffer &&
2148 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2149 int len_to_copy;
2150
2151 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2152 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2153
2154 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2155 len_to_copy);
2156 }
2157}
2158
2159
2160
2161
2162
2163
2164
2165static
2166int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2167{
2168 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2169
2170 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2171
2172
2173 if (hba->dev_cmd.query.descriptor &&
2174 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2175 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2176 GENERAL_UPIU_REQUEST_SIZE;
2177 u16 resp_len;
2178 u16 buf_len;
2179
2180
2181 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2182 MASK_QUERY_DATA_SEG_LEN;
2183 buf_len = be16_to_cpu(
2184 hba->dev_cmd.query.request.upiu_req.length);
2185 if (likely(buf_len >= resp_len)) {
2186 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2187 } else {
2188 dev_warn(hba->dev,
2189 "%s: rsp size %d is bigger than buffer size %d",
2190 __func__, resp_len, buf_len);
2191 return -EINVAL;
2192 }
2193 }
2194
2195 return 0;
2196}
2197
2198
2199
2200
2201
2202
2203
2204static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2205{
2206 int err;
2207
2208 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2209
2210
2211 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2212 hba->nutmrs =
2213 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2214
2215
2216 err = ufshcd_hba_init_crypto_capabilities(hba);
2217 if (err)
2218 dev_err(hba->dev, "crypto setup failed\n");
2219
2220 return err;
2221}
2222
2223
2224
2225
2226
2227
2228
2229static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2230{
2231 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2232 return true;
2233 else
2234 return false;
2235}
2236
2237
2238
2239
2240
2241
2242
2243
2244static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2245{
2246 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2247}
2248
2249
2250
2251
2252
2253
2254
2255
2256static inline void
2257ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2258{
2259 WARN_ON(hba->active_uic_cmd);
2260
2261 hba->active_uic_cmd = uic_cmd;
2262
2263
2264 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2265 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2266 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2267
2268 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2269
2270
2271 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2272 REG_UIC_COMMAND);
2273}
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283static int
2284ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2285{
2286 int ret;
2287 unsigned long flags;
2288
2289 if (wait_for_completion_timeout(&uic_cmd->done,
2290 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2291 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2292 } else {
2293 ret = -ETIMEDOUT;
2294 dev_err(hba->dev,
2295 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2296 uic_cmd->command, uic_cmd->argument3);
2297
2298 if (!uic_cmd->cmd_active) {
2299 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2300 __func__);
2301 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2302 }
2303 }
2304
2305 spin_lock_irqsave(hba->host->host_lock, flags);
2306 hba->active_uic_cmd = NULL;
2307 spin_unlock_irqrestore(hba->host->host_lock, flags);
2308
2309 return ret;
2310}
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322static int
2323__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2324 bool completion)
2325{
2326 if (!ufshcd_ready_for_uic_cmd(hba)) {
2327 dev_err(hba->dev,
2328 "Controller not ready to accept UIC commands\n");
2329 return -EIO;
2330 }
2331
2332 if (completion)
2333 init_completion(&uic_cmd->done);
2334
2335 uic_cmd->cmd_active = 1;
2336 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2337
2338 return 0;
2339}
2340
2341
2342
2343
2344
2345
2346
2347
2348int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2349{
2350 int ret;
2351 unsigned long flags;
2352
2353 ufshcd_hold(hba, false);
2354 mutex_lock(&hba->uic_cmd_mutex);
2355 ufshcd_add_delay_before_dme_cmd(hba);
2356
2357 spin_lock_irqsave(hba->host->host_lock, flags);
2358 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2359 spin_unlock_irqrestore(hba->host->host_lock, flags);
2360 if (!ret)
2361 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2362
2363 mutex_unlock(&hba->uic_cmd_mutex);
2364
2365 ufshcd_release(hba);
2366 return ret;
2367}
2368
2369
2370
2371
2372
2373
2374
2375
2376static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2377{
2378 struct ufshcd_sg_entry *prd_table;
2379 struct scatterlist *sg;
2380 struct scsi_cmnd *cmd;
2381 int sg_segments;
2382 int i;
2383
2384 cmd = lrbp->cmd;
2385 sg_segments = scsi_dma_map(cmd);
2386 if (sg_segments < 0)
2387 return sg_segments;
2388
2389 if (sg_segments) {
2390
2391 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2392 lrbp->utr_descriptor_ptr->prd_table_length =
2393 cpu_to_le16((sg_segments *
2394 sizeof(struct ufshcd_sg_entry)));
2395 else
2396 lrbp->utr_descriptor_ptr->prd_table_length =
2397 cpu_to_le16((u16) (sg_segments));
2398
2399 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2400
2401 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2402 prd_table[i].size =
2403 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2404 prd_table[i].base_addr =
2405 cpu_to_le32(lower_32_bits(sg->dma_address));
2406 prd_table[i].upper_addr =
2407 cpu_to_le32(upper_32_bits(sg->dma_address));
2408 prd_table[i].reserved = 0;
2409 }
2410 } else {
2411 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2412 }
2413
2414 return 0;
2415}
2416
2417
2418
2419
2420
2421
2422static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2423{
2424 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2425
2426 if (hba->ufs_version == ufshci_version(1, 0)) {
2427 u32 rw;
2428 rw = set & INTERRUPT_MASK_RW_VER_10;
2429 set = rw | ((set ^ intrs) & intrs);
2430 } else {
2431 set |= intrs;
2432 }
2433
2434 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2435}
2436
2437
2438
2439
2440
2441
2442static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2443{
2444 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2445
2446 if (hba->ufs_version == ufshci_version(1, 0)) {
2447 u32 rw;
2448 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2449 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2450 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2451
2452 } else {
2453 set &= ~intrs;
2454 }
2455
2456 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2457}
2458
2459
2460
2461
2462
2463
2464
2465
2466static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2467 u8 *upiu_flags, enum dma_data_direction cmd_dir)
2468{
2469 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2470 u32 data_direction;
2471 u32 dword_0;
2472 u32 dword_1 = 0;
2473 u32 dword_3 = 0;
2474
2475 if (cmd_dir == DMA_FROM_DEVICE) {
2476 data_direction = UTP_DEVICE_TO_HOST;
2477 *upiu_flags = UPIU_CMD_FLAGS_READ;
2478 } else if (cmd_dir == DMA_TO_DEVICE) {
2479 data_direction = UTP_HOST_TO_DEVICE;
2480 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2481 } else {
2482 data_direction = UTP_NO_DATA_TRANSFER;
2483 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2484 }
2485
2486 dword_0 = data_direction | (lrbp->command_type
2487 << UPIU_COMMAND_TYPE_OFFSET);
2488 if (lrbp->intr_cmd)
2489 dword_0 |= UTP_REQ_DESC_INT_CMD;
2490
2491
2492 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2493
2494
2495 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2496 req_desc->header.dword_1 = cpu_to_le32(dword_1);
2497
2498
2499
2500
2501
2502 req_desc->header.dword_2 =
2503 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2504 req_desc->header.dword_3 = cpu_to_le32(dword_3);
2505
2506 req_desc->prd_table_length = 0;
2507}
2508
2509
2510
2511
2512
2513
2514
2515static
2516void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2517{
2518 struct scsi_cmnd *cmd = lrbp->cmd;
2519 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2520 unsigned short cdb_len;
2521
2522
2523 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2524 UPIU_TRANSACTION_COMMAND, upiu_flags,
2525 lrbp->lun, lrbp->task_tag);
2526 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2527 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2528
2529
2530 ucd_req_ptr->header.dword_2 = 0;
2531
2532 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2533
2534 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2535 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2536 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2537
2538 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2539}
2540
2541
2542
2543
2544
2545
2546
2547
2548static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2549 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2550{
2551 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2552 struct ufs_query *query = &hba->dev_cmd.query;
2553 u16 len = be16_to_cpu(query->request.upiu_req.length);
2554
2555
2556 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2557 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2558 lrbp->lun, lrbp->task_tag);
2559 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2560 0, query->request.query_func, 0, 0);
2561
2562
2563 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2564 ucd_req_ptr->header.dword_2 =
2565 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2566 else
2567 ucd_req_ptr->header.dword_2 = 0;
2568
2569
2570 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2571 QUERY_OSF_SIZE);
2572
2573
2574 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2575 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2576
2577 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2578}
2579
2580static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2581{
2582 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2583
2584 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2585
2586
2587 ucd_req_ptr->header.dword_0 =
2588 UPIU_HEADER_DWORD(
2589 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2590
2591 ucd_req_ptr->header.dword_1 = 0;
2592 ucd_req_ptr->header.dword_2 = 0;
2593
2594 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2595}
2596
2597
2598
2599
2600
2601
2602
2603static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2604 struct ufshcd_lrb *lrbp)
2605{
2606 u8 upiu_flags;
2607 int ret = 0;
2608
2609 if (hba->ufs_version <= ufshci_version(1, 1))
2610 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2611 else
2612 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2613
2614 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2615 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2616 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2617 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2618 ufshcd_prepare_utp_nop_upiu(lrbp);
2619 else
2620 ret = -EINVAL;
2621
2622 return ret;
2623}
2624
2625
2626
2627
2628
2629
2630
2631static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2632{
2633 u8 upiu_flags;
2634 int ret = 0;
2635
2636 if (hba->ufs_version <= ufshci_version(1, 1))
2637 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2638 else
2639 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2640
2641 if (likely(lrbp->cmd)) {
2642 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2643 lrbp->cmd->sc_data_direction);
2644 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2645 } else {
2646 ret = -EINVAL;
2647 }
2648
2649 return ret;
2650}
2651
2652
2653
2654
2655
2656
2657
2658static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2659{
2660 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2661}
2662
2663static inline bool is_rpmb_wlun(struct scsi_device *sdev)
2664{
2665 return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
2666}
2667
2668static inline bool is_device_wlun(struct scsi_device *sdev)
2669{
2670 return sdev->lun ==
2671 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2672}
2673
2674static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2675{
2676 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2677 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2678 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2679 i * sizeof(struct utp_transfer_cmd_desc);
2680 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2681 response_upiu);
2682 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2683
2684 lrb->utr_descriptor_ptr = utrdlp + i;
2685 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2686 i * sizeof(struct utp_transfer_req_desc);
2687 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2688 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2689 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2690 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2691 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2692 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2693}
2694
2695
2696
2697
2698
2699
2700
2701
2702static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2703{
2704 struct ufshcd_lrb *lrbp;
2705 struct ufs_hba *hba;
2706 int tag;
2707 int err = 0;
2708
2709 hba = shost_priv(host);
2710
2711 tag = cmd->request->tag;
2712 if (!ufshcd_valid_tag(hba, tag)) {
2713 dev_err(hba->dev,
2714 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2715 __func__, tag, cmd, cmd->request);
2716 BUG();
2717 }
2718
2719 if (!down_read_trylock(&hba->clk_scaling_lock))
2720 return SCSI_MLQUEUE_HOST_BUSY;
2721
2722 switch (hba->ufshcd_state) {
2723 case UFSHCD_STATE_OPERATIONAL:
2724 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2725 break;
2726 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737 if (hba->pm_op_in_progress) {
2738 hba->force_reset = true;
2739 set_host_byte(cmd, DID_BAD_TARGET);
2740 cmd->scsi_done(cmd);
2741 goto out;
2742 }
2743 fallthrough;
2744 case UFSHCD_STATE_RESET:
2745 err = SCSI_MLQUEUE_HOST_BUSY;
2746 goto out;
2747 case UFSHCD_STATE_ERROR:
2748 set_host_byte(cmd, DID_ERROR);
2749 cmd->scsi_done(cmd);
2750 goto out;
2751 default:
2752 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2753 __func__, hba->ufshcd_state);
2754 set_host_byte(cmd, DID_BAD_TARGET);
2755 cmd->scsi_done(cmd);
2756 goto out;
2757 }
2758
2759 hba->req_abort_count = 0;
2760
2761 err = ufshcd_hold(hba, true);
2762 if (err) {
2763 err = SCSI_MLQUEUE_HOST_BUSY;
2764 goto out;
2765 }
2766 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2767 (hba->clk_gating.state != CLKS_ON));
2768
2769 if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
2770 if (hba->pm_op_in_progress)
2771 set_host_byte(cmd, DID_BAD_TARGET);
2772 else
2773 err = SCSI_MLQUEUE_HOST_BUSY;
2774 ufshcd_release(hba);
2775 goto out;
2776 }
2777
2778 lrbp = &hba->lrb[tag];
2779 WARN_ON(lrbp->cmd);
2780 lrbp->cmd = cmd;
2781 lrbp->sense_bufflen = UFS_SENSE_SIZE;
2782 lrbp->sense_buffer = cmd->sense_buffer;
2783 lrbp->task_tag = tag;
2784 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2785 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2786
2787 ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2788
2789 lrbp->req_abort_skip = false;
2790
2791 ufshcd_comp_scsi_upiu(hba, lrbp);
2792
2793 err = ufshcd_map_sg(hba, lrbp);
2794 if (err) {
2795 lrbp->cmd = NULL;
2796 ufshcd_release(hba);
2797 goto out;
2798 }
2799
2800 wmb();
2801
2802 ufshcd_send_command(hba, tag);
2803out:
2804 up_read(&hba->clk_scaling_lock);
2805 return err;
2806}
2807
2808static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2809 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2810{
2811 lrbp->cmd = NULL;
2812 lrbp->sense_bufflen = 0;
2813 lrbp->sense_buffer = NULL;
2814 lrbp->task_tag = tag;
2815 lrbp->lun = 0;
2816 lrbp->intr_cmd = true;
2817 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2818 hba->dev_cmd.type = cmd_type;
2819
2820 return ufshcd_compose_devman_upiu(hba, lrbp);
2821}
2822
2823static int
2824ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2825{
2826 int err = 0;
2827 unsigned long flags;
2828 u32 mask = 1 << tag;
2829
2830
2831 spin_lock_irqsave(hba->host->host_lock, flags);
2832 ufshcd_utrl_clear(hba, tag);
2833 spin_unlock_irqrestore(hba->host->host_lock, flags);
2834
2835
2836
2837
2838
2839 err = ufshcd_wait_for_register(hba,
2840 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2841 mask, ~mask, 1000, 1000);
2842
2843 return err;
2844}
2845
2846static int
2847ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2848{
2849 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2850
2851
2852 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2853 UPIU_RSP_CODE_OFFSET;
2854 return query_res->response;
2855}
2856
2857
2858
2859
2860
2861
2862static int
2863ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2864{
2865 int resp;
2866 int err = 0;
2867
2868 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2869 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2870
2871 switch (resp) {
2872 case UPIU_TRANSACTION_NOP_IN:
2873 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2874 err = -EINVAL;
2875 dev_err(hba->dev, "%s: unexpected response %x\n",
2876 __func__, resp);
2877 }
2878 break;
2879 case UPIU_TRANSACTION_QUERY_RSP:
2880 err = ufshcd_check_query_response(hba, lrbp);
2881 if (!err)
2882 err = ufshcd_copy_query_response(hba, lrbp);
2883 break;
2884 case UPIU_TRANSACTION_REJECT_UPIU:
2885
2886 err = -EPERM;
2887 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2888 __func__);
2889 break;
2890 default:
2891 err = -EINVAL;
2892 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2893 __func__, resp);
2894 break;
2895 }
2896
2897 return err;
2898}
2899
2900static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2901 struct ufshcd_lrb *lrbp, int max_timeout)
2902{
2903 int err = 0;
2904 unsigned long time_left;
2905 unsigned long flags;
2906
2907 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2908 msecs_to_jiffies(max_timeout));
2909
2910
2911 wmb();
2912 spin_lock_irqsave(hba->host->host_lock, flags);
2913 hba->dev_cmd.complete = NULL;
2914 if (likely(time_left)) {
2915 err = ufshcd_get_tr_ocs(lrbp);
2916 if (!err)
2917 err = ufshcd_dev_cmd_completion(hba, lrbp);
2918 }
2919 spin_unlock_irqrestore(hba->host->host_lock, flags);
2920
2921 if (!time_left) {
2922 err = -ETIMEDOUT;
2923 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2924 __func__, lrbp->task_tag);
2925 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2926
2927 err = -EAGAIN;
2928
2929
2930
2931
2932
2933 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2934 }
2935
2936 return err;
2937}
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2949 enum dev_cmd_type cmd_type, int timeout)
2950{
2951 struct request_queue *q = hba->cmd_queue;
2952 struct request *req;
2953 struct ufshcd_lrb *lrbp;
2954 int err;
2955 int tag;
2956 struct completion wait;
2957
2958 down_read(&hba->clk_scaling_lock);
2959
2960
2961
2962
2963
2964
2965 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2966 if (IS_ERR(req)) {
2967 err = PTR_ERR(req);
2968 goto out_unlock;
2969 }
2970 tag = req->tag;
2971 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2972
2973 req->timeout = msecs_to_jiffies(2 * timeout);
2974 blk_mq_start_request(req);
2975
2976 if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
2977 err = -EBUSY;
2978 goto out;
2979 }
2980
2981 init_completion(&wait);
2982 lrbp = &hba->lrb[tag];
2983 WARN_ON(lrbp->cmd);
2984 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2985 if (unlikely(err))
2986 goto out;
2987
2988 hba->dev_cmd.complete = &wait;
2989
2990 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
2991
2992 wmb();
2993
2994 ufshcd_send_command(hba, tag);
2995 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2996 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2997 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
2998
2999out:
3000 blk_put_request(req);
3001out_unlock:
3002 up_read(&hba->clk_scaling_lock);
3003 return err;
3004}
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016static inline void ufshcd_init_query(struct ufs_hba *hba,
3017 struct ufs_query_req **request, struct ufs_query_res **response,
3018 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3019{
3020 *request = &hba->dev_cmd.query.request;
3021 *response = &hba->dev_cmd.query.response;
3022 memset(*request, 0, sizeof(struct ufs_query_req));
3023 memset(*response, 0, sizeof(struct ufs_query_res));
3024 (*request)->upiu_req.opcode = opcode;
3025 (*request)->upiu_req.idn = idn;
3026 (*request)->upiu_req.index = index;
3027 (*request)->upiu_req.selector = selector;
3028}
3029
3030static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3031 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3032{
3033 int ret;
3034 int retries;
3035
3036 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3037 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3038 if (ret)
3039 dev_dbg(hba->dev,
3040 "%s: failed with error %d, retries %d\n",
3041 __func__, ret, retries);
3042 else
3043 break;
3044 }
3045
3046 if (ret)
3047 dev_err(hba->dev,
3048 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
3049 __func__, opcode, idn, ret, retries);
3050 return ret;
3051}
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3064 enum flag_idn idn, u8 index, bool *flag_res)
3065{
3066 struct ufs_query_req *request = NULL;
3067 struct ufs_query_res *response = NULL;
3068 int err, selector = 0;
3069 int timeout = QUERY_REQ_TIMEOUT;
3070
3071 BUG_ON(!hba);
3072
3073 ufshcd_hold(hba, false);
3074 mutex_lock(&hba->dev_cmd.lock);
3075 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3076 selector);
3077
3078 switch (opcode) {
3079 case UPIU_QUERY_OPCODE_SET_FLAG:
3080 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3081 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3082 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3083 break;
3084 case UPIU_QUERY_OPCODE_READ_FLAG:
3085 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3086 if (!flag_res) {
3087
3088 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3089 __func__);
3090 err = -EINVAL;
3091 goto out_unlock;
3092 }
3093 break;
3094 default:
3095 dev_err(hba->dev,
3096 "%s: Expected query flag opcode but got = %d\n",
3097 __func__, opcode);
3098 err = -EINVAL;
3099 goto out_unlock;
3100 }
3101
3102 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3103
3104 if (err) {
3105 dev_err(hba->dev,
3106 "%s: Sending flag query for idn %d failed, err = %d\n",
3107 __func__, idn, err);
3108 goto out_unlock;
3109 }
3110
3111 if (flag_res)
3112 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3113 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3114
3115out_unlock:
3116 mutex_unlock(&hba->dev_cmd.lock);
3117 ufshcd_release(hba);
3118 return err;
3119}
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3133 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3134{
3135 struct ufs_query_req *request = NULL;
3136 struct ufs_query_res *response = NULL;
3137 int err;
3138
3139 BUG_ON(!hba);
3140
3141 if (!attr_val) {
3142 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3143 __func__, opcode);
3144 return -EINVAL;
3145 }
3146
3147 ufshcd_hold(hba, false);
3148
3149 mutex_lock(&hba->dev_cmd.lock);
3150 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3151 selector);
3152
3153 switch (opcode) {
3154 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3155 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3156 request->upiu_req.value = cpu_to_be32(*attr_val);
3157 break;
3158 case UPIU_QUERY_OPCODE_READ_ATTR:
3159 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3160 break;
3161 default:
3162 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3163 __func__, opcode);
3164 err = -EINVAL;
3165 goto out_unlock;
3166 }
3167
3168 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3169
3170 if (err) {
3171 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3172 __func__, opcode, idn, index, err);
3173 goto out_unlock;
3174 }
3175
3176 *attr_val = be32_to_cpu(response->upiu_res.value);
3177
3178out_unlock:
3179 mutex_unlock(&hba->dev_cmd.lock);
3180 ufshcd_release(hba);
3181 return err;
3182}
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3198 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3199 u32 *attr_val)
3200{
3201 int ret = 0;
3202 u32 retries;
3203
3204 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3205 ret = ufshcd_query_attr(hba, opcode, idn, index,
3206 selector, attr_val);
3207 if (ret)
3208 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3209 __func__, ret, retries);
3210 else
3211 break;
3212 }
3213
3214 if (ret)
3215 dev_err(hba->dev,
3216 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3217 __func__, idn, ret, QUERY_REQ_RETRIES);
3218 return ret;
3219}
3220
3221static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3222 enum query_opcode opcode, enum desc_idn idn, u8 index,
3223 u8 selector, u8 *desc_buf, int *buf_len)
3224{
3225 struct ufs_query_req *request = NULL;
3226 struct ufs_query_res *response = NULL;
3227 int err;
3228
3229 BUG_ON(!hba);
3230
3231 if (!desc_buf) {
3232 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3233 __func__, opcode);
3234 return -EINVAL;
3235 }
3236
3237 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3238 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3239 __func__, *buf_len);
3240 return -EINVAL;
3241 }
3242
3243 ufshcd_hold(hba, false);
3244
3245 mutex_lock(&hba->dev_cmd.lock);
3246 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3247 selector);
3248 hba->dev_cmd.query.descriptor = desc_buf;
3249 request->upiu_req.length = cpu_to_be16(*buf_len);
3250
3251 switch (opcode) {
3252 case UPIU_QUERY_OPCODE_WRITE_DESC:
3253 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3254 break;
3255 case UPIU_QUERY_OPCODE_READ_DESC:
3256 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3257 break;
3258 default:
3259 dev_err(hba->dev,
3260 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3261 __func__, opcode);
3262 err = -EINVAL;
3263 goto out_unlock;
3264 }
3265
3266 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3267
3268 if (err) {
3269 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3270 __func__, opcode, idn, index, err);
3271 goto out_unlock;
3272 }
3273
3274 *buf_len = be16_to_cpu(response->upiu_res.length);
3275
3276out_unlock:
3277 hba->dev_cmd.query.descriptor = NULL;
3278 mutex_unlock(&hba->dev_cmd.lock);
3279 ufshcd_release(hba);
3280 return err;
3281}
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3298 enum query_opcode opcode,
3299 enum desc_idn idn, u8 index,
3300 u8 selector,
3301 u8 *desc_buf, int *buf_len)
3302{
3303 int err;
3304 int retries;
3305
3306 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3307 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3308 selector, desc_buf, buf_len);
3309 if (!err || err == -EINVAL)
3310 break;
3311 }
3312
3313 return err;
3314}
3315
3316
3317
3318
3319
3320
3321
3322void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3323 int *desc_len)
3324{
3325 if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3326 desc_id == QUERY_DESC_IDN_RFU_1)
3327 *desc_len = 0;
3328 else
3329 *desc_len = hba->desc_size[desc_id];
3330}
3331EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3332
3333static void ufshcd_update_desc_length(struct ufs_hba *hba,
3334 enum desc_idn desc_id, int desc_index,
3335 unsigned char desc_len)
3336{
3337 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3338 desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3339
3340
3341
3342
3343
3344 hba->desc_size[desc_id] = desc_len;
3345}
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358int ufshcd_read_desc_param(struct ufs_hba *hba,
3359 enum desc_idn desc_id,
3360 int desc_index,
3361 u8 param_offset,
3362 u8 *param_read_buf,
3363 u8 param_size)
3364{
3365 int ret;
3366 u8 *desc_buf;
3367 int buff_len;
3368 bool is_kmalloc = true;
3369
3370
3371 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3372 return -EINVAL;
3373
3374
3375 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3376 if (!buff_len) {
3377 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3378 return -EINVAL;
3379 }
3380
3381 if (param_offset >= buff_len) {
3382 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3383 __func__, param_offset, desc_id, buff_len);
3384 return -EINVAL;
3385 }
3386
3387
3388 if (param_offset != 0 || param_size < buff_len) {
3389 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3390 if (!desc_buf)
3391 return -ENOMEM;
3392 } else {
3393 desc_buf = param_read_buf;
3394 is_kmalloc = false;
3395 }
3396
3397
3398 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3399 desc_id, desc_index, 0,
3400 desc_buf, &buff_len);
3401
3402 if (ret) {
3403 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3404 __func__, desc_id, desc_index, param_offset, ret);
3405 goto out;
3406 }
3407
3408
3409 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3410 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3411 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3412 ret = -EINVAL;
3413 goto out;
3414 }
3415
3416
3417 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3418 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3419
3420 if (is_kmalloc) {
3421
3422 if (param_offset + param_size > buff_len)
3423 param_size = buff_len - param_offset;
3424 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3425 }
3426out:
3427 if (is_kmalloc)
3428 kfree(desc_buf);
3429 return ret;
3430}
3431
3432
3433
3434
3435
3436
3437
3438
3439struct uc_string_id {
3440 u8 len;
3441 u8 type;
3442 wchar_t uc[];
3443} __packed;
3444
3445
3446static inline char ufshcd_remove_non_printable(u8 ch)
3447{
3448 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3449}
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3466 u8 **buf, bool ascii)
3467{
3468 struct uc_string_id *uc_str;
3469 u8 *str;
3470 int ret;
3471
3472 if (!buf)
3473 return -EINVAL;
3474
3475 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3476 if (!uc_str)
3477 return -ENOMEM;
3478
3479 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3480 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3481 if (ret < 0) {
3482 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3483 QUERY_REQ_RETRIES, ret);
3484 str = NULL;
3485 goto out;
3486 }
3487
3488 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3489 dev_dbg(hba->dev, "String Desc is of zero length\n");
3490 str = NULL;
3491 ret = 0;
3492 goto out;
3493 }
3494
3495 if (ascii) {
3496 ssize_t ascii_len;
3497 int i;
3498
3499 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3500 str = kzalloc(ascii_len, GFP_KERNEL);
3501 if (!str) {
3502 ret = -ENOMEM;
3503 goto out;
3504 }
3505
3506
3507
3508
3509
3510 ret = utf16s_to_utf8s(uc_str->uc,
3511 uc_str->len - QUERY_DESC_HDR_SIZE,
3512 UTF16_BIG_ENDIAN, str, ascii_len);
3513
3514
3515 for (i = 0; i < ret; i++)
3516 str[i] = ufshcd_remove_non_printable(str[i]);
3517
3518 str[ret++] = '\0';
3519
3520 } else {
3521 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3522 if (!str) {
3523 ret = -ENOMEM;
3524 goto out;
3525 }
3526 ret = uc_str->len;
3527 }
3528out:
3529 *buf = str;
3530 kfree(uc_str);
3531 return ret;
3532}
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3545 int lun,
3546 enum unit_desc_param param_offset,
3547 u8 *param_read_buf,
3548 u32 param_size)
3549{
3550
3551
3552
3553
3554 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3555 return -EOPNOTSUPP;
3556
3557 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3558 param_offset, param_read_buf, param_size);
3559}
3560
3561static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3562{
3563 int err = 0;
3564 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3565
3566 if (hba->dev_info.wspecversion >= 0x300) {
3567 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3568 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3569 &gating_wait);
3570 if (err)
3571 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3572 err, gating_wait);
3573
3574 if (gating_wait == 0) {
3575 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3576 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3577 gating_wait);
3578 }
3579
3580 hba->dev_info.clk_gating_wait_us = gating_wait;
3581 }
3582
3583 return err;
3584}
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599static int ufshcd_memory_alloc(struct ufs_hba *hba)
3600{
3601 size_t utmrdl_size, utrdl_size, ucdl_size;
3602
3603
3604 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3605 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3606 ucdl_size,
3607 &hba->ucdl_dma_addr,
3608 GFP_KERNEL);
3609
3610
3611
3612
3613
3614
3615
3616 if (!hba->ucdl_base_addr ||
3617 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3618 dev_err(hba->dev,
3619 "Command Descriptor Memory allocation failed\n");
3620 goto out;
3621 }
3622
3623
3624
3625
3626
3627 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3628 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3629 utrdl_size,
3630 &hba->utrdl_dma_addr,
3631 GFP_KERNEL);
3632 if (!hba->utrdl_base_addr ||
3633 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3634 dev_err(hba->dev,
3635 "Transfer Descriptor Memory allocation failed\n");
3636 goto out;
3637 }
3638
3639
3640
3641
3642
3643 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3644 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3645 utmrdl_size,
3646 &hba->utmrdl_dma_addr,
3647 GFP_KERNEL);
3648 if (!hba->utmrdl_base_addr ||
3649 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3650 dev_err(hba->dev,
3651 "Task Management Descriptor Memory allocation failed\n");
3652 goto out;
3653 }
3654
3655
3656 hba->lrb = devm_kcalloc(hba->dev,
3657 hba->nutrs, sizeof(struct ufshcd_lrb),
3658 GFP_KERNEL);
3659 if (!hba->lrb) {
3660 dev_err(hba->dev, "LRB Memory allocation failed\n");
3661 goto out;
3662 }
3663 return 0;
3664out:
3665 return -ENOMEM;
3666}
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3682{
3683 struct utp_transfer_req_desc *utrdlp;
3684 dma_addr_t cmd_desc_dma_addr;
3685 dma_addr_t cmd_desc_element_addr;
3686 u16 response_offset;
3687 u16 prdt_offset;
3688 int cmd_desc_size;
3689 int i;
3690
3691 utrdlp = hba->utrdl_base_addr;
3692
3693 response_offset =
3694 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3695 prdt_offset =
3696 offsetof(struct utp_transfer_cmd_desc, prd_table);
3697
3698 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3699 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3700
3701 for (i = 0; i < hba->nutrs; i++) {
3702
3703 cmd_desc_element_addr =
3704 (cmd_desc_dma_addr + (cmd_desc_size * i));
3705 utrdlp[i].command_desc_base_addr_lo =
3706 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3707 utrdlp[i].command_desc_base_addr_hi =
3708 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3709
3710
3711 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3712 utrdlp[i].response_upiu_offset =
3713 cpu_to_le16(response_offset);
3714 utrdlp[i].prd_table_offset =
3715 cpu_to_le16(prdt_offset);
3716 utrdlp[i].response_upiu_length =
3717 cpu_to_le16(ALIGNED_UPIU_SIZE);
3718 } else {
3719 utrdlp[i].response_upiu_offset =
3720 cpu_to_le16(response_offset >> 2);
3721 utrdlp[i].prd_table_offset =
3722 cpu_to_le16(prdt_offset >> 2);
3723 utrdlp[i].response_upiu_length =
3724 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3725 }
3726
3727 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3728 }
3729}
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3743{
3744 struct uic_command uic_cmd = {0};
3745 int ret;
3746
3747 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3748
3749 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3750 if (ret)
3751 dev_dbg(hba->dev,
3752 "dme-link-startup: error code %d\n", ret);
3753 return ret;
3754}
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764static int ufshcd_dme_reset(struct ufs_hba *hba)
3765{
3766 struct uic_command uic_cmd = {0};
3767 int ret;
3768
3769 uic_cmd.command = UIC_CMD_DME_RESET;
3770
3771 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3772 if (ret)
3773 dev_err(hba->dev,
3774 "dme-reset: error code %d\n", ret);
3775
3776 return ret;
3777}
3778
3779int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3780 int agreed_gear,
3781 int adapt_val)
3782{
3783 int ret;
3784
3785 if (agreed_gear != UFS_HS_G4)
3786 adapt_val = PA_NO_ADAPT;
3787
3788 ret = ufshcd_dme_set(hba,
3789 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3790 adapt_val);
3791 return ret;
3792}
3793EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803static int ufshcd_dme_enable(struct ufs_hba *hba)
3804{
3805 struct uic_command uic_cmd = {0};
3806 int ret;
3807
3808 uic_cmd.command = UIC_CMD_DME_ENABLE;
3809
3810 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3811 if (ret)
3812 dev_err(hba->dev,
3813 "dme-enable: error code %d\n", ret);
3814
3815 return ret;
3816}
3817
3818static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3819{
3820 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3821 unsigned long min_sleep_time_us;
3822
3823 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3824 return;
3825
3826
3827
3828
3829
3830 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3831 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3832 } else {
3833 unsigned long delta =
3834 (unsigned long) ktime_to_us(
3835 ktime_sub(ktime_get(),
3836 hba->last_dme_cmd_tstamp));
3837
3838 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3839 min_sleep_time_us =
3840 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3841 else
3842 return;
3843 }
3844
3845
3846 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3847}
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3860 u8 attr_set, u32 mib_val, u8 peer)
3861{
3862 struct uic_command uic_cmd = {0};
3863 static const char *const action[] = {
3864 "dme-set",
3865 "dme-peer-set"
3866 };
3867 const char *set = action[!!peer];
3868 int ret;
3869 int retries = UFS_UIC_COMMAND_RETRIES;
3870
3871 uic_cmd.command = peer ?
3872 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3873 uic_cmd.argument1 = attr_sel;
3874 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3875 uic_cmd.argument3 = mib_val;
3876
3877 do {
3878
3879 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3880 if (ret)
3881 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3882 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3883 } while (ret && peer && --retries);
3884
3885 if (ret)
3886 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3887 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3888 UFS_UIC_COMMAND_RETRIES - retries);
3889
3890 return ret;
3891}
3892EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3904 u32 *mib_val, u8 peer)
3905{
3906 struct uic_command uic_cmd = {0};
3907 static const char *const action[] = {
3908 "dme-get",
3909 "dme-peer-get"
3910 };
3911 const char *get = action[!!peer];
3912 int ret;
3913 int retries = UFS_UIC_COMMAND_RETRIES;
3914 struct ufs_pa_layer_attr orig_pwr_info;
3915 struct ufs_pa_layer_attr temp_pwr_info;
3916 bool pwr_mode_change = false;
3917
3918 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3919 orig_pwr_info = hba->pwr_info;
3920 temp_pwr_info = orig_pwr_info;
3921
3922 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3923 orig_pwr_info.pwr_rx == FAST_MODE) {
3924 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3925 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3926 pwr_mode_change = true;
3927 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3928 orig_pwr_info.pwr_rx == SLOW_MODE) {
3929 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3930 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3931 pwr_mode_change = true;
3932 }
3933 if (pwr_mode_change) {
3934 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3935 if (ret)
3936 goto out;
3937 }
3938 }
3939
3940 uic_cmd.command = peer ?
3941 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3942 uic_cmd.argument1 = attr_sel;
3943
3944 do {
3945
3946 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3947 if (ret)
3948 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3949 get, UIC_GET_ATTR_ID(attr_sel), ret);
3950 } while (ret && peer && --retries);
3951
3952 if (ret)
3953 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3954 get, UIC_GET_ATTR_ID(attr_sel),
3955 UFS_UIC_COMMAND_RETRIES - retries);
3956
3957 if (mib_val && !ret)
3958 *mib_val = uic_cmd.argument3;
3959
3960 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3961 && pwr_mode_change)
3962 ufshcd_change_power_mode(hba, &orig_pwr_info);
3963out:
3964 return ret;
3965}
3966EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3985{
3986 struct completion uic_async_done;
3987 unsigned long flags;
3988 u8 status;
3989 int ret;
3990 bool reenable_intr = false;
3991
3992 mutex_lock(&hba->uic_cmd_mutex);
3993 init_completion(&uic_async_done);
3994 ufshcd_add_delay_before_dme_cmd(hba);
3995
3996 spin_lock_irqsave(hba->host->host_lock, flags);
3997 if (ufshcd_is_link_broken(hba)) {
3998 ret = -ENOLINK;
3999 goto out_unlock;
4000 }
4001 hba->uic_async_done = &uic_async_done;
4002 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4003 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4004
4005
4006
4007
4008 wmb();
4009 reenable_intr = true;
4010 }
4011 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4012 spin_unlock_irqrestore(hba->host->host_lock, flags);
4013 if (ret) {
4014 dev_err(hba->dev,
4015 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4016 cmd->command, cmd->argument3, ret);
4017 goto out;
4018 }
4019
4020 if (!wait_for_completion_timeout(hba->uic_async_done,
4021 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4022 dev_err(hba->dev,
4023 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4024 cmd->command, cmd->argument3);
4025
4026 if (!cmd->cmd_active) {
4027 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4028 __func__);
4029 goto check_upmcrs;
4030 }
4031
4032 ret = -ETIMEDOUT;
4033 goto out;
4034 }
4035
4036check_upmcrs:
4037 status = ufshcd_get_upmcrs(hba);
4038 if (status != PWR_LOCAL) {
4039 dev_err(hba->dev,
4040 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4041 cmd->command, status);
4042 ret = (status != PWR_OK) ? status : -1;
4043 }
4044out:
4045 if (ret) {
4046 ufshcd_print_host_state(hba);
4047 ufshcd_print_pwr_info(hba);
4048 ufshcd_print_evt_hist(hba);
4049 }
4050
4051 spin_lock_irqsave(hba->host->host_lock, flags);
4052 hba->active_uic_cmd = NULL;
4053 hba->uic_async_done = NULL;
4054 if (reenable_intr)
4055 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4056 if (ret) {
4057 ufshcd_set_link_broken(hba);
4058 ufshcd_schedule_eh_work(hba);
4059 }
4060out_unlock:
4061 spin_unlock_irqrestore(hba->host->host_lock, flags);
4062 mutex_unlock(&hba->uic_cmd_mutex);
4063
4064 return ret;
4065}
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4076{
4077 struct uic_command uic_cmd = {0};
4078 int ret;
4079
4080 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4081 ret = ufshcd_dme_set(hba,
4082 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4083 if (ret) {
4084 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4085 __func__, ret);
4086 goto out;
4087 }
4088 }
4089
4090 uic_cmd.command = UIC_CMD_DME_SET;
4091 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4092 uic_cmd.argument3 = mode;
4093 ufshcd_hold(hba, false);
4094 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4095 ufshcd_release(hba);
4096
4097out:
4098 return ret;
4099}
4100
4101int ufshcd_link_recovery(struct ufs_hba *hba)
4102{
4103 int ret;
4104 unsigned long flags;
4105
4106 spin_lock_irqsave(hba->host->host_lock, flags);
4107 hba->ufshcd_state = UFSHCD_STATE_RESET;
4108 ufshcd_set_eh_in_progress(hba);
4109 spin_unlock_irqrestore(hba->host->host_lock, flags);
4110
4111
4112 ufshcd_device_reset(hba);
4113
4114 ret = ufshcd_host_reset_and_restore(hba);
4115
4116 spin_lock_irqsave(hba->host->host_lock, flags);
4117 if (ret)
4118 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4119 ufshcd_clear_eh_in_progress(hba);
4120 spin_unlock_irqrestore(hba->host->host_lock, flags);
4121
4122 if (ret)
4123 dev_err(hba->dev, "%s: link recovery failed, err %d",
4124 __func__, ret);
4125 else
4126 ufshcd_clear_ua_wluns(hba);
4127
4128 return ret;
4129}
4130EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4131
4132static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4133{
4134 int ret;
4135 struct uic_command uic_cmd = {0};
4136 ktime_t start = ktime_get();
4137
4138 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4139
4140 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4141 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4142 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4143 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4144
4145 if (ret)
4146 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4147 __func__, ret);
4148 else
4149 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4150 POST_CHANGE);
4151
4152 return ret;
4153}
4154
4155int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4156{
4157 struct uic_command uic_cmd = {0};
4158 int ret;
4159 ktime_t start = ktime_get();
4160
4161 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4162
4163 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4164 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4165 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4166 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4167
4168 if (ret) {
4169 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4170 __func__, ret);
4171 } else {
4172 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4173 POST_CHANGE);
4174 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4175 hba->ufs_stats.hibern8_exit_cnt++;
4176 }
4177
4178 return ret;
4179}
4180EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4181
4182void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4183{
4184 unsigned long flags;
4185 bool update = false;
4186
4187 if (!ufshcd_is_auto_hibern8_supported(hba))
4188 return;
4189
4190 spin_lock_irqsave(hba->host->host_lock, flags);
4191 if (hba->ahit != ahit) {
4192 hba->ahit = ahit;
4193 update = true;
4194 }
4195 spin_unlock_irqrestore(hba->host->host_lock, flags);
4196
4197 if (update &&
4198 !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
4199 ufshcd_rpm_get_sync(hba);
4200 ufshcd_hold(hba, false);
4201 ufshcd_auto_hibern8_enable(hba);
4202 ufshcd_release(hba);
4203 ufshcd_rpm_put_sync(hba);
4204 }
4205}
4206EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4207
4208void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4209{
4210 unsigned long flags;
4211
4212 if (!ufshcd_is_auto_hibern8_supported(hba))
4213 return;
4214
4215 spin_lock_irqsave(hba->host->host_lock, flags);
4216 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4217 spin_unlock_irqrestore(hba->host->host_lock, flags);
4218}
4219
4220
4221
4222
4223
4224
4225static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4226{
4227 hba->pwr_info.gear_rx = UFS_PWM_G1;
4228 hba->pwr_info.gear_tx = UFS_PWM_G1;
4229 hba->pwr_info.lane_rx = 1;
4230 hba->pwr_info.lane_tx = 1;
4231 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4232 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4233 hba->pwr_info.hs_rate = 0;
4234}
4235
4236
4237
4238
4239
4240static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4241{
4242 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4243
4244 if (hba->max_pwr_info.is_valid)
4245 return 0;
4246
4247 pwr_info->pwr_tx = FAST_MODE;
4248 pwr_info->pwr_rx = FAST_MODE;
4249 pwr_info->hs_rate = PA_HS_MODE_B;
4250
4251
4252 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4253 &pwr_info->lane_rx);
4254 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4255 &pwr_info->lane_tx);
4256
4257 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4258 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4259 __func__,
4260 pwr_info->lane_rx,
4261 pwr_info->lane_tx);
4262 return -EINVAL;
4263 }
4264
4265
4266
4267
4268
4269
4270 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4271 if (!pwr_info->gear_rx) {
4272 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4273 &pwr_info->gear_rx);
4274 if (!pwr_info->gear_rx) {
4275 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4276 __func__, pwr_info->gear_rx);
4277 return -EINVAL;
4278 }
4279 pwr_info->pwr_rx = SLOW_MODE;
4280 }
4281
4282 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4283 &pwr_info->gear_tx);
4284 if (!pwr_info->gear_tx) {
4285 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4286 &pwr_info->gear_tx);
4287 if (!pwr_info->gear_tx) {
4288 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4289 __func__, pwr_info->gear_tx);
4290 return -EINVAL;
4291 }
4292 pwr_info->pwr_tx = SLOW_MODE;
4293 }
4294
4295 hba->max_pwr_info.is_valid = true;
4296 return 0;
4297}
4298
4299static int ufshcd_change_power_mode(struct ufs_hba *hba,
4300 struct ufs_pa_layer_attr *pwr_mode)
4301{
4302 int ret;
4303
4304
4305 if (!hba->force_pmc &&
4306 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4307 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4308 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4309 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4310 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4311 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4312 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4313 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4314 return 0;
4315 }
4316
4317
4318
4319
4320
4321
4322
4323 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4324 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4325 pwr_mode->lane_rx);
4326 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4327 pwr_mode->pwr_rx == FAST_MODE)
4328 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4329 else
4330 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4331
4332 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4333 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4334 pwr_mode->lane_tx);
4335 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4336 pwr_mode->pwr_tx == FAST_MODE)
4337 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4338 else
4339 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4340
4341 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4342 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4343 pwr_mode->pwr_rx == FAST_MODE ||
4344 pwr_mode->pwr_tx == FAST_MODE)
4345 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4346 pwr_mode->hs_rate);
4347
4348 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4349 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4350 DL_FC0ProtectionTimeOutVal_Default);
4351 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4352 DL_TC0ReplayTimeOutVal_Default);
4353 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4354 DL_AFC0ReqTimeOutVal_Default);
4355 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4356 DL_FC1ProtectionTimeOutVal_Default);
4357 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4358 DL_TC1ReplayTimeOutVal_Default);
4359 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4360 DL_AFC1ReqTimeOutVal_Default);
4361
4362 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4363 DL_FC0ProtectionTimeOutVal_Default);
4364 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4365 DL_TC0ReplayTimeOutVal_Default);
4366 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4367 DL_AFC0ReqTimeOutVal_Default);
4368 }
4369
4370 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4371 | pwr_mode->pwr_tx);
4372
4373 if (ret) {
4374 dev_err(hba->dev,
4375 "%s: power mode change failed %d\n", __func__, ret);
4376 } else {
4377 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4378 pwr_mode);
4379
4380 memcpy(&hba->pwr_info, pwr_mode,
4381 sizeof(struct ufs_pa_layer_attr));
4382 }
4383
4384 return ret;
4385}
4386
4387
4388
4389
4390
4391
4392int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4393 struct ufs_pa_layer_attr *desired_pwr_mode)
4394{
4395 struct ufs_pa_layer_attr final_params = { 0 };
4396 int ret;
4397
4398 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4399 desired_pwr_mode, &final_params);
4400
4401 if (ret)
4402 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4403
4404 ret = ufshcd_change_power_mode(hba, &final_params);
4405
4406 return ret;
4407}
4408EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4409
4410
4411
4412
4413
4414
4415
4416static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4417{
4418 int err;
4419 bool flag_res = true;
4420 ktime_t timeout;
4421
4422 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4423 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4424 if (err) {
4425 dev_err(hba->dev,
4426 "%s setting fDeviceInit flag failed with error %d\n",
4427 __func__, err);
4428 goto out;
4429 }
4430
4431
4432 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4433 do {
4434 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4435 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4436 if (!flag_res)
4437 break;
4438 usleep_range(5000, 10000);
4439 } while (ktime_before(ktime_get(), timeout));
4440
4441 if (err) {
4442 dev_err(hba->dev,
4443 "%s reading fDeviceInit flag failed with error %d\n",
4444 __func__, err);
4445 } else if (flag_res) {
4446 dev_err(hba->dev,
4447 "%s fDeviceInit was not cleared by the device\n",
4448 __func__);
4449 err = -EBUSY;
4450 }
4451out:
4452 return err;
4453}
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467int ufshcd_make_hba_operational(struct ufs_hba *hba)
4468{
4469 int err = 0;
4470 u32 reg;
4471
4472
4473 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4474
4475
4476 if (ufshcd_is_intr_aggr_allowed(hba))
4477 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4478 else
4479 ufshcd_disable_intr_aggr(hba);
4480
4481
4482 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4483 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4484 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4485 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4486 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4487 REG_UTP_TASK_REQ_LIST_BASE_L);
4488 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4489 REG_UTP_TASK_REQ_LIST_BASE_H);
4490
4491
4492
4493
4494
4495 wmb();
4496
4497
4498
4499
4500 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4501 if (!(ufshcd_get_lists_status(reg))) {
4502 ufshcd_enable_run_stop_reg(hba);
4503 } else {
4504 dev_err(hba->dev,
4505 "Host controller not ready to process requests");
4506 err = -EIO;
4507 }
4508
4509 return err;
4510}
4511EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4512
4513
4514
4515
4516
4517void ufshcd_hba_stop(struct ufs_hba *hba)
4518{
4519 unsigned long flags;
4520 int err;
4521
4522
4523
4524
4525
4526 spin_lock_irqsave(hba->host->host_lock, flags);
4527 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4528 spin_unlock_irqrestore(hba->host->host_lock, flags);
4529
4530 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4531 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4532 10, 1);
4533 if (err)
4534 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4535}
4536EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4549{
4550 int retry_outer = 3;
4551 int retry_inner;
4552
4553start:
4554 if (!ufshcd_is_hba_active(hba))
4555
4556 ufshcd_hba_stop(hba);
4557
4558
4559 ufshcd_set_link_off(hba);
4560
4561 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4562
4563
4564 ufshcd_hba_start(hba);
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4577
4578
4579 retry_inner = 50;
4580 while (ufshcd_is_hba_active(hba)) {
4581 if (retry_inner) {
4582 retry_inner--;
4583 } else {
4584 dev_err(hba->dev,
4585 "Controller enable failed\n");
4586 if (retry_outer) {
4587 retry_outer--;
4588 goto start;
4589 }
4590 return -EIO;
4591 }
4592 usleep_range(1000, 1100);
4593 }
4594
4595
4596 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4597
4598 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4599
4600 return 0;
4601}
4602
4603int ufshcd_hba_enable(struct ufs_hba *hba)
4604{
4605 int ret;
4606
4607 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4608 ufshcd_set_link_off(hba);
4609 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4610
4611
4612 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4613 ret = ufshcd_dme_reset(hba);
4614 if (!ret) {
4615 ret = ufshcd_dme_enable(hba);
4616 if (!ret)
4617 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4618 if (ret)
4619 dev_err(hba->dev,
4620 "Host controller enable failed with non-hce\n");
4621 }
4622 } else {
4623 ret = ufshcd_hba_execute_hce(hba);
4624 }
4625
4626 return ret;
4627}
4628EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4629
4630static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4631{
4632 int tx_lanes = 0, i, err = 0;
4633
4634 if (!peer)
4635 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4636 &tx_lanes);
4637 else
4638 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4639 &tx_lanes);
4640 for (i = 0; i < tx_lanes; i++) {
4641 if (!peer)
4642 err = ufshcd_dme_set(hba,
4643 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4644 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4645 0);
4646 else
4647 err = ufshcd_dme_peer_set(hba,
4648 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4649 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4650 0);
4651 if (err) {
4652 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4653 __func__, peer, i, err);
4654 break;
4655 }
4656 }
4657
4658 return err;
4659}
4660
4661static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4662{
4663 return ufshcd_disable_tx_lcc(hba, true);
4664}
4665
4666void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4667{
4668 struct ufs_event_hist *e;
4669
4670 if (id >= UFS_EVT_CNT)
4671 return;
4672
4673 e = &hba->ufs_stats.event[id];
4674 e->val[e->pos] = val;
4675 e->tstamp[e->pos] = ktime_get();
4676 e->cnt += 1;
4677 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4678
4679 ufshcd_vops_event_notify(hba, id, &val);
4680}
4681EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4682
4683
4684
4685
4686
4687
4688
4689static int ufshcd_link_startup(struct ufs_hba *hba)
4690{
4691 int ret;
4692 int retries = DME_LINKSTARTUP_RETRIES;
4693 bool link_startup_again = false;
4694
4695
4696
4697
4698
4699 if (!ufshcd_is_ufs_dev_active(hba))
4700 link_startup_again = true;
4701
4702link_startup:
4703 do {
4704 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4705
4706 ret = ufshcd_dme_link_startup(hba);
4707
4708
4709 if (!ret && !ufshcd_is_device_present(hba)) {
4710 ufshcd_update_evt_hist(hba,
4711 UFS_EVT_LINK_STARTUP_FAIL,
4712 0);
4713 dev_err(hba->dev, "%s: Device not present\n", __func__);
4714 ret = -ENXIO;
4715 goto out;
4716 }
4717
4718
4719
4720
4721
4722
4723 if (ret && ufshcd_hba_enable(hba)) {
4724 ufshcd_update_evt_hist(hba,
4725 UFS_EVT_LINK_STARTUP_FAIL,
4726 (u32)ret);
4727 goto out;
4728 }
4729 } while (ret && retries--);
4730
4731 if (ret) {
4732
4733 ufshcd_update_evt_hist(hba,
4734 UFS_EVT_LINK_STARTUP_FAIL,
4735 (u32)ret);
4736 goto out;
4737 }
4738
4739 if (link_startup_again) {
4740 link_startup_again = false;
4741 retries = DME_LINKSTARTUP_RETRIES;
4742 goto link_startup;
4743 }
4744
4745
4746 ufshcd_init_pwr_info(hba);
4747 ufshcd_print_pwr_info(hba);
4748
4749 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4750 ret = ufshcd_disable_device_tx_lcc(hba);
4751 if (ret)
4752 goto out;
4753 }
4754
4755
4756 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4757 if (ret)
4758 goto out;
4759
4760
4761 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4762 ret = ufshcd_make_hba_operational(hba);
4763out:
4764 if (ret) {
4765 dev_err(hba->dev, "link startup failed %d\n", ret);
4766 ufshcd_print_host_state(hba);
4767 ufshcd_print_pwr_info(hba);
4768 ufshcd_print_evt_hist(hba);
4769 }
4770 return ret;
4771}
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4784{
4785 int err = 0;
4786 int retries;
4787
4788 ufshcd_hold(hba, false);
4789 mutex_lock(&hba->dev_cmd.lock);
4790 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4791 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4792 NOP_OUT_TIMEOUT);
4793
4794 if (!err || err == -ETIMEDOUT)
4795 break;
4796
4797 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4798 }
4799 mutex_unlock(&hba->dev_cmd.lock);
4800 ufshcd_release(hba);
4801
4802 if (err)
4803 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4804 return err;
4805}
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4817{
4818 int ret = 0;
4819 u8 lun_qdepth;
4820 struct ufs_hba *hba;
4821
4822 hba = shost_priv(sdev->host);
4823
4824 lun_qdepth = hba->nutrs;
4825 ret = ufshcd_read_unit_desc_param(hba,
4826 ufshcd_scsi_to_upiu_lun(sdev->lun),
4827 UNIT_DESC_PARAM_LU_Q_DEPTH,
4828 &lun_qdepth,
4829 sizeof(lun_qdepth));
4830
4831
4832 if (ret == -EOPNOTSUPP)
4833 lun_qdepth = 1;
4834 else if (!lun_qdepth)
4835
4836 lun_qdepth = hba->nutrs;
4837 else
4838 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4839
4840 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4841 __func__, lun_qdepth);
4842 scsi_change_queue_depth(sdev, lun_qdepth);
4843}
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4857 u8 lun,
4858 u8 *b_lu_write_protect)
4859{
4860 int ret;
4861
4862 if (!b_lu_write_protect)
4863 ret = -EINVAL;
4864
4865
4866
4867
4868
4869 else if (lun >= hba->dev_info.max_lu_supported)
4870 ret = -ENOTSUPP;
4871 else
4872 ret = ufshcd_read_unit_desc_param(hba,
4873 lun,
4874 UNIT_DESC_PARAM_LU_WR_PROTECT,
4875 b_lu_write_protect,
4876 sizeof(*b_lu_write_protect));
4877 return ret;
4878}
4879
4880
4881
4882
4883
4884
4885
4886
4887static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4888 struct scsi_device *sdev)
4889{
4890 if (hba->dev_info.f_power_on_wp_en &&
4891 !hba->dev_info.is_lu_power_on_wp) {
4892 u8 b_lu_write_protect;
4893
4894 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4895 &b_lu_write_protect) &&
4896 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4897 hba->dev_info.is_lu_power_on_wp = true;
4898 }
4899}
4900
4901
4902
4903
4904
4905
4906static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4907{
4908 struct device_link *link;
4909
4910
4911
4912
4913
4914 if (hba->sdev_ufs_device) {
4915 link = device_link_add(&sdev->sdev_gendev,
4916 &hba->sdev_ufs_device->sdev_gendev,
4917 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4918 if (!link) {
4919 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4920 dev_name(&hba->sdev_ufs_device->sdev_gendev));
4921 return;
4922 }
4923 hba->luns_avail--;
4924
4925 if (hba->luns_avail == 1) {
4926 ufshcd_rpm_put(hba);
4927 return;
4928 }
4929 } else {
4930
4931
4932
4933
4934 hba->luns_avail--;
4935 }
4936}
4937
4938
4939
4940
4941
4942
4943
4944static int ufshcd_slave_alloc(struct scsi_device *sdev)
4945{
4946 struct ufs_hba *hba;
4947
4948 hba = shost_priv(sdev->host);
4949
4950
4951 sdev->use_10_for_ms = 1;
4952
4953
4954 sdev->set_dbd_for_ms = 1;
4955
4956
4957 sdev->allow_restart = 1;
4958
4959
4960 sdev->no_report_opcodes = 1;
4961
4962
4963 sdev->no_write_same = 1;
4964
4965 ufshcd_set_queue_depth(sdev);
4966
4967 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4968
4969 ufshcd_setup_links(hba, sdev);
4970
4971 return 0;
4972}
4973
4974
4975
4976
4977
4978
4979
4980
4981static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4982{
4983 struct ufs_hba *hba = shost_priv(sdev->host);
4984
4985 if (depth > hba->nutrs)
4986 depth = hba->nutrs;
4987 return scsi_change_queue_depth(sdev, depth);
4988}
4989
4990
4991
4992
4993
4994static int ufshcd_slave_configure(struct scsi_device *sdev)
4995{
4996 struct ufs_hba *hba = shost_priv(sdev->host);
4997 struct request_queue *q = sdev->request_queue;
4998
4999 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5000 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
5001 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
5002
5003
5004
5005
5006 if (is_device_wlun(sdev))
5007 pm_runtime_get_noresume(&sdev->sdev_gendev);
5008 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5009 sdev->rpm_autosuspend = 1;
5010
5011 ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
5012
5013 return 0;
5014}
5015
5016
5017
5018
5019
5020static void ufshcd_slave_destroy(struct scsi_device *sdev)
5021{
5022 struct ufs_hba *hba;
5023
5024 hba = shost_priv(sdev->host);
5025
5026 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5027 unsigned long flags;
5028
5029 spin_lock_irqsave(hba->host->host_lock, flags);
5030 hba->sdev_ufs_device = NULL;
5031 spin_unlock_irqrestore(hba->host->host_lock, flags);
5032 }
5033}
5034
5035
5036
5037
5038
5039
5040
5041
5042static inline int
5043ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5044{
5045 int result = 0;
5046
5047 switch (scsi_status) {
5048 case SAM_STAT_CHECK_CONDITION:
5049 ufshcd_copy_sense_data(lrbp);
5050 fallthrough;
5051 case SAM_STAT_GOOD:
5052 result |= DID_OK << 16 | scsi_status;
5053 break;
5054 case SAM_STAT_TASK_SET_FULL:
5055 case SAM_STAT_BUSY:
5056 case SAM_STAT_TASK_ABORTED:
5057 ufshcd_copy_sense_data(lrbp);
5058 result |= scsi_status;
5059 break;
5060 default:
5061 result |= DID_ERROR << 16;
5062 break;
5063 }
5064
5065 return result;
5066}
5067
5068
5069
5070
5071
5072
5073
5074
5075static inline int
5076ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5077{
5078 int result = 0;
5079 int scsi_status;
5080 int ocs;
5081
5082
5083 ocs = ufshcd_get_tr_ocs(lrbp);
5084
5085 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5086 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5087 MASK_RSP_UPIU_RESULT)
5088 ocs = OCS_SUCCESS;
5089 }
5090
5091 switch (ocs) {
5092 case OCS_SUCCESS:
5093 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5094 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5095 switch (result) {
5096 case UPIU_TRANSACTION_RESPONSE:
5097
5098
5099
5100
5101 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5102
5103
5104
5105
5106
5107 scsi_status = result & MASK_SCSI_STATUS;
5108 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122 if (!hba->pm_op_in_progress &&
5123 !ufshcd_eh_in_progress(hba) &&
5124 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5125
5126 schedule_work(&hba->eeh_work);
5127 break;
5128 case UPIU_TRANSACTION_REJECT_UPIU:
5129
5130 result = DID_ERROR << 16;
5131 dev_err(hba->dev,
5132 "Reject UPIU not fully implemented\n");
5133 break;
5134 default:
5135 dev_err(hba->dev,
5136 "Unexpected request response code = %x\n",
5137 result);
5138 result = DID_ERROR << 16;
5139 break;
5140 }
5141 break;
5142 case OCS_ABORTED:
5143 result |= DID_ABORT << 16;
5144 break;
5145 case OCS_INVALID_COMMAND_STATUS:
5146 result |= DID_REQUEUE << 16;
5147 break;
5148 case OCS_INVALID_CMD_TABLE_ATTR:
5149 case OCS_INVALID_PRDT_ATTR:
5150 case OCS_MISMATCH_DATA_BUF_SIZE:
5151 case OCS_MISMATCH_RESP_UPIU_SIZE:
5152 case OCS_PEER_COMM_FAILURE:
5153 case OCS_FATAL_ERROR:
5154 case OCS_DEVICE_FATAL_ERROR:
5155 case OCS_INVALID_CRYPTO_CONFIG:
5156 case OCS_GENERAL_CRYPTO_ERROR:
5157 default:
5158 result |= DID_ERROR << 16;
5159 dev_err(hba->dev,
5160 "OCS error from controller = %x for tag %d\n",
5161 ocs, lrbp->task_tag);
5162 ufshcd_print_evt_hist(hba);
5163 ufshcd_print_host_state(hba);
5164 break;
5165 }
5166
5167 if ((host_byte(result) != DID_OK) &&
5168 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5169 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
5170 return result;
5171}
5172
5173static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5174 u32 intr_mask)
5175{
5176 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5177 !ufshcd_is_auto_hibern8_enabled(hba))
5178 return false;
5179
5180 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5181 return false;
5182
5183 if (hba->active_uic_cmd &&
5184 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5185 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5186 return false;
5187
5188 return true;
5189}
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5201{
5202 irqreturn_t retval = IRQ_NONE;
5203
5204 spin_lock(hba->host->host_lock);
5205 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5206 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5207
5208 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5209 hba->active_uic_cmd->argument2 |=
5210 ufshcd_get_uic_cmd_result(hba);
5211 hba->active_uic_cmd->argument3 =
5212 ufshcd_get_dme_attr_val(hba);
5213 if (!hba->uic_async_done)
5214 hba->active_uic_cmd->cmd_active = 0;
5215 complete(&hba->active_uic_cmd->done);
5216 retval = IRQ_HANDLED;
5217 }
5218
5219 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5220 hba->active_uic_cmd->cmd_active = 0;
5221 complete(hba->uic_async_done);
5222 retval = IRQ_HANDLED;
5223 }
5224
5225 if (retval == IRQ_HANDLED)
5226 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5227 UFS_CMD_COMP);
5228 spin_unlock(hba->host->host_lock);
5229 return retval;
5230}
5231
5232
5233
5234
5235
5236
5237static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5238 unsigned long completed_reqs)
5239{
5240 struct ufshcd_lrb *lrbp;
5241 struct scsi_cmnd *cmd;
5242 int result;
5243 int index;
5244 bool update_scaling = false;
5245
5246 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5247 if (!test_and_clear_bit(index, &hba->outstanding_reqs))
5248 continue;
5249 lrbp = &hba->lrb[index];
5250 lrbp->compl_time_stamp = ktime_get();
5251 cmd = lrbp->cmd;
5252 if (cmd) {
5253 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5254 ufshcd_update_monitor(hba, lrbp);
5255 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
5256 result = ufshcd_transfer_rsp_status(hba, lrbp);
5257 scsi_dma_unmap(cmd);
5258 cmd->result = result;
5259
5260 lrbp->cmd = NULL;
5261
5262 cmd->scsi_done(cmd);
5263 ufshcd_release(hba);
5264 update_scaling = true;
5265 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5266 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5267 if (hba->dev_cmd.complete) {
5268 ufshcd_add_command_trace(hba, index,
5269 UFS_DEV_COMP);
5270 complete(hba->dev_cmd.complete);
5271 update_scaling = true;
5272 }
5273 }
5274 if (update_scaling)
5275 ufshcd_clk_scaling_update_busy(hba);
5276 }
5277}
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
5289{
5290 unsigned long completed_reqs = 0;
5291
5292
5293
5294
5295
5296
5297
5298
5299 if (ufshcd_is_intr_aggr_allowed(hba) &&
5300 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5301 ufshcd_reset_intr_aggr(hba);
5302
5303 if (use_utrlcnr) {
5304 u32 utrlcnr;
5305
5306 utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
5307 if (utrlcnr) {
5308 ufshcd_writel(hba, utrlcnr,
5309 REG_UTP_TRANSFER_REQ_LIST_COMPL);
5310 completed_reqs = utrlcnr;
5311 }
5312 } else {
5313 unsigned long flags;
5314 u32 tr_doorbell;
5315
5316 spin_lock_irqsave(hba->host->host_lock, flags);
5317 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5318 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5319 spin_unlock_irqrestore(hba->host->host_lock, flags);
5320 }
5321
5322 if (completed_reqs) {
5323 __ufshcd_transfer_req_compl(hba, completed_reqs);
5324 return IRQ_HANDLED;
5325 } else {
5326 return IRQ_NONE;
5327 }
5328}
5329
5330int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5331{
5332 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5333 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5334 &ee_ctrl_mask);
5335}
5336
5337int ufshcd_write_ee_control(struct ufs_hba *hba)
5338{
5339 int err;
5340
5341 mutex_lock(&hba->ee_ctrl_mutex);
5342 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5343 mutex_unlock(&hba->ee_ctrl_mutex);
5344 if (err)
5345 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5346 __func__, err);
5347 return err;
5348}
5349
5350int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
5351 u16 set, u16 clr)
5352{
5353 u16 new_mask, ee_ctrl_mask;
5354 int err = 0;
5355
5356 mutex_lock(&hba->ee_ctrl_mutex);
5357 new_mask = (*mask & ~clr) | set;
5358 ee_ctrl_mask = new_mask | *other_mask;
5359 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5360 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5361
5362 if (!err) {
5363 hba->ee_ctrl_mask = ee_ctrl_mask;
5364 *mask = new_mask;
5365 }
5366 mutex_unlock(&hba->ee_ctrl_mutex);
5367 return err;
5368}
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5381{
5382 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5383}
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5396{
5397 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5398}
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5412{
5413 int err = 0;
5414
5415 if (hba->auto_bkops_enabled)
5416 goto out;
5417
5418 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5419 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5420 if (err) {
5421 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5422 __func__, err);
5423 goto out;
5424 }
5425
5426 hba->auto_bkops_enabled = true;
5427 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5428
5429
5430 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5431 if (err)
5432 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5433 __func__, err);
5434out:
5435 return err;
5436}
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5451{
5452 int err = 0;
5453
5454 if (!hba->auto_bkops_enabled)
5455 goto out;
5456
5457
5458
5459
5460
5461 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5462 if (err) {
5463 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5464 __func__, err);
5465 goto out;
5466 }
5467
5468 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5469 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5470 if (err) {
5471 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5472 __func__, err);
5473 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5474 goto out;
5475 }
5476
5477 hba->auto_bkops_enabled = false;
5478 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5479 hba->is_urgent_bkops_lvl_checked = false;
5480out:
5481 return err;
5482}
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5494{
5495 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5496 hba->auto_bkops_enabled = false;
5497 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5498 ufshcd_enable_auto_bkops(hba);
5499 } else {
5500 hba->auto_bkops_enabled = true;
5501 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5502 ufshcd_disable_auto_bkops(hba);
5503 }
5504 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5505 hba->is_urgent_bkops_lvl_checked = false;
5506}
5507
5508static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5509{
5510 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5511 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5512}
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5531 enum bkops_status status)
5532{
5533 int err;
5534 u32 curr_status = 0;
5535
5536 err = ufshcd_get_bkops_status(hba, &curr_status);
5537 if (err) {
5538 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5539 __func__, err);
5540 goto out;
5541 } else if (curr_status > BKOPS_STATUS_MAX) {
5542 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5543 __func__, curr_status);
5544 err = -EINVAL;
5545 goto out;
5546 }
5547
5548 if (curr_status >= status)
5549 err = ufshcd_enable_auto_bkops(hba);
5550 else
5551 err = ufshcd_disable_auto_bkops(hba);
5552out:
5553 return err;
5554}
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564
5565
5566static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5567{
5568 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5569}
5570
5571static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5572{
5573 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5574 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5575}
5576
5577static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5578{
5579 int err;
5580 u32 curr_status = 0;
5581
5582 if (hba->is_urgent_bkops_lvl_checked)
5583 goto enable_auto_bkops;
5584
5585 err = ufshcd_get_bkops_status(hba, &curr_status);
5586 if (err) {
5587 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5588 __func__, err);
5589 goto out;
5590 }
5591
5592
5593
5594
5595
5596
5597
5598 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5599 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5600 __func__, curr_status);
5601
5602 hba->urgent_bkops_lvl = curr_status;
5603 hba->is_urgent_bkops_lvl_checked = true;
5604 }
5605
5606enable_auto_bkops:
5607 err = ufshcd_enable_auto_bkops(hba);
5608out:
5609 if (err < 0)
5610 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5611 __func__, err);
5612}
5613
5614static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5615{
5616 u8 index;
5617 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5618 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5619
5620 index = ufshcd_wb_get_query_index(hba);
5621 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5622}
5623
5624int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5625{
5626 int ret;
5627
5628 if (!ufshcd_is_wb_allowed(hba))
5629 return 0;
5630
5631 if (!(enable ^ hba->dev_info.wb_enabled))
5632 return 0;
5633
5634 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5635 if (ret) {
5636 dev_err(hba->dev, "%s Write Booster %s failed %d\n",
5637 __func__, enable ? "enable" : "disable", ret);
5638 return ret;
5639 }
5640
5641 hba->dev_info.wb_enabled = enable;
5642 dev_info(hba->dev, "%s Write Booster %s\n",
5643 __func__, enable ? "enabled" : "disabled");
5644
5645 return ret;
5646}
5647
5648static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5649{
5650 int ret;
5651
5652 ret = __ufshcd_wb_toggle(hba, set,
5653 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5654 if (ret) {
5655 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
5656 __func__, set ? "enable" : "disable", ret);
5657 return;
5658 }
5659 dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
5660 __func__, set ? "enabled" : "disabled");
5661}
5662
5663static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5664{
5665 int ret;
5666
5667 if (!ufshcd_is_wb_allowed(hba) ||
5668 hba->dev_info.wb_buf_flush_enabled == enable)
5669 return;
5670
5671 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
5672 if (ret) {
5673 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5674 enable ? "enable" : "disable", ret);
5675 return;
5676 }
5677
5678 hba->dev_info.wb_buf_flush_enabled = enable;
5679
5680 dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
5681 __func__, enable ? "enabled" : "disabled");
5682}
5683
5684static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5685 u32 avail_buf)
5686{
5687 u32 cur_buf;
5688 int ret;
5689 u8 index;
5690
5691 index = ufshcd_wb_get_query_index(hba);
5692 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5693 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5694 index, 0, &cur_buf);
5695 if (ret) {
5696 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5697 __func__, ret);
5698 return false;
5699 }
5700
5701 if (!cur_buf) {
5702 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5703 cur_buf);
5704 return false;
5705 }
5706
5707 if (avail_buf < hba->vps->wb_flush_threshold)
5708 return true;
5709
5710 return false;
5711}
5712
5713static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5714{
5715 int ret;
5716 u32 avail_buf;
5717 u8 index;
5718
5719 if (!ufshcd_is_wb_allowed(hba))
5720 return false;
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732 index = ufshcd_wb_get_query_index(hba);
5733 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5734 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5735 index, 0, &avail_buf);
5736 if (ret) {
5737 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5738 __func__, ret);
5739 return false;
5740 }
5741
5742 if (!hba->dev_info.b_presrv_uspc_en) {
5743 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5744 return true;
5745 return false;
5746 }
5747
5748 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5749}
5750
5751static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5752{
5753 struct ufs_hba *hba = container_of(to_delayed_work(work),
5754 struct ufs_hba,
5755 rpm_dev_flush_recheck_work);
5756
5757
5758
5759
5760
5761
5762 ufshcd_rpm_get_sync(hba);
5763 ufshcd_rpm_put_sync(hba);
5764}
5765
5766
5767
5768
5769
5770
5771
5772
5773static void ufshcd_exception_event_handler(struct work_struct *work)
5774{
5775 struct ufs_hba *hba;
5776 int err;
5777 u32 status = 0;
5778 hba = container_of(work, struct ufs_hba, eeh_work);
5779
5780 ufshcd_scsi_block_requests(hba);
5781 err = ufshcd_get_ee_status(hba, &status);
5782 if (err) {
5783 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5784 __func__, err);
5785 goto out;
5786 }
5787
5788 trace_ufshcd_exception_event(dev_name(hba->dev), status);
5789
5790 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
5791 ufshcd_bkops_exception_event_handler(hba);
5792
5793 ufs_debugfs_exception_event(hba, status);
5794out:
5795 ufshcd_scsi_unblock_requests(hba);
5796 return;
5797}
5798
5799
5800static void ufshcd_complete_requests(struct ufs_hba *hba)
5801{
5802 ufshcd_trc_handler(hba, false);
5803 ufshcd_tmc_handler(hba);
5804}
5805
5806
5807
5808
5809
5810
5811
5812
5813static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5814{
5815 unsigned long flags;
5816 bool err_handling = true;
5817
5818 spin_lock_irqsave(hba->host->host_lock, flags);
5819
5820
5821
5822
5823 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5824 goto out;
5825
5826 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5827 ((hba->saved_err & UIC_ERROR) &&
5828 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5829 goto out;
5830
5831 if ((hba->saved_err & UIC_ERROR) &&
5832 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5833 int err;
5834
5835
5836
5837 spin_unlock_irqrestore(hba->host->host_lock, flags);
5838 msleep(50);
5839 spin_lock_irqsave(hba->host->host_lock, flags);
5840
5841
5842
5843
5844
5845 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5846 ((hba->saved_err & UIC_ERROR) &&
5847 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5848 goto out;
5849
5850
5851
5852
5853
5854
5855
5856
5857 spin_unlock_irqrestore(hba->host->host_lock, flags);
5858 err = ufshcd_verify_dev_init(hba);
5859 spin_lock_irqsave(hba->host->host_lock, flags);
5860
5861 if (err)
5862 goto out;
5863
5864
5865 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5866 hba->saved_err &= ~UIC_ERROR;
5867
5868 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5869 if (!hba->saved_uic_err)
5870 err_handling = false;
5871 }
5872out:
5873 spin_unlock_irqrestore(hba->host->host_lock, flags);
5874 return err_handling;
5875}
5876
5877
5878static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5879{
5880 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5881 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5882}
5883
5884
5885static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5886{
5887
5888 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5889 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5890 ufshcd_is_saved_err_fatal(hba))
5891 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5892 else
5893 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5894 queue_work(hba->eh_wq, &hba->eh_work);
5895 }
5896}
5897
5898static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5899{
5900 down_write(&hba->clk_scaling_lock);
5901 hba->clk_scaling.is_allowed = allow;
5902 up_write(&hba->clk_scaling_lock);
5903}
5904
5905static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5906{
5907 if (suspend) {
5908 if (hba->clk_scaling.is_enabled)
5909 ufshcd_suspend_clkscaling(hba);
5910 ufshcd_clk_scaling_allow(hba, false);
5911 } else {
5912 ufshcd_clk_scaling_allow(hba, true);
5913 if (hba->clk_scaling.is_enabled)
5914 ufshcd_resume_clkscaling(hba);
5915 }
5916}
5917
5918static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5919{
5920 ufshcd_rpm_get_sync(hba);
5921 if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
5922 hba->is_sys_suspended) {
5923 enum ufs_pm_op pm_op;
5924
5925
5926
5927
5928
5929
5930 ufshcd_setup_hba_vreg(hba, true);
5931 ufshcd_enable_irq(hba);
5932 ufshcd_setup_vreg(hba, true);
5933 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5934 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5935 ufshcd_hold(hba, false);
5936 if (!ufshcd_is_clkgating_allowed(hba))
5937 ufshcd_setup_clocks(hba, true);
5938 ufshcd_release(hba);
5939 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5940 ufshcd_vops_resume(hba, pm_op);
5941 } else {
5942 ufshcd_hold(hba, false);
5943 if (ufshcd_is_clkscaling_supported(hba) &&
5944 hba->clk_scaling.is_enabled)
5945 ufshcd_suspend_clkscaling(hba);
5946 ufshcd_clk_scaling_allow(hba, false);
5947 }
5948 ufshcd_scsi_block_requests(hba);
5949
5950 down_write(&hba->clk_scaling_lock);
5951 up_write(&hba->clk_scaling_lock);
5952 cancel_work_sync(&hba->eeh_work);
5953}
5954
5955static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5956{
5957 ufshcd_scsi_unblock_requests(hba);
5958 ufshcd_release(hba);
5959 if (ufshcd_is_clkscaling_supported(hba))
5960 ufshcd_clk_scaling_suspend(hba, false);
5961 ufshcd_clear_ua_wluns(hba);
5962 ufshcd_rpm_put(hba);
5963}
5964
5965static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5966{
5967 return (!hba->is_powered || hba->shutting_down ||
5968 !hba->sdev_ufs_device ||
5969 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5970 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5971 ufshcd_is_link_broken(hba))));
5972}
5973
5974#ifdef CONFIG_PM
5975static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5976{
5977 struct Scsi_Host *shost = hba->host;
5978 struct scsi_device *sdev;
5979 struct request_queue *q;
5980 int ret;
5981
5982 hba->is_sys_suspended = false;
5983
5984
5985
5986
5987 ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
5988
5989
5990 if (ret)
5991 ret = pm_runtime_set_active(hba->dev);
5992
5993
5994
5995
5996
5997
5998 if (!ret) {
5999 shost_for_each_device(sdev, shost) {
6000 q = sdev->request_queue;
6001 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6002 q->rpm_status == RPM_SUSPENDING))
6003 pm_request_resume(q->dev);
6004 }
6005 }
6006}
6007#else
6008static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6009{
6010}
6011#endif
6012
6013static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6014{
6015 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6016 u32 mode;
6017
6018 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6019
6020 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6021 return true;
6022
6023 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6024 return true;
6025
6026 return false;
6027}
6028
6029
6030
6031
6032
6033static void ufshcd_err_handler(struct work_struct *work)
6034{
6035 struct ufs_hba *hba;
6036 unsigned long flags;
6037 bool err_xfer = false;
6038 bool err_tm = false;
6039 int err = 0, pmc_err;
6040 int tag;
6041 bool needs_reset = false, needs_restore = false;
6042
6043 hba = container_of(work, struct ufs_hba, eh_work);
6044
6045 down(&hba->host_sem);
6046 spin_lock_irqsave(hba->host->host_lock, flags);
6047 if (ufshcd_err_handling_should_stop(hba)) {
6048 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6049 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6050 spin_unlock_irqrestore(hba->host->host_lock, flags);
6051 up(&hba->host_sem);
6052 return;
6053 }
6054 ufshcd_set_eh_in_progress(hba);
6055 spin_unlock_irqrestore(hba->host->host_lock, flags);
6056 ufshcd_err_handling_prepare(hba);
6057
6058 ufshcd_complete_requests(hba);
6059 spin_lock_irqsave(hba->host->host_lock, flags);
6060 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6061 hba->ufshcd_state = UFSHCD_STATE_RESET;
6062
6063
6064
6065
6066 if (ufshcd_err_handling_should_stop(hba))
6067 goto skip_err_handling;
6068
6069 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6070 bool ret;
6071
6072 spin_unlock_irqrestore(hba->host->host_lock, flags);
6073
6074 ret = ufshcd_quirk_dl_nac_errors(hba);
6075 spin_lock_irqsave(hba->host->host_lock, flags);
6076 if (!ret && ufshcd_err_handling_should_stop(hba))
6077 goto skip_err_handling;
6078 }
6079
6080 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6081 (hba->saved_uic_err &&
6082 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6083 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6084
6085 spin_unlock_irqrestore(hba->host->host_lock, flags);
6086 ufshcd_print_host_state(hba);
6087 ufshcd_print_pwr_info(hba);
6088 ufshcd_print_evt_hist(hba);
6089 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6090 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6091 spin_lock_irqsave(hba->host->host_lock, flags);
6092 }
6093
6094
6095
6096
6097
6098
6099 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6100 ufshcd_is_saved_err_fatal(hba) ||
6101 ((hba->saved_err & UIC_ERROR) &&
6102 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6103 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6104 needs_reset = true;
6105 goto do_reset;
6106 }
6107
6108
6109
6110
6111
6112 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6113 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6114 if (!hba->saved_uic_err)
6115 hba->saved_err &= ~UIC_ERROR;
6116 spin_unlock_irqrestore(hba->host->host_lock, flags);
6117 if (ufshcd_is_pwr_mode_restore_needed(hba))
6118 needs_restore = true;
6119 spin_lock_irqsave(hba->host->host_lock, flags);
6120 if (!hba->saved_err && !needs_restore)
6121 goto skip_err_handling;
6122 }
6123
6124 hba->silence_err_logs = true;
6125
6126 spin_unlock_irqrestore(hba->host->host_lock, flags);
6127
6128 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6129 if (ufshcd_try_to_abort_task(hba, tag)) {
6130 err_xfer = true;
6131 goto lock_skip_pending_xfer_clear;
6132 }
6133 }
6134
6135
6136 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6137 if (ufshcd_clear_tm_cmd(hba, tag)) {
6138 err_tm = true;
6139 goto lock_skip_pending_xfer_clear;
6140 }
6141 }
6142
6143lock_skip_pending_xfer_clear:
6144
6145 ufshcd_complete_requests(hba);
6146
6147 spin_lock_irqsave(hba->host->host_lock, flags);
6148 hba->silence_err_logs = false;
6149 if (err_xfer || err_tm) {
6150 needs_reset = true;
6151 goto do_reset;
6152 }
6153
6154
6155
6156
6157
6158 if (needs_restore) {
6159 spin_unlock_irqrestore(hba->host->host_lock, flags);
6160
6161
6162
6163
6164 down_write(&hba->clk_scaling_lock);
6165 hba->force_pmc = true;
6166 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6167 if (pmc_err) {
6168 needs_reset = true;
6169 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6170 __func__, pmc_err);
6171 }
6172 hba->force_pmc = false;
6173 ufshcd_print_pwr_info(hba);
6174 up_write(&hba->clk_scaling_lock);
6175 spin_lock_irqsave(hba->host->host_lock, flags);
6176 }
6177
6178do_reset:
6179
6180 if (needs_reset) {
6181 hba->force_reset = false;
6182 spin_unlock_irqrestore(hba->host->host_lock, flags);
6183 err = ufshcd_reset_and_restore(hba);
6184 if (err)
6185 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6186 __func__, err);
6187 else
6188 ufshcd_recover_pm_error(hba);
6189 spin_lock_irqsave(hba->host->host_lock, flags);
6190 }
6191
6192skip_err_handling:
6193 if (!needs_reset) {
6194 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6195 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6196 if (hba->saved_err || hba->saved_uic_err)
6197 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6198 __func__, hba->saved_err, hba->saved_uic_err);
6199 }
6200 ufshcd_clear_eh_in_progress(hba);
6201 spin_unlock_irqrestore(hba->host->host_lock, flags);
6202 ufshcd_err_handling_unprepare(hba);
6203 up(&hba->host_sem);
6204}
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6215{
6216 u32 reg;
6217 irqreturn_t retval = IRQ_NONE;
6218
6219
6220 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6221 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6222 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6223 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6224
6225
6226
6227
6228 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6229 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6230 __func__);
6231
6232
6233 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6234 struct uic_command *cmd = NULL;
6235
6236 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6237 if (hba->uic_async_done && hba->active_uic_cmd)
6238 cmd = hba->active_uic_cmd;
6239
6240
6241
6242
6243 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6244 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6245 }
6246 retval |= IRQ_HANDLED;
6247 }
6248
6249
6250 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6251 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6252 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6253 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6254
6255 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6256 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6257 else if (hba->dev_quirks &
6258 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6259 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6260 hba->uic_error |=
6261 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6262 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6263 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6264 }
6265 retval |= IRQ_HANDLED;
6266 }
6267
6268
6269 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6270 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6271 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6272 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6273 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6274 retval |= IRQ_HANDLED;
6275 }
6276
6277 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6278 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6279 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6280 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6281 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6282 retval |= IRQ_HANDLED;
6283 }
6284
6285 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6286 if ((reg & UIC_DME_ERROR) &&
6287 (reg & UIC_DME_ERROR_CODE_MASK)) {
6288 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6289 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6290 retval |= IRQ_HANDLED;
6291 }
6292
6293 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6294 __func__, hba->uic_error);
6295 return retval;
6296}
6297
6298
6299
6300
6301
6302
6303
6304
6305
6306
6307static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6308{
6309 bool queue_eh_work = false;
6310 irqreturn_t retval = IRQ_NONE;
6311
6312 spin_lock(hba->host->host_lock);
6313 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6314
6315 if (hba->errors & INT_FATAL_ERRORS) {
6316 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6317 hba->errors);
6318 queue_eh_work = true;
6319 }
6320
6321 if (hba->errors & UIC_ERROR) {
6322 hba->uic_error = 0;
6323 retval = ufshcd_update_uic_error(hba);
6324 if (hba->uic_error)
6325 queue_eh_work = true;
6326 }
6327
6328 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6329 dev_err(hba->dev,
6330 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6331 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6332 "Enter" : "Exit",
6333 hba->errors, ufshcd_get_upmcrs(hba));
6334 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6335 hba->errors);
6336 ufshcd_set_link_broken(hba);
6337 queue_eh_work = true;
6338 }
6339
6340 if (queue_eh_work) {
6341
6342
6343
6344
6345 hba->saved_err |= hba->errors;
6346 hba->saved_uic_err |= hba->uic_error;
6347
6348
6349 if ((hba->saved_err &
6350 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6351 (hba->saved_uic_err &&
6352 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6353 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6354 __func__, hba->saved_err,
6355 hba->saved_uic_err);
6356 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6357 "host_regs: ");
6358 ufshcd_print_pwr_info(hba);
6359 }
6360 ufshcd_schedule_eh_work(hba);
6361 retval |= IRQ_HANDLED;
6362 }
6363
6364
6365
6366
6367
6368
6369 hba->errors = 0;
6370 hba->uic_error = 0;
6371 spin_unlock(hba->host->host_lock);
6372 return retval;
6373}
6374
6375struct ctm_info {
6376 struct ufs_hba *hba;
6377 unsigned long pending;
6378 unsigned int ncpl;
6379};
6380
6381static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
6382{
6383 struct ctm_info *const ci = priv;
6384 struct completion *c;
6385
6386 WARN_ON_ONCE(reserved);
6387 if (test_bit(req->tag, &ci->pending))
6388 return true;
6389 ci->ncpl++;
6390 c = req->end_io_data;
6391 if (c)
6392 complete(c);
6393 return true;
6394}
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6405{
6406 unsigned long flags;
6407 struct request_queue *q = hba->tmf_queue;
6408 struct ctm_info ci = {
6409 .hba = hba,
6410 };
6411
6412 spin_lock_irqsave(hba->host->host_lock, flags);
6413 ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6414 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
6415 spin_unlock_irqrestore(hba->host->host_lock, flags);
6416
6417 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
6418}
6419
6420
6421
6422
6423
6424
6425
6426
6427
6428
6429static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6430{
6431 irqreturn_t retval = IRQ_NONE;
6432
6433 if (intr_status & UFSHCD_UIC_MASK)
6434 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6435
6436 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6437 retval |= ufshcd_check_errors(hba, intr_status);
6438
6439 if (intr_status & UTP_TASK_REQ_COMPL)
6440 retval |= ufshcd_tmc_handler(hba);
6441
6442 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6443 retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
6444
6445 return retval;
6446}
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457static irqreturn_t ufshcd_intr(int irq, void *__hba)
6458{
6459 u32 intr_status, enabled_intr_status = 0;
6460 irqreturn_t retval = IRQ_NONE;
6461 struct ufs_hba *hba = __hba;
6462 int retries = hba->nutrs;
6463
6464 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6465 hba->ufs_stats.last_intr_status = intr_status;
6466 hba->ufs_stats.last_intr_ts = ktime_get();
6467
6468
6469
6470
6471
6472
6473
6474 while (intr_status && retries--) {
6475 enabled_intr_status =
6476 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6477 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6478 if (enabled_intr_status)
6479 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6480
6481 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6482 }
6483
6484 if (enabled_intr_status && retval == IRQ_NONE &&
6485 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6486 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6487 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6488 __func__,
6489 intr_status,
6490 hba->ufs_stats.last_intr_status,
6491 enabled_intr_status);
6492 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6493 }
6494
6495 return retval;
6496}
6497
6498static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6499{
6500 int err = 0;
6501 u32 mask = 1 << tag;
6502 unsigned long flags;
6503
6504 if (!test_bit(tag, &hba->outstanding_tasks))
6505 goto out;
6506
6507 spin_lock_irqsave(hba->host->host_lock, flags);
6508 ufshcd_utmrl_clear(hba, tag);
6509 spin_unlock_irqrestore(hba->host->host_lock, flags);
6510
6511
6512 err = ufshcd_wait_for_register(hba,
6513 REG_UTP_TASK_REQ_DOOR_BELL,
6514 mask, 0, 1000, 1000);
6515out:
6516 return err;
6517}
6518
6519static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6520 struct utp_task_req_desc *treq, u8 tm_function)
6521{
6522 struct request_queue *q = hba->tmf_queue;
6523 struct Scsi_Host *host = hba->host;
6524 DECLARE_COMPLETION_ONSTACK(wait);
6525 struct request *req;
6526 unsigned long flags;
6527 int task_tag, err;
6528
6529
6530
6531
6532 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6533 if (IS_ERR(req))
6534 return PTR_ERR(req);
6535
6536 req->end_io_data = &wait;
6537 ufshcd_hold(hba, false);
6538
6539 spin_lock_irqsave(host->host_lock, flags);
6540 blk_mq_start_request(req);
6541
6542 task_tag = req->tag;
6543 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
6544
6545 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6546 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6547
6548
6549 __set_bit(task_tag, &hba->outstanding_tasks);
6550
6551
6552 wmb();
6553
6554 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6555
6556 wmb();
6557
6558 spin_unlock_irqrestore(host->host_lock, flags);
6559
6560 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6561
6562
6563 err = wait_for_completion_io_timeout(&wait,
6564 msecs_to_jiffies(TM_CMD_TIMEOUT));
6565 if (!err) {
6566
6567
6568
6569
6570 req->end_io_data = NULL;
6571 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6572 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6573 __func__, tm_function);
6574 if (ufshcd_clear_tm_cmd(hba, task_tag))
6575 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6576 __func__, task_tag);
6577 err = -ETIMEDOUT;
6578 } else {
6579 err = 0;
6580 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6581
6582 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
6583 }
6584
6585 spin_lock_irqsave(hba->host->host_lock, flags);
6586 __clear_bit(task_tag, &hba->outstanding_tasks);
6587 spin_unlock_irqrestore(hba->host->host_lock, flags);
6588
6589 ufshcd_release(hba);
6590 blk_put_request(req);
6591
6592 return err;
6593}
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6606 u8 tm_function, u8 *tm_response)
6607{
6608 struct utp_task_req_desc treq = { { 0 }, };
6609 int ocs_value, err;
6610
6611
6612 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6613 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6614
6615
6616 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6617 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6618 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6619
6620
6621
6622
6623
6624 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6625 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
6626
6627 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6628 if (err == -ETIMEDOUT)
6629 return err;
6630
6631 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6632 if (ocs_value != OCS_SUCCESS)
6633 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6634 __func__, ocs_value);
6635 else if (tm_response)
6636 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
6637 MASK_TM_SERVICE_RESP;
6638 return err;
6639}
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
6657
6658static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6659 struct utp_upiu_req *req_upiu,
6660 struct utp_upiu_req *rsp_upiu,
6661 u8 *desc_buff, int *buff_len,
6662 enum dev_cmd_type cmd_type,
6663 enum query_opcode desc_op)
6664{
6665 struct request_queue *q = hba->cmd_queue;
6666 struct request *req;
6667 struct ufshcd_lrb *lrbp;
6668 int err = 0;
6669 int tag;
6670 struct completion wait;
6671 u8 upiu_flags;
6672
6673 down_read(&hba->clk_scaling_lock);
6674
6675 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6676 if (IS_ERR(req)) {
6677 err = PTR_ERR(req);
6678 goto out_unlock;
6679 }
6680 tag = req->tag;
6681 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6682
6683 if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
6684 err = -EBUSY;
6685 goto out;
6686 }
6687
6688 init_completion(&wait);
6689 lrbp = &hba->lrb[tag];
6690 WARN_ON(lrbp->cmd);
6691 lrbp->cmd = NULL;
6692 lrbp->sense_bufflen = 0;
6693 lrbp->sense_buffer = NULL;
6694 lrbp->task_tag = tag;
6695 lrbp->lun = 0;
6696 lrbp->intr_cmd = true;
6697 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6698 hba->dev_cmd.type = cmd_type;
6699
6700 if (hba->ufs_version <= ufshci_version(1, 1))
6701 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6702 else
6703 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6704
6705
6706 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6707
6708 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6709
6710
6711 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6712 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6713
6714
6715
6716
6717 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6718 *buff_len = 0;
6719 }
6720
6721 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6722
6723 hba->dev_cmd.complete = &wait;
6724
6725 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
6726
6727 wmb();
6728
6729 ufshcd_send_command(hba, tag);
6730
6731
6732
6733
6734
6735 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6736
6737
6738 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6739 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6740 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6741 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6742 MASK_QUERY_DATA_SEG_LEN;
6743
6744 if (*buff_len >= resp_len) {
6745 memcpy(desc_buff, descp, resp_len);
6746 *buff_len = resp_len;
6747 } else {
6748 dev_warn(hba->dev,
6749 "%s: rsp size %d is bigger than buffer size %d",
6750 __func__, resp_len, *buff_len);
6751 *buff_len = 0;
6752 err = -EINVAL;
6753 }
6754 }
6755 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
6756 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
6757
6758out:
6759 blk_put_request(req);
6760out_unlock:
6761 up_read(&hba->clk_scaling_lock);
6762 return err;
6763}
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6781 struct utp_upiu_req *req_upiu,
6782 struct utp_upiu_req *rsp_upiu,
6783 int msgcode,
6784 u8 *desc_buff, int *buff_len,
6785 enum query_opcode desc_op)
6786{
6787 int err;
6788 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6789 struct utp_task_req_desc treq = { { 0 }, };
6790 int ocs_value;
6791 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6792
6793 switch (msgcode) {
6794 case UPIU_TRANSACTION_NOP_OUT:
6795 cmd_type = DEV_CMD_TYPE_NOP;
6796 fallthrough;
6797 case UPIU_TRANSACTION_QUERY_REQ:
6798 ufshcd_hold(hba, false);
6799 mutex_lock(&hba->dev_cmd.lock);
6800 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6801 desc_buff, buff_len,
6802 cmd_type, desc_op);
6803 mutex_unlock(&hba->dev_cmd.lock);
6804 ufshcd_release(hba);
6805
6806 break;
6807 case UPIU_TRANSACTION_TASK_REQ:
6808 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6809 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6810
6811 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
6812
6813 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6814 if (err == -ETIMEDOUT)
6815 break;
6816
6817 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6818 if (ocs_value != OCS_SUCCESS) {
6819 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6820 ocs_value);
6821 break;
6822 }
6823
6824 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
6825
6826 break;
6827 default:
6828 err = -EINVAL;
6829
6830 break;
6831 }
6832
6833 return err;
6834}
6835
6836
6837
6838
6839
6840
6841
6842
6843static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6844{
6845 struct Scsi_Host *host;
6846 struct ufs_hba *hba;
6847 u32 pos;
6848 int err;
6849 u8 resp = 0xF, lun;
6850
6851 host = cmd->device->host;
6852 hba = shost_priv(host);
6853
6854 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6855 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
6856 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6857 if (!err)
6858 err = resp;
6859 goto out;
6860 }
6861
6862
6863 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6864 if (hba->lrb[pos].lun == lun) {
6865 err = ufshcd_clear_cmd(hba, pos);
6866 if (err)
6867 break;
6868 __ufshcd_transfer_req_compl(hba, pos);
6869 }
6870 }
6871
6872out:
6873 hba->req_abort_count = 0;
6874 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
6875 if (!err) {
6876 err = SUCCESS;
6877 } else {
6878 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6879 err = FAILED;
6880 }
6881 return err;
6882}
6883
6884static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6885{
6886 struct ufshcd_lrb *lrbp;
6887 int tag;
6888
6889 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6890 lrbp = &hba->lrb[tag];
6891 lrbp->req_abort_skip = true;
6892 }
6893}
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6909{
6910 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6911 int err = 0;
6912 int poll_cnt;
6913 u8 resp = 0xF;
6914 u32 reg;
6915
6916 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6917 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6918 UFS_QUERY_TASK, &resp);
6919 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6920
6921 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6922 __func__, tag);
6923 break;
6924 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6925
6926
6927
6928
6929 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6930 __func__, tag);
6931 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6932 if (reg & (1 << tag)) {
6933
6934 usleep_range(100, 200);
6935 continue;
6936 }
6937
6938 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6939 __func__, tag);
6940 goto out;
6941 } else {
6942 dev_err(hba->dev,
6943 "%s: no response from device. tag = %d, err %d\n",
6944 __func__, tag, err);
6945 if (!err)
6946 err = resp;
6947 goto out;
6948 }
6949 }
6950
6951 if (!poll_cnt) {
6952 err = -EBUSY;
6953 goto out;
6954 }
6955
6956 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6957 UFS_ABORT_TASK, &resp);
6958 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6959 if (!err) {
6960 err = resp;
6961 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6962 __func__, tag, err);
6963 }
6964 goto out;
6965 }
6966
6967 err = ufshcd_clear_cmd(hba, tag);
6968 if (err)
6969 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6970 __func__, tag, err);
6971
6972out:
6973 return err;
6974}
6975
6976
6977
6978
6979
6980
6981
6982static int ufshcd_abort(struct scsi_cmnd *cmd)
6983{
6984 struct Scsi_Host *host;
6985 struct ufs_hba *hba;
6986 unsigned long flags;
6987 unsigned int tag;
6988 int err = 0;
6989 struct ufshcd_lrb *lrbp;
6990 u32 reg;
6991
6992 host = cmd->device->host;
6993 hba = shost_priv(host);
6994 tag = cmd->request->tag;
6995 lrbp = &hba->lrb[tag];
6996 if (!ufshcd_valid_tag(hba, tag)) {
6997 dev_err(hba->dev,
6998 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6999 __func__, tag, cmd, cmd->request);
7000 BUG();
7001 }
7002
7003 ufshcd_hold(hba, false);
7004 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7005
7006 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7007 dev_err(hba->dev,
7008 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7009 __func__, tag, hba->outstanding_reqs, reg);
7010 goto out;
7011 }
7012
7013
7014 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7015
7016
7017
7018
7019
7020
7021
7022
7023 scsi_print_command(cmd);
7024 if (!hba->req_abort_count) {
7025 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7026 ufshcd_print_evt_hist(hba);
7027 ufshcd_print_host_state(hba);
7028 ufshcd_print_pwr_info(hba);
7029 ufshcd_print_trs(hba, 1 << tag, true);
7030 } else {
7031 ufshcd_print_trs(hba, 1 << tag, false);
7032 }
7033 hba->req_abort_count++;
7034
7035 if (!(reg & (1 << tag))) {
7036 dev_err(hba->dev,
7037 "%s: cmd was completed, but without a notifying intr, tag = %d",
7038 __func__, tag);
7039 goto cleanup;
7040 }
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7051 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7052 __ufshcd_transfer_req_compl(hba, (1UL << tag));
7053 set_bit(tag, &hba->outstanding_reqs);
7054 spin_lock_irqsave(host->host_lock, flags);
7055 hba->force_reset = true;
7056 ufshcd_schedule_eh_work(hba);
7057 spin_unlock_irqrestore(host->host_lock, flags);
7058 goto out;
7059 }
7060
7061
7062 if (lrbp->req_abort_skip)
7063 err = -EIO;
7064 else
7065 err = ufshcd_try_to_abort_task(hba, tag);
7066
7067 if (!err) {
7068cleanup:
7069 __ufshcd_transfer_req_compl(hba, (1UL << tag));
7070out:
7071 err = SUCCESS;
7072 } else {
7073 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7074 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7075 err = FAILED;
7076 }
7077
7078
7079
7080
7081
7082 ufshcd_release(hba);
7083 return err;
7084}
7085
7086
7087
7088
7089
7090
7091
7092
7093
7094
7095
7096static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7097{
7098 int err;
7099
7100
7101
7102
7103
7104 ufshcd_hba_stop(hba);
7105 hba->silence_err_logs = true;
7106 ufshcd_complete_requests(hba);
7107 hba->silence_err_logs = false;
7108
7109
7110 ufshcd_set_clk_freq(hba, true);
7111
7112 err = ufshcd_hba_enable(hba);
7113
7114
7115 if (!err)
7116 err = ufshcd_probe_hba(hba, false);
7117
7118 if (err)
7119 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7120 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7121 return err;
7122}
7123
7124
7125
7126
7127
7128
7129
7130
7131
7132
7133static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7134{
7135 u32 saved_err;
7136 u32 saved_uic_err;
7137 int err = 0;
7138 unsigned long flags;
7139 int retries = MAX_HOST_RESET_RETRIES;
7140
7141
7142
7143
7144
7145 spin_lock_irqsave(hba->host->host_lock, flags);
7146 saved_err = hba->saved_err;
7147 saved_uic_err = hba->saved_uic_err;
7148 hba->saved_err = 0;
7149 hba->saved_uic_err = 0;
7150 spin_unlock_irqrestore(hba->host->host_lock, flags);
7151
7152 do {
7153
7154 ufshcd_device_reset(hba);
7155
7156 err = ufshcd_host_reset_and_restore(hba);
7157 } while (err && --retries);
7158
7159 spin_lock_irqsave(hba->host->host_lock, flags);
7160
7161
7162
7163
7164 scsi_report_bus_reset(hba->host, 0);
7165 if (err) {
7166 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7167 hba->saved_err |= saved_err;
7168 hba->saved_uic_err |= saved_uic_err;
7169 }
7170 spin_unlock_irqrestore(hba->host->host_lock, flags);
7171
7172 return err;
7173}
7174
7175
7176
7177
7178
7179
7180
7181static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7182{
7183 int err = SUCCESS;
7184 unsigned long flags;
7185 struct ufs_hba *hba;
7186
7187 hba = shost_priv(cmd->device->host);
7188
7189 spin_lock_irqsave(hba->host->host_lock, flags);
7190 hba->force_reset = true;
7191 ufshcd_schedule_eh_work(hba);
7192 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7193 spin_unlock_irqrestore(hba->host->host_lock, flags);
7194
7195 flush_work(&hba->eh_work);
7196
7197 spin_lock_irqsave(hba->host->host_lock, flags);
7198 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7199 err = FAILED;
7200 spin_unlock_irqrestore(hba->host->host_lock, flags);
7201
7202 return err;
7203}
7204
7205
7206
7207
7208
7209
7210
7211
7212
7213static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7214{
7215 int i;
7216 int curr_uA;
7217 u16 data;
7218 u16 unit;
7219
7220 for (i = start_scan; i >= 0; i--) {
7221 data = be16_to_cpup((__be16 *)&buff[2 * i]);
7222 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7223 ATTR_ICC_LVL_UNIT_OFFSET;
7224 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7225 switch (unit) {
7226 case UFSHCD_NANO_AMP:
7227 curr_uA = curr_uA / 1000;
7228 break;
7229 case UFSHCD_MILI_AMP:
7230 curr_uA = curr_uA * 1000;
7231 break;
7232 case UFSHCD_AMP:
7233 curr_uA = curr_uA * 1000 * 1000;
7234 break;
7235 case UFSHCD_MICRO_AMP:
7236 default:
7237 break;
7238 }
7239 if (sup_curr_uA >= curr_uA)
7240 break;
7241 }
7242 if (i < 0) {
7243 i = 0;
7244 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7245 }
7246
7247 return (u32)i;
7248}
7249
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7260 u8 *desc_buf, int len)
7261{
7262 u32 icc_level = 0;
7263
7264 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7265 !hba->vreg_info.vccq2) {
7266 dev_err(hba->dev,
7267 "%s: Regulator capability was not set, actvIccLevel=%d",
7268 __func__, icc_level);
7269 goto out;
7270 }
7271
7272 if (hba->vreg_info.vcc->max_uA)
7273 icc_level = ufshcd_get_max_icc_level(
7274 hba->vreg_info.vcc->max_uA,
7275 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7276 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7277
7278 if (hba->vreg_info.vccq->max_uA)
7279 icc_level = ufshcd_get_max_icc_level(
7280 hba->vreg_info.vccq->max_uA,
7281 icc_level,
7282 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7283
7284 if (hba->vreg_info.vccq2->max_uA)
7285 icc_level = ufshcd_get_max_icc_level(
7286 hba->vreg_info.vccq2->max_uA,
7287 icc_level,
7288 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7289out:
7290 return icc_level;
7291}
7292
7293static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7294{
7295 int ret;
7296 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7297 u8 *desc_buf;
7298 u32 icc_level;
7299
7300 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7301 if (!desc_buf)
7302 return;
7303
7304 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7305 desc_buf, buff_len);
7306 if (ret) {
7307 dev_err(hba->dev,
7308 "%s: Failed reading power descriptor.len = %d ret = %d",
7309 __func__, buff_len, ret);
7310 goto out;
7311 }
7312
7313 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7314 buff_len);
7315 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7316
7317 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7318 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7319
7320 if (ret)
7321 dev_err(hba->dev,
7322 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7323 __func__, icc_level, ret);
7324
7325out:
7326 kfree(desc_buf);
7327}
7328
7329static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7330{
7331 scsi_autopm_get_device(sdev);
7332 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7333 if (sdev->rpm_autosuspend)
7334 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7335 RPM_AUTOSUSPEND_DELAY_MS);
7336 scsi_autopm_put_device(sdev);
7337}
7338
7339
7340
7341
7342
7343
7344
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
7355
7356
7357
7358
7359
7360
7361
7362
7363
7364
7365static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7366{
7367 int ret = 0;
7368 struct scsi_device *sdev_boot;
7369
7370 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7371 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7372 if (IS_ERR(hba->sdev_ufs_device)) {
7373 ret = PTR_ERR(hba->sdev_ufs_device);
7374 hba->sdev_ufs_device = NULL;
7375 goto out;
7376 }
7377 scsi_device_put(hba->sdev_ufs_device);
7378
7379 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7380 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7381 if (IS_ERR(hba->sdev_rpmb)) {
7382 ret = PTR_ERR(hba->sdev_rpmb);
7383 goto remove_sdev_ufs_device;
7384 }
7385 ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7386 scsi_device_put(hba->sdev_rpmb);
7387
7388 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7389 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7390 if (IS_ERR(sdev_boot)) {
7391 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7392 } else {
7393 ufshcd_blk_pm_runtime_init(sdev_boot);
7394 scsi_device_put(sdev_boot);
7395 }
7396 goto out;
7397
7398remove_sdev_ufs_device:
7399 scsi_remove_device(hba->sdev_ufs_device);
7400out:
7401 return ret;
7402}
7403
7404static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7405{
7406 struct ufs_dev_info *dev_info = &hba->dev_info;
7407 u8 lun;
7408 u32 d_lu_wb_buf_alloc;
7409 u32 ext_ufs_feature;
7410
7411 if (!ufshcd_is_wb_allowed(hba))
7412 return;
7413
7414
7415
7416
7417
7418 if (!(dev_info->wspecversion >= 0x310 ||
7419 dev_info->wspecversion == 0x220 ||
7420 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7421 goto wb_disabled;
7422
7423 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7424 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7425 goto wb_disabled;
7426
7427 ext_ufs_feature = get_unaligned_be32(desc_buf +
7428 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7429
7430 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7431 goto wb_disabled;
7432
7433
7434
7435
7436
7437
7438 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7439
7440 dev_info->b_presrv_uspc_en =
7441 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7442
7443 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7444 if (!get_unaligned_be32(desc_buf +
7445 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7446 goto wb_disabled;
7447 } else {
7448 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7449 d_lu_wb_buf_alloc = 0;
7450 ufshcd_read_unit_desc_param(hba,
7451 lun,
7452 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7453 (u8 *)&d_lu_wb_buf_alloc,
7454 sizeof(d_lu_wb_buf_alloc));
7455 if (d_lu_wb_buf_alloc) {
7456 dev_info->wb_dedicated_lu = lun;
7457 break;
7458 }
7459 }
7460
7461 if (!d_lu_wb_buf_alloc)
7462 goto wb_disabled;
7463 }
7464 return;
7465
7466wb_disabled:
7467 hba->caps &= ~UFSHCD_CAP_WB_EN;
7468}
7469
7470void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7471{
7472 struct ufs_dev_fix *f;
7473 struct ufs_dev_info *dev_info = &hba->dev_info;
7474
7475 if (!fixups)
7476 return;
7477
7478 for (f = fixups; f->quirk; f++) {
7479 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7480 f->wmanufacturerid == UFS_ANY_VENDOR) &&
7481 ((dev_info->model &&
7482 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7483 !strcmp(f->model, UFS_ANY_MODEL)))
7484 hba->dev_quirks |= f->quirk;
7485 }
7486}
7487EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7488
7489static void ufs_fixup_device_setup(struct ufs_hba *hba)
7490{
7491
7492 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7493
7494
7495 ufshcd_vops_fixup_dev_quirks(hba);
7496}
7497
7498static int ufs_get_device_desc(struct ufs_hba *hba)
7499{
7500 int err;
7501 u8 model_index;
7502 u8 *desc_buf;
7503 struct ufs_dev_info *dev_info = &hba->dev_info;
7504
7505 desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7506 if (!desc_buf) {
7507 err = -ENOMEM;
7508 goto out;
7509 }
7510
7511 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7512 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7513 if (err) {
7514 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7515 __func__, err);
7516 goto out;
7517 }
7518
7519
7520
7521
7522
7523 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7524 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7525
7526
7527 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7528 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7529
7530 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7531
7532 err = ufshcd_read_string_desc(hba, model_index,
7533 &dev_info->model, SD_ASCII_STD);
7534 if (err < 0) {
7535 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7536 __func__, err);
7537 goto out;
7538 }
7539
7540 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
7541 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
7542
7543 ufs_fixup_device_setup(hba);
7544
7545 ufshcd_wb_probe(hba, desc_buf);
7546
7547
7548
7549
7550
7551 err = 0;
7552
7553out:
7554 kfree(desc_buf);
7555 return err;
7556}
7557
7558static void ufs_put_device_desc(struct ufs_hba *hba)
7559{
7560 struct ufs_dev_info *dev_info = &hba->dev_info;
7561
7562 kfree(dev_info->model);
7563 dev_info->model = NULL;
7564}
7565
7566
7567
7568
7569
7570
7571
7572
7573
7574
7575
7576
7577static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7578{
7579 int ret = 0;
7580 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7581
7582 ret = ufshcd_dme_peer_get(hba,
7583 UIC_ARG_MIB_SEL(
7584 RX_MIN_ACTIVATETIME_CAPABILITY,
7585 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7586 &peer_rx_min_activatetime);
7587 if (ret)
7588 goto out;
7589
7590
7591 tuned_pa_tactivate =
7592 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7593 / PA_TACTIVATE_TIME_UNIT_US);
7594 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7595 tuned_pa_tactivate);
7596
7597out:
7598 return ret;
7599}
7600
7601
7602
7603
7604
7605
7606
7607
7608
7609
7610
7611
7612static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7613{
7614 int ret = 0;
7615 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7616 u32 max_hibern8_time, tuned_pa_hibern8time;
7617
7618 ret = ufshcd_dme_get(hba,
7619 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7620 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7621 &local_tx_hibern8_time_cap);
7622 if (ret)
7623 goto out;
7624
7625 ret = ufshcd_dme_peer_get(hba,
7626 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7627 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7628 &peer_rx_hibern8_time_cap);
7629 if (ret)
7630 goto out;
7631
7632 max_hibern8_time = max(local_tx_hibern8_time_cap,
7633 peer_rx_hibern8_time_cap);
7634
7635 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7636 / PA_HIBERN8_TIME_UNIT_US);
7637 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7638 tuned_pa_hibern8time);
7639out:
7640 return ret;
7641}
7642
7643
7644
7645
7646
7647
7648
7649
7650
7651
7652
7653
7654static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7655{
7656 int ret = 0;
7657 u32 granularity, peer_granularity;
7658 u32 pa_tactivate, peer_pa_tactivate;
7659 u32 pa_tactivate_us, peer_pa_tactivate_us;
7660 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7661
7662 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7663 &granularity);
7664 if (ret)
7665 goto out;
7666
7667 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7668 &peer_granularity);
7669 if (ret)
7670 goto out;
7671
7672 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7673 (granularity > PA_GRANULARITY_MAX_VAL)) {
7674 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7675 __func__, granularity);
7676 return -EINVAL;
7677 }
7678
7679 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7680 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7681 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7682 __func__, peer_granularity);
7683 return -EINVAL;
7684 }
7685
7686 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7687 if (ret)
7688 goto out;
7689
7690 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7691 &peer_pa_tactivate);
7692 if (ret)
7693 goto out;
7694
7695 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7696 peer_pa_tactivate_us = peer_pa_tactivate *
7697 gran_to_us_table[peer_granularity - 1];
7698
7699 if (pa_tactivate_us > peer_pa_tactivate_us) {
7700 u32 new_peer_pa_tactivate;
7701
7702 new_peer_pa_tactivate = pa_tactivate_us /
7703 gran_to_us_table[peer_granularity - 1];
7704 new_peer_pa_tactivate++;
7705 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7706 new_peer_pa_tactivate);
7707 }
7708
7709out:
7710 return ret;
7711}
7712
7713static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7714{
7715 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7716 ufshcd_tune_pa_tactivate(hba);
7717 ufshcd_tune_pa_hibern8time(hba);
7718 }
7719
7720 ufshcd_vops_apply_dev_quirks(hba);
7721
7722 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7723
7724 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7725
7726 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7727 ufshcd_quirk_tune_host_pa_tactivate(hba);
7728}
7729
7730static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7731{
7732 hba->ufs_stats.hibern8_exit_cnt = 0;
7733 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7734 hba->req_abort_count = 0;
7735}
7736
7737static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7738{
7739 int err;
7740 size_t buff_len;
7741 u8 *desc_buf;
7742
7743 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7744 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7745 if (!desc_buf) {
7746 err = -ENOMEM;
7747 goto out;
7748 }
7749
7750 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7751 desc_buf, buff_len);
7752 if (err) {
7753 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7754 __func__, err);
7755 goto out;
7756 }
7757
7758 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7759 hba->dev_info.max_lu_supported = 32;
7760 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7761 hba->dev_info.max_lu_supported = 8;
7762
7763out:
7764 kfree(desc_buf);
7765 return err;
7766}
7767
7768static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7769 {19200000, REF_CLK_FREQ_19_2_MHZ},
7770 {26000000, REF_CLK_FREQ_26_MHZ},
7771 {38400000, REF_CLK_FREQ_38_4_MHZ},
7772 {52000000, REF_CLK_FREQ_52_MHZ},
7773 {0, REF_CLK_FREQ_INVAL},
7774};
7775
7776static enum ufs_ref_clk_freq
7777ufs_get_bref_clk_from_hz(unsigned long freq)
7778{
7779 int i;
7780
7781 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7782 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7783 return ufs_ref_clk_freqs[i].val;
7784
7785 return REF_CLK_FREQ_INVAL;
7786}
7787
7788void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7789{
7790 unsigned long freq;
7791
7792 freq = clk_get_rate(refclk);
7793
7794 hba->dev_ref_clk_freq =
7795 ufs_get_bref_clk_from_hz(freq);
7796
7797 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7798 dev_err(hba->dev,
7799 "invalid ref_clk setting = %ld\n", freq);
7800}
7801
7802static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7803{
7804 int err;
7805 u32 ref_clk;
7806 u32 freq = hba->dev_ref_clk_freq;
7807
7808 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7809 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7810
7811 if (err) {
7812 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7813 err);
7814 goto out;
7815 }
7816
7817 if (ref_clk == freq)
7818 goto out;
7819
7820 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7821 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7822
7823 if (err) {
7824 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7825 ufs_ref_clk_freqs[freq].freq_hz);
7826 goto out;
7827 }
7828
7829 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7830 ufs_ref_clk_freqs[freq].freq_hz);
7831
7832out:
7833 return err;
7834}
7835
7836static int ufshcd_device_params_init(struct ufs_hba *hba)
7837{
7838 bool flag;
7839 int ret, i;
7840
7841
7842 for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7843 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7844
7845
7846 ret = ufshcd_device_geo_params_init(hba);
7847 if (ret)
7848 goto out;
7849
7850
7851 ret = ufs_get_device_desc(hba);
7852 if (ret) {
7853 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7854 __func__, ret);
7855 goto out;
7856 }
7857
7858 ufshcd_get_ref_clk_gating_wait(hba);
7859
7860 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7861 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7862 hba->dev_info.f_power_on_wp_en = flag;
7863
7864
7865 if (ufshcd_get_max_pwr_mode(hba))
7866 dev_err(hba->dev,
7867 "%s: Failed getting max supported power mode\n",
7868 __func__);
7869out:
7870 return ret;
7871}
7872
7873
7874
7875
7876
7877static int ufshcd_add_lus(struct ufs_hba *hba)
7878{
7879 int ret;
7880
7881
7882 ret = ufshcd_scsi_add_wlus(hba);
7883 if (ret)
7884 goto out;
7885
7886 ufshcd_clear_ua_wluns(hba);
7887
7888
7889 if (ufshcd_is_clkscaling_supported(hba)) {
7890 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7891 &hba->pwr_info,
7892 sizeof(struct ufs_pa_layer_attr));
7893 hba->clk_scaling.saved_pwr_info.is_valid = true;
7894 hba->clk_scaling.is_allowed = true;
7895
7896 ret = ufshcd_devfreq_init(hba);
7897 if (ret)
7898 goto out;
7899
7900 hba->clk_scaling.is_enabled = true;
7901 ufshcd_init_clk_scaling_sysfs(hba);
7902 }
7903
7904 ufs_bsg_probe(hba);
7905 scsi_scan_host(hba->host);
7906 pm_runtime_put_sync(hba->dev);
7907
7908out:
7909 return ret;
7910}
7911
7912static int
7913ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7914
7915static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7916{
7917 struct scsi_device *sdp;
7918 unsigned long flags;
7919 int ret = 0;
7920
7921 spin_lock_irqsave(hba->host->host_lock, flags);
7922 if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
7923 sdp = hba->sdev_ufs_device;
7924 else if (wlun == UFS_UPIU_RPMB_WLUN)
7925 sdp = hba->sdev_rpmb;
7926 else
7927 BUG();
7928 if (sdp) {
7929 ret = scsi_device_get(sdp);
7930 if (!ret && !scsi_device_online(sdp)) {
7931 ret = -ENODEV;
7932 scsi_device_put(sdp);
7933 }
7934 } else {
7935 ret = -ENODEV;
7936 }
7937 spin_unlock_irqrestore(hba->host->host_lock, flags);
7938 if (ret)
7939 goto out_err;
7940
7941 ret = ufshcd_send_request_sense(hba, sdp);
7942 scsi_device_put(sdp);
7943out_err:
7944 if (ret)
7945 dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7946 __func__, wlun, ret);
7947 return ret;
7948}
7949
7950static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7951{
7952 int ret = 0;
7953
7954 if (!hba->wlun_dev_clr_ua)
7955 goto out;
7956
7957 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7958 if (!ret)
7959 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7960 if (!ret)
7961 hba->wlun_dev_clr_ua = false;
7962out:
7963 if (ret)
7964 dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7965 __func__, ret);
7966 return ret;
7967}
7968
7969
7970
7971
7972
7973
7974
7975
7976static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7977{
7978 int ret;
7979 unsigned long flags;
7980 ktime_t start = ktime_get();
7981
7982 hba->ufshcd_state = UFSHCD_STATE_RESET;
7983
7984 ret = ufshcd_link_startup(hba);
7985 if (ret)
7986 goto out;
7987
7988
7989 ufshcd_clear_dbg_ufs_stats(hba);
7990
7991
7992 ufshcd_set_link_active(hba);
7993
7994
7995 ret = ufshcd_verify_dev_init(hba);
7996 if (ret)
7997 goto out;
7998
7999
8000 ret = ufshcd_complete_dev_init(hba);
8001 if (ret)
8002 goto out;
8003
8004
8005
8006
8007
8008 if (async) {
8009 ret = ufshcd_device_params_init(hba);
8010 if (ret)
8011 goto out;
8012 }
8013
8014 ufshcd_tune_unipro_params(hba);
8015
8016
8017 ufshcd_set_ufs_dev_active(hba);
8018 ufshcd_force_reset_auto_bkops(hba);
8019 hba->wlun_dev_clr_ua = true;
8020 hba->wlun_rpmb_clr_ua = true;
8021
8022
8023 if (hba->max_pwr_info.is_valid) {
8024
8025
8026
8027
8028 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8029 ufshcd_set_dev_ref_clk(hba);
8030 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8031 if (ret) {
8032 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8033 __func__, ret);
8034 goto out;
8035 }
8036 ufshcd_print_pwr_info(hba);
8037 }
8038
8039
8040
8041
8042
8043
8044
8045 ufshcd_set_active_icc_lvl(hba);
8046
8047 ufshcd_wb_config(hba);
8048 if (hba->ee_usr_mask)
8049 ufshcd_write_ee_control(hba);
8050
8051 ufshcd_auto_hibern8_enable(hba);
8052
8053out:
8054 spin_lock_irqsave(hba->host->host_lock, flags);
8055 if (ret)
8056 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8057 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8058 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8059 spin_unlock_irqrestore(hba->host->host_lock, flags);
8060
8061 trace_ufshcd_init(dev_name(hba->dev), ret,
8062 ktime_to_us(ktime_sub(ktime_get(), start)),
8063 hba->curr_dev_pwr_mode, hba->uic_link_state);
8064 return ret;
8065}
8066
8067
8068
8069
8070
8071
8072static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8073{
8074 struct ufs_hba *hba = (struct ufs_hba *)data;
8075 int ret;
8076
8077 down(&hba->host_sem);
8078
8079 ret = ufshcd_probe_hba(hba, true);
8080 up(&hba->host_sem);
8081 if (ret)
8082 goto out;
8083
8084
8085 ret = ufshcd_add_lus(hba);
8086out:
8087
8088
8089
8090
8091 if (ret) {
8092 pm_runtime_put_sync(hba->dev);
8093 ufshcd_hba_exit(hba);
8094 }
8095}
8096
8097static const struct attribute_group *ufshcd_driver_groups[] = {
8098 &ufs_sysfs_unit_descriptor_group,
8099 &ufs_sysfs_lun_attributes_group,
8100 NULL,
8101};
8102
8103static struct ufs_hba_variant_params ufs_hba_vps = {
8104 .hba_enable_delay_us = 1000,
8105 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
8106 .devfreq_profile.polling_ms = 100,
8107 .devfreq_profile.target = ufshcd_devfreq_target,
8108 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8109 .ondemand_data.upthreshold = 70,
8110 .ondemand_data.downdifferential = 5,
8111};
8112
8113static struct scsi_host_template ufshcd_driver_template = {
8114 .module = THIS_MODULE,
8115 .name = UFSHCD,
8116 .proc_name = UFSHCD,
8117 .queuecommand = ufshcd_queuecommand,
8118 .slave_alloc = ufshcd_slave_alloc,
8119 .slave_configure = ufshcd_slave_configure,
8120 .slave_destroy = ufshcd_slave_destroy,
8121 .change_queue_depth = ufshcd_change_queue_depth,
8122 .eh_abort_handler = ufshcd_abort,
8123 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8124 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
8125 .this_id = -1,
8126 .sg_tablesize = SG_ALL,
8127 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8128 .can_queue = UFSHCD_CAN_QUEUE,
8129 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
8130 .max_host_blocked = 1,
8131 .track_queue_depth = 1,
8132 .sdev_groups = ufshcd_driver_groups,
8133 .dma_boundary = PAGE_SIZE - 1,
8134 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
8135};
8136
8137static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8138 int ua)
8139{
8140 int ret;
8141
8142 if (!vreg)
8143 return 0;
8144
8145
8146
8147
8148
8149
8150
8151 if (!vreg->max_uA)
8152 return 0;
8153
8154 ret = regulator_set_load(vreg->reg, ua);
8155 if (ret < 0) {
8156 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8157 __func__, vreg->name, ua, ret);
8158 }
8159
8160 return ret;
8161}
8162
8163static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8164 struct ufs_vreg *vreg)
8165{
8166 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8167}
8168
8169static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8170 struct ufs_vreg *vreg)
8171{
8172 if (!vreg)
8173 return 0;
8174
8175 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8176}
8177
8178static int ufshcd_config_vreg(struct device *dev,
8179 struct ufs_vreg *vreg, bool on)
8180{
8181 int ret = 0;
8182 struct regulator *reg;
8183 const char *name;
8184 int min_uV, uA_load;
8185
8186 BUG_ON(!vreg);
8187
8188 reg = vreg->reg;
8189 name = vreg->name;
8190
8191 if (regulator_count_voltages(reg) > 0) {
8192 uA_load = on ? vreg->max_uA : 0;
8193 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8194 if (ret)
8195 goto out;
8196
8197 if (vreg->min_uV && vreg->max_uV) {
8198 min_uV = on ? vreg->min_uV : 0;
8199 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8200 if (ret)
8201 dev_err(dev,
8202 "%s: %s set voltage failed, err=%d\n",
8203 __func__, name, ret);
8204 }
8205 }
8206out:
8207 return ret;
8208}
8209
8210static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8211{
8212 int ret = 0;
8213
8214 if (!vreg || vreg->enabled)
8215 goto out;
8216
8217 ret = ufshcd_config_vreg(dev, vreg, true);
8218 if (!ret)
8219 ret = regulator_enable(vreg->reg);
8220
8221 if (!ret)
8222 vreg->enabled = true;
8223 else
8224 dev_err(dev, "%s: %s enable failed, err=%d\n",
8225 __func__, vreg->name, ret);
8226out:
8227 return ret;
8228}
8229
8230static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8231{
8232 int ret = 0;
8233
8234 if (!vreg || !vreg->enabled || vreg->always_on)
8235 goto out;
8236
8237 ret = regulator_disable(vreg->reg);
8238
8239 if (!ret) {
8240
8241 ufshcd_config_vreg(dev, vreg, false);
8242 vreg->enabled = false;
8243 } else {
8244 dev_err(dev, "%s: %s disable failed, err=%d\n",
8245 __func__, vreg->name, ret);
8246 }
8247out:
8248 return ret;
8249}
8250
8251static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8252{
8253 int ret = 0;
8254 struct device *dev = hba->dev;
8255 struct ufs_vreg_info *info = &hba->vreg_info;
8256
8257 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8258 if (ret)
8259 goto out;
8260
8261 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8262 if (ret)
8263 goto out;
8264
8265 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8266
8267out:
8268 if (ret) {
8269 ufshcd_toggle_vreg(dev, info->vccq2, false);
8270 ufshcd_toggle_vreg(dev, info->vccq, false);
8271 ufshcd_toggle_vreg(dev, info->vcc, false);
8272 }
8273 return ret;
8274}
8275
8276static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8277{
8278 struct ufs_vreg_info *info = &hba->vreg_info;
8279
8280 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8281}
8282
8283static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8284{
8285 int ret = 0;
8286
8287 if (!vreg)
8288 goto out;
8289
8290 vreg->reg = devm_regulator_get(dev, vreg->name);
8291 if (IS_ERR(vreg->reg)) {
8292 ret = PTR_ERR(vreg->reg);
8293 dev_err(dev, "%s: %s get failed, err=%d\n",
8294 __func__, vreg->name, ret);
8295 }
8296out:
8297 return ret;
8298}
8299
8300static int ufshcd_init_vreg(struct ufs_hba *hba)
8301{
8302 int ret = 0;
8303 struct device *dev = hba->dev;
8304 struct ufs_vreg_info *info = &hba->vreg_info;
8305
8306 ret = ufshcd_get_vreg(dev, info->vcc);
8307 if (ret)
8308 goto out;
8309
8310 ret = ufshcd_get_vreg(dev, info->vccq);
8311 if (!ret)
8312 ret = ufshcd_get_vreg(dev, info->vccq2);
8313out:
8314 return ret;
8315}
8316
8317static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8318{
8319 struct ufs_vreg_info *info = &hba->vreg_info;
8320
8321 if (info)
8322 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8323
8324 return 0;
8325}
8326
8327static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8328{
8329 int ret = 0;
8330 struct ufs_clk_info *clki;
8331 struct list_head *head = &hba->clk_list_head;
8332 unsigned long flags;
8333 ktime_t start = ktime_get();
8334 bool clk_state_changed = false;
8335
8336 if (list_empty(head))
8337 goto out;
8338
8339 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8340 if (ret)
8341 return ret;
8342
8343 list_for_each_entry(clki, head, list) {
8344 if (!IS_ERR_OR_NULL(clki->clk)) {
8345
8346
8347
8348
8349 if (ufshcd_is_link_active(hba) &&
8350 clki->keep_link_active)
8351 continue;
8352
8353 clk_state_changed = on ^ clki->enabled;
8354 if (on && !clki->enabled) {
8355 ret = clk_prepare_enable(clki->clk);
8356 if (ret) {
8357 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8358 __func__, clki->name, ret);
8359 goto out;
8360 }
8361 } else if (!on && clki->enabled) {
8362 clk_disable_unprepare(clki->clk);
8363 }
8364 clki->enabled = on;
8365 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8366 clki->name, on ? "en" : "dis");
8367 }
8368 }
8369
8370 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8371 if (ret)
8372 return ret;
8373
8374out:
8375 if (ret) {
8376 list_for_each_entry(clki, head, list) {
8377 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8378 clk_disable_unprepare(clki->clk);
8379 }
8380 } else if (!ret && on) {
8381 spin_lock_irqsave(hba->host->host_lock, flags);
8382 hba->clk_gating.state = CLKS_ON;
8383 trace_ufshcd_clk_gating(dev_name(hba->dev),
8384 hba->clk_gating.state);
8385 spin_unlock_irqrestore(hba->host->host_lock, flags);
8386 }
8387
8388 if (clk_state_changed)
8389 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8390 (on ? "on" : "off"),
8391 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8392 return ret;
8393}
8394
8395static int ufshcd_init_clocks(struct ufs_hba *hba)
8396{
8397 int ret = 0;
8398 struct ufs_clk_info *clki;
8399 struct device *dev = hba->dev;
8400 struct list_head *head = &hba->clk_list_head;
8401
8402 if (list_empty(head))
8403 goto out;
8404
8405 list_for_each_entry(clki, head, list) {
8406 if (!clki->name)
8407 continue;
8408
8409 clki->clk = devm_clk_get(dev, clki->name);
8410 if (IS_ERR(clki->clk)) {
8411 ret = PTR_ERR(clki->clk);
8412 dev_err(dev, "%s: %s clk get failed, %d\n",
8413 __func__, clki->name, ret);
8414 goto out;
8415 }
8416
8417
8418
8419
8420
8421
8422 if (!strcmp(clki->name, "ref_clk"))
8423 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8424
8425 if (clki->max_freq) {
8426 ret = clk_set_rate(clki->clk, clki->max_freq);
8427 if (ret) {
8428 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8429 __func__, clki->name,
8430 clki->max_freq, ret);
8431 goto out;
8432 }
8433 clki->curr_freq = clki->max_freq;
8434 }
8435 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8436 clki->name, clk_get_rate(clki->clk));
8437 }
8438out:
8439 return ret;
8440}
8441
8442static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8443{
8444 int err = 0;
8445
8446 if (!hba->vops)
8447 goto out;
8448
8449 err = ufshcd_vops_init(hba);
8450 if (err)
8451 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8452 __func__, ufshcd_get_var_name(hba), err);
8453out:
8454 return err;
8455}
8456
8457static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8458{
8459 if (!hba->vops)
8460 return;
8461
8462 ufshcd_vops_exit(hba);
8463}
8464
8465static int ufshcd_hba_init(struct ufs_hba *hba)
8466{
8467 int err;
8468
8469
8470
8471
8472
8473
8474
8475
8476 err = ufshcd_init_hba_vreg(hba);
8477 if (err)
8478 goto out;
8479
8480 err = ufshcd_setup_hba_vreg(hba, true);
8481 if (err)
8482 goto out;
8483
8484 err = ufshcd_init_clocks(hba);
8485 if (err)
8486 goto out_disable_hba_vreg;
8487
8488 err = ufshcd_setup_clocks(hba, true);
8489 if (err)
8490 goto out_disable_hba_vreg;
8491
8492 err = ufshcd_init_vreg(hba);
8493 if (err)
8494 goto out_disable_clks;
8495
8496 err = ufshcd_setup_vreg(hba, true);
8497 if (err)
8498 goto out_disable_clks;
8499
8500 err = ufshcd_variant_hba_init(hba);
8501 if (err)
8502 goto out_disable_vreg;
8503
8504 ufs_debugfs_hba_init(hba);
8505
8506 hba->is_powered = true;
8507 goto out;
8508
8509out_disable_vreg:
8510 ufshcd_setup_vreg(hba, false);
8511out_disable_clks:
8512 ufshcd_setup_clocks(hba, false);
8513out_disable_hba_vreg:
8514 ufshcd_setup_hba_vreg(hba, false);
8515out:
8516 return err;
8517}
8518
8519static void ufshcd_hba_exit(struct ufs_hba *hba)
8520{
8521 if (hba->is_powered) {
8522 ufshcd_exit_clk_scaling(hba);
8523 ufshcd_exit_clk_gating(hba);
8524 if (hba->eh_wq)
8525 destroy_workqueue(hba->eh_wq);
8526 ufs_debugfs_hba_exit(hba);
8527 ufshcd_variant_hba_exit(hba);
8528 ufshcd_setup_vreg(hba, false);
8529 ufshcd_setup_clocks(hba, false);
8530 ufshcd_setup_hba_vreg(hba, false);
8531 hba->is_powered = false;
8532 ufs_put_device_desc(hba);
8533 }
8534}
8535
8536static int
8537ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8538{
8539 unsigned char cmd[6] = {REQUEST_SENSE,
8540 0,
8541 0,
8542 0,
8543 UFS_SENSE_SIZE,
8544 0};
8545 char *buffer;
8546 int ret;
8547
8548 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8549 if (!buffer) {
8550 ret = -ENOMEM;
8551 goto out;
8552 }
8553
8554 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8555 UFS_SENSE_SIZE, NULL, NULL,
8556 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8557 if (ret)
8558 pr_err("%s: failed with err %d\n", __func__, ret);
8559
8560 kfree(buffer);
8561out:
8562 return ret;
8563}
8564
8565
8566
8567
8568
8569
8570
8571
8572
8573
8574static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8575 enum ufs_dev_pwr_mode pwr_mode)
8576{
8577 unsigned char cmd[6] = { START_STOP };
8578 struct scsi_sense_hdr sshdr;
8579 struct scsi_device *sdp;
8580 unsigned long flags;
8581 int ret;
8582
8583 spin_lock_irqsave(hba->host->host_lock, flags);
8584 sdp = hba->sdev_ufs_device;
8585 if (sdp) {
8586 ret = scsi_device_get(sdp);
8587 if (!ret && !scsi_device_online(sdp)) {
8588 ret = -ENODEV;
8589 scsi_device_put(sdp);
8590 }
8591 } else {
8592 ret = -ENODEV;
8593 }
8594 spin_unlock_irqrestore(hba->host->host_lock, flags);
8595
8596 if (ret)
8597 return ret;
8598
8599
8600
8601
8602
8603
8604
8605 hba->host->eh_noresume = 1;
8606 if (hba->wlun_dev_clr_ua)
8607 ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
8608
8609 cmd[4] = pwr_mode << 4;
8610
8611
8612
8613
8614
8615
8616 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8617 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8618 if (ret) {
8619 sdev_printk(KERN_WARNING, sdp,
8620 "START_STOP failed for power mode: %d, result %x\n",
8621 pwr_mode, ret);
8622 if (ret > 0 && scsi_sense_valid(&sshdr))
8623 scsi_print_sense_hdr(sdp, NULL, &sshdr);
8624 }
8625
8626 if (!ret)
8627 hba->curr_dev_pwr_mode = pwr_mode;
8628
8629 scsi_device_put(sdp);
8630 hba->host->eh_noresume = 0;
8631 return ret;
8632}
8633
8634static int ufshcd_link_state_transition(struct ufs_hba *hba,
8635 enum uic_link_state req_link_state,
8636 int check_for_bkops)
8637{
8638 int ret = 0;
8639
8640 if (req_link_state == hba->uic_link_state)
8641 return 0;
8642
8643 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8644 ret = ufshcd_uic_hibern8_enter(hba);
8645 if (!ret) {
8646 ufshcd_set_link_hibern8(hba);
8647 } else {
8648 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8649 __func__, ret);
8650 goto out;
8651 }
8652 }
8653
8654
8655
8656
8657
8658 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8659 (!check_for_bkops || !hba->auto_bkops_enabled)) {
8660
8661
8662
8663
8664
8665
8666
8667
8668
8669
8670 ret = ufshcd_uic_hibern8_enter(hba);
8671 if (ret) {
8672 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8673 __func__, ret);
8674 goto out;
8675 }
8676
8677
8678
8679
8680 ufshcd_hba_stop(hba);
8681
8682
8683
8684
8685 ufshcd_set_link_off(hba);
8686 }
8687
8688out:
8689 return ret;
8690}
8691
8692static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8693{
8694 bool vcc_off = false;
8695
8696
8697
8698
8699
8700
8701
8702 if (!ufshcd_is_link_active(hba) &&
8703 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8704 usleep_range(2000, 2100);
8705
8706
8707
8708
8709
8710
8711
8712
8713
8714
8715
8716
8717
8718
8719
8720
8721 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8722 !hba->dev_info.is_lu_power_on_wp) {
8723 ufshcd_setup_vreg(hba, false);
8724 vcc_off = true;
8725 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8726 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8727 vcc_off = true;
8728 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
8729 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8730 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8731 }
8732 }
8733
8734
8735
8736
8737 if (vcc_off && hba->vreg_info.vcc &&
8738 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8739 usleep_range(5000, 5100);
8740}
8741
8742static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8743{
8744 int ret = 0;
8745
8746 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8747 !hba->dev_info.is_lu_power_on_wp) {
8748 ret = ufshcd_setup_vreg(hba, true);
8749 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8750 if (!ufshcd_is_link_active(hba)) {
8751 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8752 if (ret)
8753 goto vcc_disable;
8754 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8755 if (ret)
8756 goto vccq_lpm;
8757 }
8758 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8759 }
8760 goto out;
8761
8762vccq_lpm:
8763 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8764vcc_disable:
8765 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8766out:
8767 return ret;
8768}
8769
8770static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8771{
8772 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8773 ufshcd_setup_hba_vreg(hba, false);
8774}
8775
8776static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8777{
8778 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8779 ufshcd_setup_hba_vreg(hba, true);
8780}
8781
8782static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8783{
8784 int ret = 0;
8785 int check_for_bkops;
8786 enum ufs_pm_level pm_lvl;
8787 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8788 enum uic_link_state req_link_state;
8789
8790 hba->pm_op_in_progress = true;
8791 if (pm_op != UFS_SHUTDOWN_PM) {
8792 pm_lvl = pm_op == UFS_RUNTIME_PM ?
8793 hba->rpm_lvl : hba->spm_lvl;
8794 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8795 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8796 } else {
8797 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8798 req_link_state = UIC_LINK_OFF_STATE;
8799 }
8800
8801
8802
8803
8804
8805 ufshcd_hold(hba, false);
8806 hba->clk_gating.is_suspended = true;
8807
8808 if (ufshcd_is_clkscaling_supported(hba))
8809 ufshcd_clk_scaling_suspend(hba, true);
8810
8811 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8812 req_link_state == UIC_LINK_ACTIVE_STATE) {
8813 goto vops_suspend;
8814 }
8815
8816 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8817 (req_link_state == hba->uic_link_state))
8818 goto enable_scaling;
8819
8820
8821 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8822 ret = -EINVAL;
8823 goto enable_scaling;
8824 }
8825
8826 if (pm_op == UFS_RUNTIME_PM) {
8827 if (ufshcd_can_autobkops_during_suspend(hba)) {
8828
8829
8830
8831
8832
8833 ret = ufshcd_urgent_bkops(hba);
8834 if (ret)
8835 goto enable_scaling;
8836 } else {
8837
8838 ufshcd_disable_auto_bkops(hba);
8839 }
8840
8841
8842
8843
8844
8845 hba->dev_info.b_rpm_dev_flush_capable =
8846 hba->auto_bkops_enabled ||
8847 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8848 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8849 ufshcd_is_auto_hibern8_enabled(hba))) &&
8850 ufshcd_wb_need_flush(hba));
8851 }
8852
8853 flush_work(&hba->eeh_work);
8854
8855 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8856 if (pm_op != UFS_RUNTIME_PM)
8857
8858 ufshcd_disable_auto_bkops(hba);
8859
8860 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8861 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8862 if (ret)
8863 goto enable_scaling;
8864 }
8865 }
8866
8867
8868
8869
8870
8871 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8872 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
8873 if (ret)
8874 goto set_dev_active;
8875
8876vops_suspend:
8877
8878
8879
8880
8881
8882 ret = ufshcd_vops_suspend(hba, pm_op);
8883 if (ret)
8884 goto set_link_active;
8885 goto out;
8886
8887set_link_active:
8888
8889
8890
8891
8892
8893 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8894 ufshcd_device_reset(hba);
8895 WARN_ON(!ufshcd_is_link_off(hba));
8896 }
8897 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8898 ufshcd_set_link_active(hba);
8899 else if (ufshcd_is_link_off(hba))
8900 ufshcd_host_reset_and_restore(hba);
8901set_dev_active:
8902
8903 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8904 ufshcd_device_reset(hba);
8905 ufshcd_host_reset_and_restore(hba);
8906 }
8907 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8908 ufshcd_disable_auto_bkops(hba);
8909enable_scaling:
8910 if (ufshcd_is_clkscaling_supported(hba))
8911 ufshcd_clk_scaling_suspend(hba, false);
8912
8913 hba->dev_info.b_rpm_dev_flush_capable = false;
8914out:
8915 if (hba->dev_info.b_rpm_dev_flush_capable) {
8916 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8917 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8918 }
8919
8920 if (ret) {
8921 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
8922 hba->clk_gating.is_suspended = false;
8923 ufshcd_release(hba);
8924 }
8925 hba->pm_op_in_progress = false;
8926 return ret;
8927}
8928
8929#ifdef CONFIG_PM
8930static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8931{
8932 int ret;
8933 enum uic_link_state old_link_state = hba->uic_link_state;
8934
8935 hba->pm_op_in_progress = true;
8936
8937
8938
8939
8940
8941
8942 ret = ufshcd_vops_resume(hba, pm_op);
8943 if (ret)
8944 goto out;
8945
8946
8947 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
8948
8949 if (ufshcd_is_link_hibern8(hba)) {
8950 ret = ufshcd_uic_hibern8_exit(hba);
8951 if (!ret) {
8952 ufshcd_set_link_active(hba);
8953 } else {
8954 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8955 __func__, ret);
8956 goto vendor_suspend;
8957 }
8958 } else if (ufshcd_is_link_off(hba)) {
8959
8960
8961
8962
8963
8964
8965 ret = ufshcd_reset_and_restore(hba);
8966
8967
8968
8969
8970 if (ret || !ufshcd_is_link_active(hba))
8971 goto vendor_suspend;
8972 }
8973
8974 if (!ufshcd_is_ufs_dev_active(hba)) {
8975 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8976 if (ret)
8977 goto set_old_link_state;
8978 }
8979
8980 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8981 ufshcd_enable_auto_bkops(hba);
8982 else
8983
8984
8985
8986
8987 ufshcd_urgent_bkops(hba);
8988
8989 if (hba->ee_usr_mask)
8990 ufshcd_write_ee_control(hba);
8991
8992 if (ufshcd_is_clkscaling_supported(hba))
8993 ufshcd_clk_scaling_suspend(hba, false);
8994
8995 if (hba->dev_info.b_rpm_dev_flush_capable) {
8996 hba->dev_info.b_rpm_dev_flush_capable = false;
8997 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8998 }
8999
9000
9001 ufshcd_auto_hibern8_enable(hba);
9002 goto out;
9003
9004set_old_link_state:
9005 ufshcd_link_state_transition(hba, old_link_state, 0);
9006vendor_suspend:
9007 ufshcd_vops_suspend(hba, pm_op);
9008out:
9009 if (ret)
9010 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9011 hba->clk_gating.is_suspended = false;
9012 ufshcd_release(hba);
9013 hba->pm_op_in_progress = false;
9014 return ret;
9015}
9016
9017static int ufshcd_wl_runtime_suspend(struct device *dev)
9018{
9019 struct scsi_device *sdev = to_scsi_device(dev);
9020 struct ufs_hba *hba;
9021 int ret;
9022 ktime_t start = ktime_get();
9023
9024 hba = shost_priv(sdev->host);
9025
9026 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9027 if (ret)
9028 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9029
9030 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9031 ktime_to_us(ktime_sub(ktime_get(), start)),
9032 hba->curr_dev_pwr_mode, hba->uic_link_state);
9033
9034 return ret;
9035}
9036
9037static int ufshcd_wl_runtime_resume(struct device *dev)
9038{
9039 struct scsi_device *sdev = to_scsi_device(dev);
9040 struct ufs_hba *hba;
9041 int ret = 0;
9042 ktime_t start = ktime_get();
9043
9044 hba = shost_priv(sdev->host);
9045
9046 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9047 if (ret)
9048 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9049
9050 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9051 ktime_to_us(ktime_sub(ktime_get(), start)),
9052 hba->curr_dev_pwr_mode, hba->uic_link_state);
9053
9054 return ret;
9055}
9056#endif
9057
9058#ifdef CONFIG_PM_SLEEP
9059static int ufshcd_wl_suspend(struct device *dev)
9060{
9061 struct scsi_device *sdev = to_scsi_device(dev);
9062 struct ufs_hba *hba;
9063 int ret = 0;
9064 ktime_t start = ktime_get();
9065
9066 hba = shost_priv(sdev->host);
9067 down(&hba->host_sem);
9068
9069 if (pm_runtime_suspended(dev))
9070 goto out;
9071
9072 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9073 if (ret) {
9074 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9075 up(&hba->host_sem);
9076 }
9077
9078out:
9079 if (!ret)
9080 hba->is_sys_suspended = true;
9081 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9082 ktime_to_us(ktime_sub(ktime_get(), start)),
9083 hba->curr_dev_pwr_mode, hba->uic_link_state);
9084
9085 return ret;
9086}
9087
9088static int ufshcd_wl_resume(struct device *dev)
9089{
9090 struct scsi_device *sdev = to_scsi_device(dev);
9091 struct ufs_hba *hba;
9092 int ret = 0;
9093 ktime_t start = ktime_get();
9094
9095 hba = shost_priv(sdev->host);
9096
9097 if (pm_runtime_suspended(dev))
9098 goto out;
9099
9100 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9101 if (ret)
9102 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9103out:
9104 trace_ufshcd_wl_resume(dev_name(dev), ret,
9105 ktime_to_us(ktime_sub(ktime_get(), start)),
9106 hba->curr_dev_pwr_mode, hba->uic_link_state);
9107 if (!ret)
9108 hba->is_sys_suspended = false;
9109 up(&hba->host_sem);
9110 return ret;
9111}
9112#endif
9113
9114static void ufshcd_wl_shutdown(struct device *dev)
9115{
9116 struct scsi_device *sdev = to_scsi_device(dev);
9117 struct ufs_hba *hba;
9118
9119 hba = shost_priv(sdev->host);
9120
9121 down(&hba->host_sem);
9122 hba->shutting_down = true;
9123 up(&hba->host_sem);
9124
9125
9126 ufshcd_rpm_get_sync(hba);
9127 scsi_device_quiesce(sdev);
9128 shost_for_each_device(sdev, hba->host) {
9129 if (sdev == hba->sdev_ufs_device)
9130 continue;
9131 scsi_device_quiesce(sdev);
9132 }
9133 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9134}
9135
9136
9137
9138
9139
9140
9141
9142
9143static int ufshcd_suspend(struct ufs_hba *hba)
9144{
9145 int ret;
9146
9147 if (!hba->is_powered)
9148 return 0;
9149
9150
9151
9152
9153 ufshcd_disable_irq(hba);
9154 ret = ufshcd_setup_clocks(hba, false);
9155 if (ret) {
9156 ufshcd_enable_irq(hba);
9157 return ret;
9158 }
9159 if (ufshcd_is_clkgating_allowed(hba)) {
9160 hba->clk_gating.state = CLKS_OFF;
9161 trace_ufshcd_clk_gating(dev_name(hba->dev),
9162 hba->clk_gating.state);
9163 }
9164
9165 ufshcd_vreg_set_lpm(hba);
9166
9167 ufshcd_hba_vreg_set_lpm(hba);
9168 return ret;
9169}
9170
9171
9172
9173
9174
9175
9176
9177
9178
9179
9180static int ufshcd_resume(struct ufs_hba *hba)
9181{
9182 int ret;
9183
9184 if (!hba->is_powered)
9185 return 0;
9186
9187 ufshcd_hba_vreg_set_hpm(hba);
9188 ret = ufshcd_vreg_set_hpm(hba);
9189 if (ret)
9190 goto out;
9191
9192
9193 ret = ufshcd_setup_clocks(hba, true);
9194 if (ret)
9195 goto disable_vreg;
9196
9197
9198 ufshcd_enable_irq(hba);
9199 goto out;
9200
9201disable_vreg:
9202 ufshcd_vreg_set_lpm(hba);
9203out:
9204 if (ret)
9205 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
9206 return ret;
9207}
9208
9209
9210
9211
9212
9213
9214
9215
9216
9217int ufshcd_system_suspend(struct ufs_hba *hba)
9218{
9219 int ret = 0;
9220 ktime_t start = ktime_get();
9221
9222 if (pm_runtime_suspended(hba->dev))
9223 goto out;
9224
9225 ret = ufshcd_suspend(hba);
9226out:
9227 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9228 ktime_to_us(ktime_sub(ktime_get(), start)),
9229 hba->curr_dev_pwr_mode, hba->uic_link_state);
9230 return ret;
9231}
9232EXPORT_SYMBOL(ufshcd_system_suspend);
9233
9234
9235
9236
9237
9238
9239
9240
9241int ufshcd_system_resume(struct ufs_hba *hba)
9242{
9243 int ret = 0;
9244 ktime_t start = ktime_get();
9245
9246 if (pm_runtime_suspended(hba->dev))
9247 goto out;
9248
9249 ret = ufshcd_resume(hba);
9250
9251out:
9252 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9253 ktime_to_us(ktime_sub(ktime_get(), start)),
9254 hba->curr_dev_pwr_mode, hba->uic_link_state);
9255
9256 return ret;
9257}
9258EXPORT_SYMBOL(ufshcd_system_resume);
9259
9260
9261
9262
9263
9264
9265
9266
9267
9268int ufshcd_runtime_suspend(struct ufs_hba *hba)
9269{
9270 int ret;
9271 ktime_t start = ktime_get();
9272
9273 ret = ufshcd_suspend(hba);
9274
9275 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9276 ktime_to_us(ktime_sub(ktime_get(), start)),
9277 hba->curr_dev_pwr_mode, hba->uic_link_state);
9278 return ret;
9279}
9280EXPORT_SYMBOL(ufshcd_runtime_suspend);
9281
9282
9283
9284
9285
9286
9287
9288
9289
9290
9291
9292int ufshcd_runtime_resume(struct ufs_hba *hba)
9293{
9294 int ret;
9295 ktime_t start = ktime_get();
9296
9297 ret = ufshcd_resume(hba);
9298
9299 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9300 ktime_to_us(ktime_sub(ktime_get(), start)),
9301 hba->curr_dev_pwr_mode, hba->uic_link_state);
9302 return ret;
9303}
9304EXPORT_SYMBOL(ufshcd_runtime_resume);
9305
9306int ufshcd_runtime_idle(struct ufs_hba *hba)
9307{
9308 return 0;
9309}
9310EXPORT_SYMBOL(ufshcd_runtime_idle);
9311
9312
9313
9314
9315
9316
9317
9318
9319
9320
9321int ufshcd_shutdown(struct ufs_hba *hba)
9322{
9323 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9324 goto out;
9325
9326 pm_runtime_get_sync(hba->dev);
9327
9328 ufshcd_suspend(hba);
9329out:
9330 hba->is_powered = false;
9331
9332 return 0;
9333}
9334EXPORT_SYMBOL(ufshcd_shutdown);
9335
9336
9337
9338
9339
9340
9341void ufshcd_remove(struct ufs_hba *hba)
9342{
9343 if (hba->sdev_ufs_device)
9344 ufshcd_rpm_get_sync(hba);
9345 ufs_bsg_remove(hba);
9346 ufs_sysfs_remove_nodes(hba->dev);
9347 blk_cleanup_queue(hba->tmf_queue);
9348 blk_mq_free_tag_set(&hba->tmf_tag_set);
9349 blk_cleanup_queue(hba->cmd_queue);
9350 scsi_remove_host(hba->host);
9351
9352 ufshcd_disable_intr(hba, hba->intr_mask);
9353 ufshcd_hba_stop(hba);
9354 ufshcd_hba_exit(hba);
9355}
9356EXPORT_SYMBOL_GPL(ufshcd_remove);
9357
9358
9359
9360
9361
9362void ufshcd_dealloc_host(struct ufs_hba *hba)
9363{
9364 scsi_host_put(hba->host);
9365}
9366EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9367
9368
9369
9370
9371
9372
9373
9374
9375static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9376{
9377 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9378 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9379 return 0;
9380 }
9381 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9382}
9383
9384
9385
9386
9387
9388
9389
9390int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9391{
9392 struct Scsi_Host *host;
9393 struct ufs_hba *hba;
9394 int err = 0;
9395
9396 if (!dev) {
9397 dev_err(dev,
9398 "Invalid memory reference for dev is NULL\n");
9399 err = -ENODEV;
9400 goto out_error;
9401 }
9402
9403 host = scsi_host_alloc(&ufshcd_driver_template,
9404 sizeof(struct ufs_hba));
9405 if (!host) {
9406 dev_err(dev, "scsi_host_alloc failed\n");
9407 err = -ENOMEM;
9408 goto out_error;
9409 }
9410 hba = shost_priv(host);
9411 hba->host = host;
9412 hba->dev = dev;
9413 *hba_handle = hba;
9414 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9415
9416 INIT_LIST_HEAD(&hba->clk_list_head);
9417
9418out_error:
9419 return err;
9420}
9421EXPORT_SYMBOL(ufshcd_alloc_host);
9422
9423
9424static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9425 const struct blk_mq_queue_data *qd)
9426{
9427 WARN_ON_ONCE(true);
9428 return BLK_STS_NOTSUPP;
9429}
9430
9431static const struct blk_mq_ops ufshcd_tmf_ops = {
9432 .queue_rq = ufshcd_queue_tmf,
9433};
9434
9435
9436
9437
9438
9439
9440
9441
9442int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9443{
9444 int err;
9445 struct Scsi_Host *host = hba->host;
9446 struct device *dev = hba->dev;
9447 char eh_wq_name[sizeof("ufs_eh_wq_00")];
9448
9449 if (!mmio_base) {
9450 dev_err(hba->dev,
9451 "Invalid memory reference for mmio_base is NULL\n");
9452 err = -ENODEV;
9453 goto out_error;
9454 }
9455
9456 hba->mmio_base = mmio_base;
9457 hba->irq = irq;
9458 hba->vps = &ufs_hba_vps;
9459
9460 err = ufshcd_hba_init(hba);
9461 if (err)
9462 goto out_error;
9463
9464
9465 err = ufshcd_hba_capabilities(hba);
9466 if (err)
9467 goto out_disable;
9468
9469
9470 hba->ufs_version = ufshcd_get_ufs_version(hba);
9471
9472
9473 hba->intr_mask = ufshcd_get_intr_mask(hba);
9474
9475 err = ufshcd_set_dma_mask(hba);
9476 if (err) {
9477 dev_err(hba->dev, "set dma mask failed\n");
9478 goto out_disable;
9479 }
9480
9481
9482 err = ufshcd_memory_alloc(hba);
9483 if (err) {
9484 dev_err(hba->dev, "Memory allocation failed\n");
9485 goto out_disable;
9486 }
9487
9488
9489 ufshcd_host_memory_configure(hba);
9490
9491 host->can_queue = hba->nutrs;
9492 host->cmd_per_lun = hba->nutrs;
9493 host->max_id = UFSHCD_MAX_ID;
9494 host->max_lun = UFS_MAX_LUNS;
9495 host->max_channel = UFSHCD_MAX_CHANNEL;
9496 host->unique_id = host->host_no;
9497 host->max_cmd_len = UFS_CDB_SIZE;
9498
9499 hba->max_pwr_info.is_valid = false;
9500
9501
9502 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9503 hba->host->host_no);
9504 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9505 if (!hba->eh_wq) {
9506 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9507 __func__);
9508 err = -ENOMEM;
9509 goto out_disable;
9510 }
9511 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9512 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9513
9514 sema_init(&hba->host_sem, 1);
9515
9516
9517 mutex_init(&hba->uic_cmd_mutex);
9518
9519
9520 mutex_init(&hba->dev_cmd.lock);
9521
9522
9523 mutex_init(&hba->ee_ctrl_mutex);
9524
9525 init_rwsem(&hba->clk_scaling_lock);
9526
9527 ufshcd_init_clk_gating(hba);
9528
9529 ufshcd_init_clk_scaling(hba);
9530
9531
9532
9533
9534
9535
9536 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9537 REG_INTERRUPT_STATUS);
9538 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9539
9540
9541
9542
9543 mb();
9544
9545
9546 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9547 if (err) {
9548 dev_err(hba->dev, "request irq failed\n");
9549 goto out_disable;
9550 } else {
9551 hba->is_irq_enabled = true;
9552 }
9553
9554 err = scsi_add_host(host, hba->dev);
9555 if (err) {
9556 dev_err(hba->dev, "scsi_add_host failed\n");
9557 goto out_disable;
9558 }
9559
9560 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9561 if (IS_ERR(hba->cmd_queue)) {
9562 err = PTR_ERR(hba->cmd_queue);
9563 goto out_remove_scsi_host;
9564 }
9565
9566 hba->tmf_tag_set = (struct blk_mq_tag_set) {
9567 .nr_hw_queues = 1,
9568 .queue_depth = hba->nutmrs,
9569 .ops = &ufshcd_tmf_ops,
9570 .flags = BLK_MQ_F_NO_SCHED,
9571 };
9572 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9573 if (err < 0)
9574 goto free_cmd_queue;
9575 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9576 if (IS_ERR(hba->tmf_queue)) {
9577 err = PTR_ERR(hba->tmf_queue);
9578 goto free_tmf_tag_set;
9579 }
9580
9581
9582 ufshcd_device_reset(hba);
9583
9584 ufshcd_init_crypto(hba);
9585
9586
9587 err = ufshcd_hba_enable(hba);
9588 if (err) {
9589 dev_err(hba->dev, "Host controller enable failed\n");
9590 ufshcd_print_evt_hist(hba);
9591 ufshcd_print_host_state(hba);
9592 goto free_tmf_queue;
9593 }
9594
9595
9596
9597
9598
9599
9600 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9601 UFS_SLEEP_PWR_MODE,
9602 UIC_LINK_HIBERN8_STATE);
9603 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9604 UFS_SLEEP_PWR_MODE,
9605 UIC_LINK_HIBERN8_STATE);
9606
9607 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9608 ufshcd_rpm_dev_flush_recheck_work);
9609
9610
9611 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9612 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9613 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9614 }
9615
9616
9617 pm_runtime_get_sync(dev);
9618 atomic_set(&hba->scsi_block_reqs_cnt, 0);
9619
9620
9621
9622
9623
9624
9625 ufshcd_set_ufs_dev_active(hba);
9626
9627 async_schedule(ufshcd_async_scan, hba);
9628 ufs_sysfs_add_nodes(hba->dev);
9629
9630 return 0;
9631
9632free_tmf_queue:
9633 blk_cleanup_queue(hba->tmf_queue);
9634free_tmf_tag_set:
9635 blk_mq_free_tag_set(&hba->tmf_tag_set);
9636free_cmd_queue:
9637 blk_cleanup_queue(hba->cmd_queue);
9638out_remove_scsi_host:
9639 scsi_remove_host(hba->host);
9640out_disable:
9641 hba->is_irq_enabled = false;
9642 ufshcd_hba_exit(hba);
9643out_error:
9644 return err;
9645}
9646EXPORT_SYMBOL_GPL(ufshcd_init);
9647
9648void ufshcd_resume_complete(struct device *dev)
9649{
9650 struct ufs_hba *hba = dev_get_drvdata(dev);
9651
9652 if (hba->complete_put) {
9653 ufshcd_rpm_put(hba);
9654 hba->complete_put = false;
9655 }
9656 if (hba->rpmb_complete_put) {
9657 ufshcd_rpmb_rpm_put(hba);
9658 hba->rpmb_complete_put = false;
9659 }
9660}
9661EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
9662
9663int ufshcd_suspend_prepare(struct device *dev)
9664{
9665 struct ufs_hba *hba = dev_get_drvdata(dev);
9666 int ret;
9667
9668
9669
9670
9671
9672
9673
9674 if (hba->sdev_ufs_device) {
9675 ret = ufshcd_rpm_get_sync(hba);
9676 if (ret < 0 && ret != -EACCES) {
9677 ufshcd_rpm_put(hba);
9678 return ret;
9679 }
9680 hba->complete_put = true;
9681 }
9682 if (hba->sdev_rpmb) {
9683 ufshcd_rpmb_rpm_get_sync(hba);
9684 hba->rpmb_complete_put = true;
9685 }
9686 return 0;
9687}
9688EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
9689
9690#ifdef CONFIG_PM_SLEEP
9691static int ufshcd_wl_poweroff(struct device *dev)
9692{
9693 struct scsi_device *sdev = to_scsi_device(dev);
9694 struct ufs_hba *hba = shost_priv(sdev->host);
9695
9696 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9697 return 0;
9698}
9699#endif
9700
9701static int ufshcd_wl_probe(struct device *dev)
9702{
9703 struct scsi_device *sdev = to_scsi_device(dev);
9704
9705 if (!is_device_wlun(sdev))
9706 return -ENODEV;
9707
9708 blk_pm_runtime_init(sdev->request_queue, dev);
9709 pm_runtime_set_autosuspend_delay(dev, 0);
9710 pm_runtime_allow(dev);
9711
9712 return 0;
9713}
9714
9715static int ufshcd_wl_remove(struct device *dev)
9716{
9717 pm_runtime_forbid(dev);
9718 return 0;
9719}
9720
9721static const struct dev_pm_ops ufshcd_wl_pm_ops = {
9722#ifdef CONFIG_PM_SLEEP
9723 .suspend = ufshcd_wl_suspend,
9724 .resume = ufshcd_wl_resume,
9725 .freeze = ufshcd_wl_suspend,
9726 .thaw = ufshcd_wl_resume,
9727 .poweroff = ufshcd_wl_poweroff,
9728 .restore = ufshcd_wl_resume,
9729#endif
9730 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
9731};
9732
9733
9734
9735
9736
9737
9738
9739
9740
9741
9742
9743static struct scsi_driver ufs_dev_wlun_template = {
9744 .gendrv = {
9745 .name = "ufs_device_wlun",
9746 .owner = THIS_MODULE,
9747 .probe = ufshcd_wl_probe,
9748 .remove = ufshcd_wl_remove,
9749 .pm = &ufshcd_wl_pm_ops,
9750 .shutdown = ufshcd_wl_shutdown,
9751 },
9752};
9753
9754static int ufshcd_rpmb_probe(struct device *dev)
9755{
9756 return is_rpmb_wlun(to_scsi_device(dev)) ? 0 : -ENODEV;
9757}
9758
9759static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
9760{
9761 int ret = 0;
9762
9763 if (!hba->wlun_rpmb_clr_ua)
9764 return 0;
9765 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
9766 if (!ret)
9767 hba->wlun_rpmb_clr_ua = 0;
9768 return ret;
9769}
9770
9771#ifdef CONFIG_PM
9772static int ufshcd_rpmb_resume(struct device *dev)
9773{
9774 struct ufs_hba *hba = wlun_dev_to_hba(dev);
9775
9776 if (hba->sdev_rpmb)
9777 ufshcd_clear_rpmb_uac(hba);
9778 return 0;
9779}
9780#endif
9781
9782static const struct dev_pm_ops ufs_rpmb_pm_ops = {
9783 SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)
9784 SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume)
9785};
9786
9787
9788static struct scsi_driver ufs_rpmb_wlun_template = {
9789 .gendrv = {
9790 .name = "ufs_rpmb_wlun",
9791 .owner = THIS_MODULE,
9792 .probe = ufshcd_rpmb_probe,
9793 .pm = &ufs_rpmb_pm_ops,
9794 },
9795};
9796
9797static int __init ufshcd_core_init(void)
9798{
9799 int ret;
9800
9801 ufs_debugfs_init();
9802
9803 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
9804 if (ret)
9805 goto debugfs_exit;
9806
9807 ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv);
9808 if (ret)
9809 goto unregister;
9810
9811 return ret;
9812unregister:
9813 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
9814debugfs_exit:
9815 ufs_debugfs_exit();
9816 return ret;
9817}
9818
9819static void __exit ufshcd_core_exit(void)
9820{
9821 ufs_debugfs_exit();
9822 scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv);
9823 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
9824}
9825
9826module_init(ufshcd_core_init);
9827module_exit(ufshcd_core_exit);
9828
9829MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9830MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
9831MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9832MODULE_LICENSE("GPL");
9833MODULE_VERSION(UFSHCD_DRIVER_VERSION);
9834