1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef _UFSHCD_H
38#define _UFSHCD_H
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/init.h>
43#include <linux/interrupt.h>
44#include <linux/io.h>
45#include <linux/delay.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/rwsem.h>
49#include <linux/workqueue.h>
50#include <linux/errno.h>
51#include <linux/types.h>
52#include <linux/wait.h>
53#include <linux/bitops.h>
54#include <linux/pm_runtime.h>
55#include <linux/clk.h>
56#include <linux/completion.h>
57#include <linux/regulator/consumer.h>
58#include "unipro.h"
59
60#include <asm/irq.h>
61#include <asm/byteorder.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66#include <scsi/scsi_dbg.h>
67#include <scsi/scsi_eh.h>
68
69#include "ufs.h"
70#include "ufshci.h"
71
/* Driver name and version reported to the SCSI midlayer / sysfs. */
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;	/* forward declaration; full definition below */
76
/* Types of internal device-management commands issued via the dev_cmd path. */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP = 0x0,		/* NOP OUT UPIU (device probe/ping) */
	DEV_CMD_TYPE_QUERY = 0x1,	/* QUERY REQUEST UPIU */
};
81
82
83
84
85
86
87
88
89
90
91
/**
 * struct uic_command - UIC (UniPro Interface Controller) command structure
 * @command: UIC command opcode
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: non-zero while the command is outstanding on the controller
 * @result: UIC command completion result
 * @done: completed when the UIC command finishes
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
101
102
/* Which PM path (runtime, system, or shutdown) a suspend/resume is for. */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Convenience predicates over an enum ufs_pm_op value. */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
112
113
/* Host controller UIC link states. */
enum uic_link_state {
	UIC_LINK_OFF_STATE = 0,		/* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE = 1,	/* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE = 2,	/* Link is in Hibernate state */
};

/* Query / set the cached link state in hba->uic_link_state. */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
 UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
 UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
 UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
 UIC_LINK_HIBERN8_STATE)
130
131
132
133
134
/*
 * UFS power management levels. Each level indexes ufs_pm_lvl_states[],
 * i.e. maps to a (device power mode, UIC link state) pair.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX		/* number of levels; not a valid level itself */
};
144
/* One entry of the PM-level table: target device and link states. */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;	/* target device power mode */
	enum uic_link_state link_state;		/* target UIC link state */
};
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/**
 * struct ufshcd_lrb - local reference block; per-tag state of one request
 * @utr_descriptor_ptr: UTP transfer request descriptor of the command
 * @ucd_req_ptr: request UPIU address of the command
 * @ucd_rsp_ptr: response UPIU address of the command
 * @ucd_prdt_ptr: PRDT (scatter/gather table) address of the command
 * @utrd_dma_addr: UTRD DMA address (for debug/programming the controller)
 * @ucd_req_dma_addr: request UPIU DMA address
 * @ucd_rsp_dma_addr: response UPIU DMA address
 * @ucd_prdt_dma_addr: PRDT DMA address
 * @cmd: associated SCSI command (NULL for internal/device-management cmds)
 * @sense_buffer: sense data buffer for the command
 * @sense_bufflen: length of @sense_buffer
 * @scsi_status: SCSI status of the completed command
 * @command_type: request type (SCSI, UFS, query)
 * @task_tag: task tag (doorbell slot) of the command
 * @lun: UPIU LUN the command is addressed to
 * @intr_cmd: true for commands excluded from interrupt aggregation
 * @issue_time_stamp: time the request was issued (debug/statistics)
 * @compl_time_stamp: time the request completed (debug/statistics)
 * @req_abort_skip: skip issuing a further abort for this tag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;

	bool req_abort_skip;
};
197
198
199
200
201
202
203
/**
 * struct ufs_query - one query request/response transaction
 * @request: query request UPIU data
 * @descriptor: descriptor buffer for read/write descriptor opcodes
 * @response: query response UPIU data
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
209
210
211
212
213
214
215
216
/**
 * struct ufs_dev_cmd - state for internal device-management commands
 * @type: command type (NOP or QUERY)
 * @lock: serializes device-management command issue
 * @complete: signalled on command completion
 * @tag_wq: waiters blocked until a free doorbell tag is available
 * @query: query request/response payload
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
224
/*
 * Per-device sizes of the UFS descriptors, as reported by the device
 * (they vary between UFS specification versions and vendors).
 */
struct ufs_desc_size {
	int dev_desc;		/* device descriptor size */
	int pwr_desc;		/* power descriptor size */
	int geom_desc;		/* geometry descriptor size */
	int interc_desc;	/* interconnect descriptor size */
	int unit_desc;		/* unit descriptor size */
	int conf_desc;		/* configuration descriptor size */
	int hlth_desc;		/* health descriptor size */
};
234
235
236
237
238
239
240
241
242
243
244
/**
 * struct ufs_clk_info - one controller clock and its frequency limits
 * @list: linkage into hba->clk_list_head
 * @clk: clock handle
 * @name: clock name
 * @max_freq: maximum frequency (Hz)
 * @min_freq: minimum frequency (Hz)
 * @curr_freq: currently programmed frequency (Hz)
 * @enabled: true while the clock is enabled
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
254
/* Whether a variant-ops notification fires before or after the change. */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};
259
/* PA-layer (power mode) attributes: gear, lane count and mode per direction. */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode (e.g. FAST_MODE/FASTAUTO_MODE) */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series */
};
269
/* A power mode configuration plus a validity flag. */
struct ufs_pwr_mode_info {
	bool is_valid;			/* true once @info has been populated */
	struct ufs_pa_layer_attr info;
};
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
/**
 * struct ufs_hba_variant_ops - vendor/variant-specific callbacks
 * @name: variant name
 * @init: called during driver initialization
 * @exit: called to clean up everything done in @init
 * @get_ufs_hci_version: returns the UFS HCI version
 * @clk_scale_notify: notified (pre/post) when clocks are scaled up/down
 * @setup_clocks: called (pre/post) around enabling/disabling clocks
 * @setup_regulators: called around regulator setup/teardown
 * @hce_enable_notify: called before/after the HCE enable bit is set
 * @link_startup_notify: called before/after link startup
 * @pwr_change_notify: called before/after a power mode change
 * @setup_xfer_req: called before a transfer request is issued
 * @setup_task_mgmt: called before a task management request is issued
 * @hibern8_notify: called before/after hibern8 enter/exit
 * @apply_dev_quirks: applies variant-specific device quirks
 * @suspend: variant suspend hook, given the PM operation type
 * @resume: variant resume hook, given the PM operation type
 * @dbg_register_dump: dumps variant debug registers
 * @phy_initialization: variant-specific PHY initialization
 */
struct ufs_hba_variant_ops {
	const char *name;
	int (*init)(struct ufs_hba *);
	void (*exit)(struct ufs_hba *);
	u32 (*get_ufs_hci_version)(struct ufs_hba *);
	int (*clk_scale_notify)(struct ufs_hba *, bool,
 enum ufs_notify_change_status);
	int (*setup_clocks)(struct ufs_hba *, bool,
 enum ufs_notify_change_status);
	int (*setup_regulators)(struct ufs_hba *, bool);
	int (*hce_enable_notify)(struct ufs_hba *,
 enum ufs_notify_change_status);
	int (*link_startup_notify)(struct ufs_hba *,
 enum ufs_notify_change_status);
	int (*pwr_change_notify)(struct ufs_hba *,
 enum ufs_notify_change_status status,
 struct ufs_pa_layer_attr *,
 struct ufs_pa_layer_attr *);
	void (*setup_xfer_req)(struct ufs_hba *, int, bool);
	void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
 enum ufs_notify_change_status);
	int (*apply_dev_quirks)(struct ufs_hba *);
	int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void (*dbg_register_dump)(struct ufs_hba *hba);
	int (*phy_initialization)(struct ufs_hba *);
};
330
331
/* Clock-gating state machine: current state plus requested transitions. */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gate requested; gate_work pending */
	REQ_CLKS_ON,	/* ungate requested; ungate_work pending */
};
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/**
 * struct ufs_clk_gating - clock gating related info
 * @gate_work: delayed work to gate clocks after the idle period elapses
 * @ungate_work: work to turn the clocks back on
 * @state: current clk_gating_state
 * @delay_ms: idle period (ms) before clocks are gated
 * @is_suspended: clock gating is suspended while this is set
 * @delay_attr: sysfs attribute controlling @delay_ms
 * @enable_attr: sysfs attribute enabling/disabling clock gating
 * @is_enabled: true while clock gating is enabled
 * @active_reqs: number of in-flight requests keeping clocks on
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
};
366
/* Power mode saved across clock scaling, with a validity flag. */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;		/* true once @info holds a saved mode */
};
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
/**
 * struct ufs_clk_scaling - clock (devfreq) scaling related info
 * @active_reqs: number of in-flight requests (busy-time accounting)
 * @tot_busy_t: accumulated busy time in the current monitoring window
 * @window_start_t: start time of the current monitoring window
 * @busy_start_t: time when the controller last became busy
 * @enable_attr: sysfs attribute enabling/disabling clock scaling
 * @saved_pwr_info: power mode to restore after scaling back up
 * @workq: workqueue running @suspend_work and @resume_work
 * @suspend_work: work to suspend devfreq scaling
 * @resume_work: work to resume devfreq scaling
 * @is_allowed: clock scaling is currently permitted
 * @is_busy_started: busy-time tracking has been started
 * @is_suspended: devfreq scaling is suspended
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};
404
405
406
407
408
409
/* Data prefetched during initialization (see hba->init_prefetch_data). */
struct ufs_init_prefetch {
	u32 icc_level;		/* device ICC (current consumption) level */
};
413
/* Number of UIC error register snapshots kept per error layer. */
#define UIC_ERR_REG_HIST_LENGTH 8

/**
 * struct ufs_uic_err_reg_hist - ring buffer of recent UIC error registers
 * @pos: next slot to write (wraps modulo UIC_ERR_REG_HIST_LENGTH)
 * @reg: saved error register values
 * @tstamp: timestamp of each saved value
 */
struct ufs_uic_err_reg_hist {
	int pos;
	u32 reg[UIC_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
};
426
427
428
429
430
431
432
433
434
435
436
437
438
/**
 * struct ufs_stats - error statistics kept for debugging
 * @hibern8_exit_cnt: number of hibern8 exits
 * @last_hibern8_exit_tstamp: time of the most recent hibern8 exit
 * @pa_err: PA layer error history
 * @dl_err: data link layer error history
 * @nl_err: network layer error history
 * @tl_err: transport layer error history
 * @dme_err: DME error history
 */
struct ufs_stats {
	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_uic_err_reg_hist pa_err;
	struct ufs_uic_err_reg_hist dl_err;
	struct ufs_uic_err_reg_hist nl_err;
	struct ufs_uic_err_reg_hist tl_err;
	struct ufs_uic_err_reg_hist dme_err;
};
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
/**
 * struct ufs_hba - per-adapter private structure of the UFS host driver
 *
 * Field groups are commented inline. NOTE(review): several semantics below
 * (quirk meanings in particular) are inferred from the macro names and the
 * places this header is used from — confirm against the .c implementation.
 */
struct ufs_hba {
	void __iomem *mmio_base;	/* UFSHCI register base address */

	/* Virtual addresses of the descriptor arrays. */
	struct utp_transfer_cmd_desc *ucdl_base_addr;	/* UCD list */
	struct utp_transfer_req_desc *utrdl_base_addr;	/* UTRD list */
	struct utp_task_req_desc *utmrdl_base_addr;	/* UTMRD list */

	/* DMA addresses of the descriptor arrays above. */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;		/* SCSI host handle */
	struct device *dev;		/* parent device handle */

	/*
	 * Device W-LU handle, used to issue SSU (power mode) commands
	 * during PM transitions.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired power level for runtime PM. */
	enum ufs_pm_level rpm_lvl;
	/* Desired power level for system PM. */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;		/* non-zero during suspend/resume */

	/* Auto-Hibernate Idle Timer register value. */
	u32 ahit;

	struct ufshcd_lrb *lrb;		/* array of nutrs reference blocks */
	unsigned long lrb_in_use;	/* bitmap of in-use LRB slots */

	unsigned long outstanding_tasks;	/* bits of issued TM requests */
	unsigned long outstanding_reqs;		/* bits of issued xfer requests */

	u32 capabilities;	/* controller capabilities register */
	int nutrs;		/* number of transfer request slots */
	int nutmrs;		/* number of task management request slots */
	u32 ufs_version;	/* UFS spec version of the controller */
	struct ufs_hba_variant_ops *vops;	/* variant callbacks, may be NULL */
	void *priv;		/* variant private data (see ufshcd_set_variant) */
	unsigned int irq;	/* IRQ number */
	bool is_irq_enabled;	/* whether the IRQ is currently enabled */

	/* Interrupt aggregation is broken; it must stay disabled. */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1

	/*
	 * A delay is needed before issuing successive DME commands
	 * (see last_dme_cmd_tstamp).
	 */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2

	/*
	 * Line Control Command handling is broken; LCC needs to be
	 * suppressed around link startup.
	 */
	#define UFSHCD_QUIRK_BROKEN_LCC 0x4

	/*
	 * PA_RXHSUNTERMCAP attribute handling is broken on this
	 * controller.
	 */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8

	/*
	 * DME peer attribute accesses need the auto (AUTO_MODE) variant
	 * on this controller.
	 */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10

	/*
	 * The HCI version register is unreliable; use the
	 * get_ufs_hci_version() variant op instead.
	 */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20

	/*
	 * PRDT sizes/offsets are expressed in bytes rather than in
	 * double words. (0x40 is unused here — presumably retired.)
	 */
	#define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80

	unsigned int quirks;	/* bitmask of UFSHCD_QUIRK_* deviations */

	/* Bitmask of device (UFS_DEVICE_QUIRK_*) deviations. */
	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;	/* waiters for TM completion */
	wait_queue_head_t tm_tag_wq;	/* waiters for a free TM slot */
	unsigned long tm_condition;	/* per-slot TM completion bits */
	unsigned long tm_slots_in_use;	/* bitmap of in-use TM slots */

	struct uic_command *active_uic_cmd;	/* UIC command in flight */
	struct mutex uic_cmd_mutex;		/* serializes UIC commands */
	struct completion *uic_async_done;	/* async UIC completion */

	u32 ufshcd_state;	/* driver operational state */
	u32 eh_flags;		/* error-handling flags */
	u32 intr_mask;		/* interrupts enabled on the controller */
	u16 ee_ctrl_mask;	/* exception event control mask */
	bool is_powered;	/* host init sequence has powered the hba */
	bool is_init_prefetch;	/* init-time data has been prefetched */
	struct ufs_init_prefetch init_prefetch_data;

	/* Deferred work. */
	struct work_struct eh_work;	/* error handler */
	struct work_struct eeh_work;	/* exception event handler */

	/* Error state, latched from interrupt status. */
	u32 errors;		/* generic error bits */
	u32 uic_error;		/* UIC layer error bits */
	u32 saved_err;		/* errors saved for the error handler */
	u32 saved_uic_err;	/* UIC errors saved for the error handler */
	struct ufs_stats ufs_stats;

	/* Internal device-management command support. */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;	/* for DELAY_BEFORE_DME_CMDS quirk */

	/* Attached device information and resources. */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;	/* device auto-BKOPS is on */
	struct ufs_vreg_info vreg_info;	/* voltage regulators */
	struct list_head clk_list_head;	/* list of ufs_clk_info */

	bool wlun_dev_clr_ua;	/* device W-LU unit attention needs clearing */

	/* Number of times a request abort was attempted. */
	int req_abort_count;

	/* Link configuration. */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;	/* current power mode */
	struct ufs_pwr_mode_info max_pwr_info;	/* fastest supported mode */

	struct ufs_clk_gating clk_gating;
	/* Driver capability flags (UFSHCD_CAP_*). */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING (1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
	/* Allow auto-BKOPS to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * Use interrupt aggregation, if also supported by the controller
	 * and not disabled by UFSHCD_QUIRK_BROKEN_INTR_AGGR.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
	/*
	 * Keep device auto-BKOPS enabled except during suspend (see
	 * ufshcd_keep_autobkops_enabled_except_suspend()).
	 */
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)

	struct devfreq *devfreq;		/* devfreq handle for scaling */
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;	/* system suspend has been done */

	enum bkops_status urgent_bkops_lvl;	/* BKOPS level needing urgent action */
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;	/* blocks requests during scaling */
	struct ufs_desc_size desc_size;		/* device descriptor sizes */
};
687
688
689static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
690{
691 return hba->caps & UFSHCD_CAP_CLK_GATING;
692}
693static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
694{
695 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
696}
697static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
698{
699 return hba->caps & UFSHCD_CAP_CLK_SCALING;
700}
701static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
702{
703 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
704}
705
706static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
707{
708
709#ifndef CONFIG_SCSI_UFS_DWC
710 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
711 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
712 return true;
713 else
714 return false;
715#else
716return true;
717#endif
718}
719
/* MMIO accessors relative to the controller's register base. */
#define ufshcd_writel(hba, val, reg) \
 writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
 readl((hba)->mmio_base + (reg))
724
725
726
727
728
729
730
731
732static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
733{
734 u32 tmp;
735
736 tmp = ufshcd_readl(hba, reg);
737 tmp &= ~mask;
738 tmp |= (val & mask);
739 ufshcd_writel(hba, tmp, reg);
740}
741
742int ufshcd_alloc_host(struct device *, struct ufs_hba **);
743void ufshcd_dealloc_host(struct ufs_hba *);
744int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
745void ufshcd_remove(struct ufs_hba *);
746int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
747 u32 val, unsigned long interval_us,
748 unsigned long timeout_ms, bool can_sleep);
749
/* Compile-time check that the aligned UPIU size can hold the largest UPIU. */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
 GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
755
756
757
758
759
760
/**
 * ufshcd_set_variant - attach variant-specific private data to the hba
 * @hba: adapter instance (must not be NULL)
 * @variant: opaque pointer owned by the variant driver
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
766
767
768
769
770
/**
 * ufshcd_get_variant - retrieve the variant-specific private data
 * @hba: adapter instance (must not be NULL)
 *
 * Returns the pointer previously stored by ufshcd_set_variant().
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
/* True when auto-BKOPS should stay enabled except during suspend. */
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
 struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
781
/* PM entry points (implemented in ufshcd.c). */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
/* Raw DME attribute accessors; prefer the ufshcd_dme_*() wrappers below. */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 u32 *mib_val, u8 peer);

/* @peer argument of the DME accessors: local vs peer (device) side. */
#define DME_LOCAL 0
#define DME_PEER 1
/* @attr_set argument: normal vs static attribute set. */
#define ATTR_SET_NOR 0
#define ATTR_SET_ST 1
798
/* Set a local DME attribute (normal set). */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
 mib_val, DME_LOCAL);
}
805
/* Set a local DME attribute in the static attribute set. */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
 mib_val, DME_LOCAL);
}
812
/* Set a peer (device-side) DME attribute (normal set). */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
 mib_val, DME_PEER);
}
819
/* Set a peer (device-side) DME attribute in the static attribute set. */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
 mib_val, DME_PEER);
}
826
/* Read a local DME attribute into *mib_val. */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}
832
/* Read a peer (device-side) DME attribute into *mib_val. */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
838
839static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
840{
841 return (pwr_info->pwr_rx == FAST_MODE ||
842 pwr_info->pwr_rx == FASTAUTO_MODE) &&
843 (pwr_info->pwr_tx == FAST_MODE ||
844 pwr_info->pwr_tx == FASTAUTO_MODE);
845}
846
847
/* Query/descriptor access helpers (implemented in ufshcd.c). */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 enum query_opcode opcode,
 enum desc_idn idn, u8 index,
 u8 selector,
 u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
 enum desc_idn desc_id,
 int desc_index,
 u8 param_offset,
 u8 *param_read_buf,
 u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 enum flag_idn idn, bool *flag_res);
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
 u8 *buf, u32 size, bool ascii);

/* Clock-gating reference counting: hold clocks on / release them. */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

/* Look up the (device-reported) length of a descriptor by its ID. */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
 int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
873
874
875static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
876{
877 if (hba->vops)
878 return hba->vops->name;
879 return "";
880}
881
/* Call the variant init hook if one is provided; 0 otherwise. */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}
889
890static inline void ufshcd_vops_exit(struct ufs_hba *hba)
891{
892 if (hba->vops && hba->vops->exit)
893 return hba->vops->exit(hba);
894}
895
/*
 * UFS HCI version: from the variant hook when present (needed with the
 * BROKEN_UFS_HCI_VERSION quirk), otherwise from the version register.
 */
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
903
/* Notify the variant (pre/post) that clocks are scaled up or down. */
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
 bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}
911
/* Notify the variant (pre/post) around enabling/disabling clocks. */
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
 enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}
919
/* Call the variant regulator setup/teardown hook, if any. */
static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}
927
/* Notify the variant before/after the HCE enable bit is set. */
static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
 bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}
/* Notify the variant before/after link startup. */
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
 bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}
944
/*
 * Notify the variant before/after a power mode change. Unlike the other
 * wrappers, a missing hook is an error (-ENOTSUPP): the variant is expected
 * to pick the negotiated parameters.
 */
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
 bool status,
 struct ufs_pa_layer_attr *dev_max_params,
 struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
 dev_max_params, dev_req_params);

	return -ENOTSUPP;
}
956
957static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
958 bool is_scsi_cmd)
959{
960 if (hba->vops && hba->vops->setup_xfer_req)
961 return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
962}
963
964static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
965 int tag, u8 tm_function)
966{
967 if (hba->vops && hba->vops->setup_task_mgmt)
968 return hba->vops->setup_task_mgmt(hba, tag, tm_function);
969}
970
971static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
972 enum uic_cmd_dme cmd,
973 enum ufs_notify_change_status status)
974{
975 if (hba->vops && hba->vops->hibern8_notify)
976 return hba->vops->hibern8_notify(hba, cmd, status);
977}
978
/* Apply variant-specific device quirks, if the hook exists. */
static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}
985
/* Call the variant suspend hook with the PM operation type, if any. */
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}
993
/* Call the variant resume hook with the PM operation type, if any. */
static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}
1001
/* Dump variant debug registers, if the hook exists. */
static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}
1007
/* PM-level table indexed by enum ufs_pm_level; defined in ufshcd.c. */
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
1009
1010
1011
1012
1013
1014
1015
1016static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1017{
1018 if (scsi_is_wlun(scsi_lun))
1019 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1020 | UFS_UPIU_WLUN_ID;
1021 else
1022 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1023}
1024
1025#endif
1026