1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef _UFSHCD_H
38#define _UFSHCD_H
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/init.h>
43#include <linux/interrupt.h>
44#include <linux/io.h>
45#include <linux/delay.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/rwsem.h>
49#include <linux/workqueue.h>
50#include <linux/errno.h>
51#include <linux/types.h>
52#include <linux/wait.h>
53#include <linux/bitops.h>
54#include <linux/pm_runtime.h>
55#include <linux/clk.h>
56#include <linux/completion.h>
57#include <linux/regulator/consumer.h>
58#include "unipro.h"
59
60#include <asm/irq.h>
61#include <asm/byteorder.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66#include <scsi/scsi_dbg.h>
67#include <scsi/scsi_eh.h>
68
69#include "ufs.h"
70#include "ufshci.h"
71
72#define UFSHCD "ufshcd"
73#define UFSHCD_DRIVER_VERSION "0.2"
74
/* Forward declaration; the full definition appears later in this header. */
struct ufs_hba;

/* Types of internal (driver-issued) device management commands. */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP = 0x0,		/* NOP OUT UPIU */
	DEV_CMD_TYPE_QUERY = 0x1,	/* QUERY REQUEST UPIU */
};
81
82
83
84
85
86
87
88
89
90
91
/**
 * struct uic_command - UIC command structure
 * @command: UIC command opcode
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: nonzero while the command is outstanding
 * @result: UIC command result code
 * @done: completed when the UIC command finishes
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
101
102
/* Which kind of power-management operation is being performed. */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Predicates over an enum ufs_pm_op value. */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
112
113
/* Host controller <-> device UniPro link states. */
enum uic_link_state {
	UIC_LINK_OFF_STATE = 0,		/* link powered down or disabled */
	UIC_LINK_ACTIVE_STATE = 1,	/* link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE = 2,	/* link is in Hibern8 state */
};

/* Query/set the cached link state tracked in hba->uic_link_state. */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
130
131
132
133
134
/*
 * UFS power-management levels. Each level maps to one (device power mode,
 * UIC link state) pair through the ufs_pm_lvl_states[] table declared
 * below; see that table's definition for the exact pairings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX
};

/* One (device power mode, link state) pair of ufs_pm_lvl_states[]. */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/**
 * struct ufshcd_lrb - local reference block, one per request slot
 * @utr_descriptor_ptr: UTP transfer request descriptor of the command
 * @ucd_req_ptr: request UPIU address of the command
 * @ucd_rsp_ptr: response UPIU address of the command
 * @ucd_prdt_ptr: PRDT (scatter/gather table) address of the command
 * @utrd_dma_addr: DMA address of the UTRD
 * @ucd_req_dma_addr: DMA address of the request UPIU
 * @ucd_rsp_dma_addr: DMA address of the response UPIU
 * @ucd_prdt_dma_addr: DMA address of the PRDT
 * @cmd: the SCSI command this slot carries (NULL for internal commands)
 * @sense_buffer: sense buffer address of the SCSI command
 * @sense_bufflen: length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query
 * @task_tag: task tag of the command
 * @lun: LUN the command is addressed to
 * @intr_cmd: whether this is an interrupt command (excluded from
 *	interrupt aggregation)
 * @issue_time_stamp: time the command was issued (debug/statistics)
 * @compl_time_stamp: time the command completed (debug/statistics)
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;

	bool req_abort_skip;
};
197
198
199
200
201
202
203
/**
 * struct ufs_query - parameters of a QUERY REQUEST device command
 * @request: request UPIU and function
 * @descriptor: buffer for the descriptor being read or written
 * @response: response UPIU and result
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - state for issuing internal device management commands
 * @type: device management command type (Query, NOP OUT)
 * @lock: serializes device management command issue
 * @complete: completed when the current device command finishes
 * @tag_wq: wait queue for a free command tag
 * @query: query request/response data
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
224
/*
 * Sizes (in bytes) of the various UFS descriptors as reported by the
 * device; filled in via ufshcd_map_desc_id_to_length().
 */
struct ufs_desc_size {
	int dev_desc;		/* device descriptor */
	int pwr_desc;		/* power descriptor */
	int geom_desc;		/* geometry descriptor */
	int interc_desc;	/* interconnect descriptor */
	int unit_desc;		/* unit descriptor */
	int conf_desc;		/* configuration descriptor */
	int hlth_desc;		/* device health descriptor */
};
234
235
236
237
238
239
240
241
242
243
244
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list head to link this entry into hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: minimum frequency supported by the clock
 * @curr_freq: currently-set frequency
 * @enabled: whether the clock is currently enabled
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
254
/* Phase argument passed to the variant-ops notification callbacks. */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

/* UniPro PA-layer power mode attributes for one link configuration. */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode (e.g. FAST_MODE, FASTAUTO_MODE) */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series */
};

/* A PA-layer configuration plus a flag saying whether it has been set. */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clocks are scaled up/down
 * @setup_clocks: called around clock enable/disable
 * @setup_regulators: called around regulator enable/disable
 * @hce_enable_notify: called before and after HCE enable bit is set
 * @link_startup_notify: called before and after link startup
 * @pwr_change_notify: called before and after a power mode change
 * @setup_xfer_req: called before a transfer request is issued
 * @setup_task_mgmt: called before a task management request is issued
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device-specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 */
struct ufs_hba_variant_ops {
	const char *name;
	int (*init)(struct ufs_hba *);
	void (*exit)(struct ufs_hba *);
	u32 (*get_ufs_hci_version)(struct ufs_hba *);
	int (*clk_scale_notify)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int (*setup_clocks)(struct ufs_hba *, bool,
			    enum ufs_notify_change_status);
	int (*setup_regulators)(struct ufs_hba *, bool);
	int (*hce_enable_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status);
	int (*link_startup_notify)(struct ufs_hba *,
				   enum ufs_notify_change_status);
	int (*pwr_change_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status status,
				 struct ufs_pa_layer_attr *,
				 struct ufs_pa_layer_attr *);
	void (*setup_xfer_req)(struct ufs_hba *, int, bool);
	void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
			       enum ufs_notify_change_status);
	int (*apply_dev_quirks)(struct ufs_hba *);
	int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void (*dbg_register_dump)(struct ufs_hba *hba);
	int (*phy_initialization)(struct ufs_hba *);
};
330
331
/* State machine for dynamic clock gating (see struct ufs_clk_gating). */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating has been requested */
	REQ_CLKS_ON,	/* ungating has been requested */
};
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after the delay in @delay_ms
 * @ungate_work: worker to turn on clocks (usable from interrupt context)
 * @state: current clock-gating state machine state
 * @delay_ms: gating delay in milliseconds
 * @is_suspended: set while clock gating is suspended (e.g. across PM)
 * @delay_attr: sysfs attribute controlling @delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: current enable status of clock gating
 * @active_reqs: number of pending requests that must complete before
 *	clocks can be gated
 * @clk_gating_workq: workqueue the gating/ungating work runs on
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

/* A saved PA-layer power configuration plus its validity flag. */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests in flight (busy-time accounting)
 * @tot_busy_t: total busy time accumulated in the current window
 * @window_start_t: time the current polling window started
 * @busy_start_t: time the controller last became busy
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: power mode to restore when scaling back up
 * @workq: workqueue the suspend/resume work runs on
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: whether clock scaling is currently allowed
 * @is_busy_started: whether busy-time accounting is running
 * @is_suspended: whether devfreq is suspended
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};
405
406
407
408
409
410
/* Data computed during initialization and cached for later use. */
struct ufs_init_prefetch {
	u32 icc_level;	/* bActiveICCLevel value determined at init */
};

/* Number of entries kept in each UIC error-history ring buffer. */
#define UIC_ERR_REG_HIST_LENGTH 8

/**
 * struct ufs_uic_err_reg_hist - circular history of UIC error registers
 * @pos: index of the next slot to fill (ring-buffer position)
 * @reg: saved error-register values
 * @tstamp: timestamp at which each value was recorded
 */
struct ufs_uic_err_reg_hist {
	int pos;
	u32 reg[UIC_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
};
427
428
429
430
431
432
433
434
435
436
437
438
439
/**
 * struct ufs_stats - error/event statistics kept per host
 * @hibern8_exit_cnt: number of hibern8 exits
 * @last_hibern8_exit_tstamp: time of the most recent hibern8 exit
 * @pa_err: PHY adapter layer error history
 * @dl_err: data link layer error history
 * @nl_err: network layer error history
 * @tl_err: transport layer error history
 * @dme_err: DME error history
 */
struct ufs_stats {
	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_uic_err_reg_hist pa_err;
	struct ufs_uic_err_reg_hist dl_err;
	struct ufs_uic_err_reg_hist nl_err;
	struct ufs_uic_err_reg_hist tl_err;
	struct ufs_uic_err_reg_hist dme_err;
};
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
/**
 * struct ufs_hba - per-adapter private structure
 *
 * Everything the core driver tracks for one UFS host controller:
 * descriptor memory, SCSI/device handles, power-management state,
 * quirks, capabilities, error statistics, and the clock gating and
 * scaling machinery.
 */
struct ufs_hba {
	void __iomem *mmio_base;	/* UFSHCI register base address */

	/* Virtual memory references to the descriptor lists */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA addresses of the descriptor lists above */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;	/* SCSI host handle */
	struct device *dev;	/* device handle */

	/*
	 * scsi_device for the "UFS device" well-known LU; presumably used
	 * to send device-management requests (e.g. START STOP UNIT during
	 * PM transitions) - the users live in the .c file.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;	/* current device power mode */
	enum uic_link_state uic_link_state;		/* cached link state */
	/* desired UFS power-management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* desired UFS power-management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;	/* sysfs control for rpm_lvl */
	struct device_attribute spm_lvl_attr;	/* sysfs control for spm_lvl */
	int pm_op_in_progress;	/* nonzero while a PM operation runs */

	/* cached Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;		/* array of local reference blocks */
	unsigned long lrb_in_use;	/* bitmap of in-use lrb slots */

	unsigned long outstanding_tasks;	/* bits of pending TM requests */
	unsigned long outstanding_reqs;		/* bits of pending transfer requests */

	u32 capabilities;	/* controller capabilities register */
	int nutrs;		/* number of transfer request slots */
	int nutmrs;		/* number of task management request slots */
	u32 ufs_version;	/* UFSHCI version of the controller */
	const struct ufs_hba_variant_ops *vops;	/* variant-specific callbacks */
	void *priv;		/* variant-private data, see ufshcd_{set,get}_variant() */
	unsigned int irq;	/* IRQ number */
	bool is_irq_enabled;	/* whether the IRQ is currently enabled */
	enum ufs_ref_clk_freq dev_ref_clk_freq;	/* device reference clock frequency */

	/* Interrupt aggregation support is broken */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1

	/* A delay is required before each DME command */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2

	/* LCC (Line Control Command) handling is broken */
	#define UFSHCD_QUIRK_BROKEN_LCC 0x4

	/* PA_RXHSUNTERMCAP attribute handling is broken */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8

	/* Peer DME attributes must be accessed in AUTO mode */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10

	/* Controller reports a wrong UFS HCI version */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20

	/* PRDT data byte count uses byte granularity */
	#define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80

	/* Clearing the request list doorbells is unreliable */
	#define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100

	/* Skip resetting interrupt aggregation counters */
	#define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200

	/* HCE (host controller enable) handshake is broken */
	#define UFSHCI_QUIRK_BROKEN_HCE 0x400
	unsigned int quirks;	/* bitmask of the UFSHCD/UFSHCI quirks above */

	/* bitmask of device quirks (UFS_DEVICE_QUIRK_*) */
	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;	/* wait queue for TM completion */
	wait_queue_head_t tm_tag_wq;	/* wait queue for a free TM slot */
	unsigned long tm_condition;	/* TM completion condition bits */
	unsigned long tm_slots_in_use;	/* bitmap of in-use TM slots */

	struct uic_command *active_uic_cmd;	/* UIC command in flight */
	struct mutex uic_cmd_mutex;		/* serializes UIC commands */
	struct completion *uic_async_done;	/* completion for async UIC cmds */

	u32 ufshcd_state;	/* driver operational state */
	u32 eh_flags;		/* error-handling flags */
	u32 intr_mask;		/* interrupts enabled on this controller */
	u16 ee_ctrl_mask;	/* exception event control mask */
	bool is_powered;	/* whether host resources are powered */
	bool is_init_prefetch;	/* whether init-time prefetch was done */
	struct ufs_init_prefetch init_prefetch_data;

	/* Work structs for error and exception-event handling */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* Error bookkeeping */
	u32 errors;		/* HCS/interrupt error status */
	u32 uic_error;		/* UIC error code */
	u32 saved_err;		/* errors saved for the error handler */
	u32 saved_uic_err;	/* UIC errors saved for the error handler */
	struct ufs_stats ufs_stats;	/* error history and statistics */

	/* Internal device-management command state */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;	/* time of the last DME command */

	/* Device-side information and power/clock resources */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;	/* whether auto-BKOPS is on */
	struct ufs_vreg_info vreg_info;	/* voltage regulator info */
	struct list_head clk_list_head;	/* list of ufs_clk_info */

	bool wlun_dev_clr_ua;	/* device W-LU unit attention pending clear */

	/* number of retries issued for the current request abort */
	int req_abort_count;

	/* Link and power mode configuration */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;	/* current power mode */
	struct ufs_pwr_mode_info max_pwr_info;	/* max supported power mode */

	struct ufs_clk_gating clk_gating;	/* clock gating state */

	/* Control options, a bitmask of the UFSHCD_CAP_* flags below */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING (1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
	/* Allow auto bkops to enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/* Allow interrupt aggregation */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
	/* Keep auto-BKOPS enabled except during suspend */
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)

	struct devfreq *devfreq;		/* devfreq device for clk scaling */
	struct ufs_clk_scaling clk_scaling;	/* clock scaling state */
	bool is_sys_suspended;			/* whether system-suspended */

	enum bkops_status urgent_bkops_lvl;	/* BKOPS level needing urgent handling */
	bool is_urgent_bkops_lvl_checked;	/* whether the above was queried */

	struct rw_semaphore clk_scaling_lock;	/* excludes I/O during scaling */
	struct ufs_desc_size desc_size;		/* cached descriptor sizes */
	atomic_t scsi_block_reqs_cnt;		/* nested SCSI request-block count */

	struct device bsg_dev;			/* BSG device node */
	struct request_queue *bsg_queue;	/* BSG request queue */
};
710
711
712static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
713{
714 return hba->caps & UFSHCD_CAP_CLK_GATING;
715}
716static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
717{
718 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
719}
720static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
721{
722 return hba->caps & UFSHCD_CAP_CLK_SCALING;
723}
724static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
725{
726 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
727}
728
729static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
730{
731
732#ifndef CONFIG_SCSI_UFS_DWC
733 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
734 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
735 return true;
736 else
737 return false;
738#else
739return true;
740#endif
741}
742
743static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
744{
745 return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
746}
747
/* MMIO accessors for UFSHCI registers, relative to hba->mmio_base. */
#define ufshcd_writel(hba, val, reg) \
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
	readl((hba)->mmio_base + (reg))
752
753
754
755
756
757
758
759
760static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
761{
762 u32 tmp;
763
764 tmp = ufshcd_readl(hba, reg);
765 tmp &= ~mask;
766 tmp |= (val & mask);
767 ufshcd_writel(hba, tmp, reg);
768}
769
/* Host allocation / initialization / teardown entry points. */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
/* Poll @reg until (value & mask) == val, or @timeout_ms elapses. */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep);
/* Determine the device reference clock frequency from @refclk. */
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
778
/*
 * Compile-time check that one aligned UPIU slot can hold a general UPIU
 * request plus the largest query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
784
785
786
787
788
789
/**
 * ufshcd_set_variant - attach variant-private data to the hba
 * @hba: per-adapter instance
 * @variant: pointer to the variant-specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
795
796
797
798
799
/**
 * ufshcd_get_variant - retrieve the variant-private data of the hba
 * @hba: per-adapter instance
 *
 * Returns the pointer previously stored by ufshcd_set_variant().
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
805static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
806 struct ufs_hba *hba)
807{
808 return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
809}
810
/* PM entry points called by the bus-glue (platform/PCI) drivers. */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
/* Raw DME attribute accessors; prefer the inline wrappers below. */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* Peer selector values for ufshcd_dme_{set,get}_attr(). */
#define DME_LOCAL 0
#define DME_PEER 1
/* Attribute set-type values for ufshcd_dme_set_attr(). */
#define ATTR_SET_NOR 0
#define ATTR_SET_ST 1
829
830static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
831 u32 mib_val)
832{
833 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
834 mib_val, DME_LOCAL);
835}
836
837static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
838 u32 mib_val)
839{
840 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
841 mib_val, DME_LOCAL);
842}
843
844static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
845 u32 mib_val)
846{
847 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
848 mib_val, DME_PEER);
849}
850
851static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
852 u32 mib_val)
853{
854 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
855 mib_val, DME_PEER);
856}
857
858static inline int ufshcd_dme_get(struct ufs_hba *hba,
859 u32 attr_sel, u32 *mib_val)
860{
861 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
862}
863
864static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
865 u32 attr_sel, u32 *mib_val)
866{
867 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
868}
869
870static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
871{
872 return (pwr_info->pwr_rx == FAST_MODE ||
873 pwr_info->pwr_rx == FASTAUTO_MODE) &&
874 (pwr_info->pwr_tx == FAST_MODE ||
875 pwr_info->pwr_tx == FASTAUTO_MODE);
876}
877
878
/* Query-request helpers: descriptors, attributes and flags. */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
/* Read @param_size bytes at @param_offset of the given descriptor. */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, bool *flag_res);
/* Read a string descriptor; optionally convert from UTF-16 to ASCII. */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii);

/* Clock gating reference counting: hold keeps clocks on, release drops. */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

/* Look up the cached length of the descriptor identified by @desc_id. */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
	int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

/* Issue a raw UPIU (used by the BSG interface). */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);
913
914
915static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
916{
917 if (hba->vops)
918 return hba->vops->name;
919 return "";
920}
921
922static inline int ufshcd_vops_init(struct ufs_hba *hba)
923{
924 if (hba->vops && hba->vops->init)
925 return hba->vops->init(hba);
926
927 return 0;
928}
929
930static inline void ufshcd_vops_exit(struct ufs_hba *hba)
931{
932 if (hba->vops && hba->vops->exit)
933 return hba->vops->exit(hba);
934}
935
936static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
937{
938 if (hba->vops && hba->vops->get_ufs_hci_version)
939 return hba->vops->get_ufs_hci_version(hba);
940
941 return ufshcd_readl(hba, REG_UFS_VERSION);
942}
943
944static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
945 bool up, enum ufs_notify_change_status status)
946{
947 if (hba->vops && hba->vops->clk_scale_notify)
948 return hba->vops->clk_scale_notify(hba, up, status);
949 return 0;
950}
951
952static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
953 enum ufs_notify_change_status status)
954{
955 if (hba->vops && hba->vops->setup_clocks)
956 return hba->vops->setup_clocks(hba, on, status);
957 return 0;
958}
959
960static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
961{
962 if (hba->vops && hba->vops->setup_regulators)
963 return hba->vops->setup_regulators(hba, status);
964
965 return 0;
966}
967
968static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
969 bool status)
970{
971 if (hba->vops && hba->vops->hce_enable_notify)
972 return hba->vops->hce_enable_notify(hba, status);
973
974 return 0;
975}
976static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
977 bool status)
978{
979 if (hba->vops && hba->vops->link_startup_notify)
980 return hba->vops->link_startup_notify(hba, status);
981
982 return 0;
983}
984
985static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
986 bool status,
987 struct ufs_pa_layer_attr *dev_max_params,
988 struct ufs_pa_layer_attr *dev_req_params)
989{
990 if (hba->vops && hba->vops->pwr_change_notify)
991 return hba->vops->pwr_change_notify(hba, status,
992 dev_max_params, dev_req_params);
993
994 return -ENOTSUPP;
995}
996
997static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
998 bool is_scsi_cmd)
999{
1000 if (hba->vops && hba->vops->setup_xfer_req)
1001 return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
1002}
1003
1004static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
1005 int tag, u8 tm_function)
1006{
1007 if (hba->vops && hba->vops->setup_task_mgmt)
1008 return hba->vops->setup_task_mgmt(hba, tag, tm_function);
1009}
1010
1011static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
1012 enum uic_cmd_dme cmd,
1013 enum ufs_notify_change_status status)
1014{
1015 if (hba->vops && hba->vops->hibern8_notify)
1016 return hba->vops->hibern8_notify(hba, cmd, status);
1017}
1018
1019static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
1020{
1021 if (hba->vops && hba->vops->apply_dev_quirks)
1022 return hba->vops->apply_dev_quirks(hba);
1023 return 0;
1024}
1025
1026static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
1027{
1028 if (hba->vops && hba->vops->suspend)
1029 return hba->vops->suspend(hba, op);
1030
1031 return 0;
1032}
1033
1034static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
1035{
1036 if (hba->vops && hba->vops->resume)
1037 return hba->vops->resume(hba, op);
1038
1039 return 0;
1040}
1041
1042static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
1043{
1044 if (hba->vops && hba->vops->dbg_register_dump)
1045 hba->vops->dbg_register_dump(hba);
1046}
1047
1048extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
1049
1050
1051
1052
1053
1054
1055
1056static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1057{
1058 if (scsi_is_wlun(scsi_lun))
1059 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1060 | UFS_UPIU_WLUN_ID;
1061 else
1062 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1063}
1064
/* Dump @len bytes of UFSHCI register space starting at @offset. */
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);
1067
1068#endif
1069