1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef _UFSHCD_H
38#define _UFSHCD_H
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/init.h>
43#include <linux/interrupt.h>
44#include <linux/io.h>
45#include <linux/delay.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/rwsem.h>
49#include <linux/workqueue.h>
50#include <linux/errno.h>
51#include <linux/types.h>
52#include <linux/wait.h>
53#include <linux/bitops.h>
54#include <linux/pm_runtime.h>
55#include <linux/clk.h>
56#include <linux/completion.h>
57#include <linux/regulator/consumer.h>
58#include "unipro.h"
59
60#include <asm/irq.h>
61#include <asm/byteorder.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66#include <scsi/scsi_dbg.h>
67#include <scsi/scsi_eh.h>
68
69#include "ufs.h"
70#include "ufshci.h"
71
72#define UFSHCD "ufshcd"
73#define UFSHCD_DRIVER_VERSION "0.2"
74
75struct ufs_hba;
76
/* Type of a device-management command issued on the dev_cmd path. */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,	/* NOP OUT UPIU (link liveness check) */
	DEV_CMD_TYPE_QUERY	= 0x1,	/* Query request UPIU */
};
81
82
83
84
85
86
87
88
89
90
91
/**
 * struct uic_command - UIC command structure
 * @command: UIC command opcode
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: flag indicating that the command is still in flight
 * @result: UIC command result code
 * @done: completed when the UIC command completion interrupt fires
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
101
102
/* Which power-management path requested the suspend/resume operation. */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Convenience predicates on an enum ufs_pm_op value. */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
112
113
/* Host side UniPro link state. */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
};

/* Accessors for hba->uic_link_state (read and write, respectively). */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
130
131
132
133
134
/*
 * UFS power-management level: each level maps to one (device power mode,
 * UIC link state) pair; the table itself lives in ufs_pm_lvl_states[]
 * (declared extern near the end of this header).
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX		/* number of levels, not a valid level */
};

/* One row of the PM-level table: device power mode + link state. */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/**
 * struct ufshcd_lrb - local reference block; one per request slot
 * @utr_descriptor_ptr: UTP transfer request descriptor for this command
 * @ucd_req_ptr: request UPIU address within the command descriptor
 * @ucd_rsp_ptr: response UPIU address within the command descriptor
 * @ucd_prdt_ptr: PRD table address within the command descriptor
 * @utrd_dma_addr: UTRD DMA address (kept for debug dumps)
 * @ucd_req_dma_addr: request UPIU DMA address (debug)
 * @ucd_rsp_dma_addr: response UPIU DMA address (debug)
 * @ucd_prdt_dma_addr: PRDT DMA address (debug)
 * @cmd: SCSI command this slot carries; NULL for device-management cmds
 * @sense_buffer: sense buffer address of the SCSI command
 * @sense_bufflen: length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS or Query command type
 * @task_tag: task tag (doorbell slot index) of the command
 * @lun: UPIU LUN the command is addressed to
 * @intr_cmd: true for commands that complete via interrupt rather than
 *	participating in interrupt aggregation
 * @issue_time_stamp: time stamp taken at issue, for debug/stats
 * @compl_time_stamp: time stamp taken at completion, for debug/stats
 * @req_abort_skip: skip this slot when aborting outstanding requests
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;

	bool req_abort_skip;
};
197
198
199
200
201
202
203
/**
 * struct ufs_query - one in-flight query request/response pair
 * @request: query request UPIU payload
 * @descriptor: descriptor buffer for read/write-descriptor queries
 * @response: query response UPIU payload
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
209
210
211
212
213
214
215
216
/**
 * struct ufs_dev_cmd - device-management command state
 * @type: command type (NOP / query)
 * @lock: serializes device-management command issue
 * @complete: signalled when the command completes
 * @tag_wq: wait queue for a free command slot (tag)
 * @query: query request/response data for DEV_CMD_TYPE_QUERY
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
224
/* Per-device actual sizes of the UFS descriptors, read at probe time. */
struct ufs_desc_size {
	int dev_desc;	/* device descriptor */
	int pwr_desc;	/* power descriptor */
	int geom_desc;	/* geometry descriptor */
	int interc_desc;	/* interconnect descriptor */
	int unit_desc;	/* unit descriptor */
	int conf_desc;	/* configuration descriptor */
	int hlth_desc;	/* device health descriptor */
};
234
235
236
237
238
239
240
241
242
243
244
/**
 * struct ufs_clk_info - one controller clock and its frequency bounds
 * @list: link into hba->clk_list_head
 * @clk: clock handle
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock (Hz)
 * @min_freq: minimum frequency supported by the clock (Hz)
 * @curr_freq: frequency currently set (Hz)
 * @enabled: true while the clock is prepared/enabled
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
254
/* Whether a variant-ops notification is issued before or after the change. */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};
259
/* UniPro PA-layer attributes describing a power mode (per direction). */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode (FAST/SLOW/FASTAUTO/...) */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series (A/B) */
};
269
/* PA-layer attributes plus a validity flag (set once negotiated). */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/**
 * struct ufs_hba_variant_ops - vendor-specific callbacks
 * @name: variant name
 * @init: called during host controller initialization
 * @exit: called during host controller teardown
 * @get_ufs_hci_version: retrieves the UFS HCI version
 * @clk_scale_notify: notifies that clock scaling is about to/just happened
 * @setup_clocks: called before/after enabling or disabling the clocks
 * @setup_regulators: called before/after enabling or disabling regulators
 * @hce_enable_notify: called before/after the host controller is enabled
 * @link_startup_notify: called before/after link startup
 * @pwr_change_notify: called before/after a power mode change
 * @setup_xfer_req: called before issuing a transfer request
 * @setup_task_mgmt: called before issuing a task-management request
 * @hibern8_notify: called around hibern8 entry/exit
 * @apply_dev_quirks: applies device-specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: dumps vendor registers for debugging
 * @phy_initialization: initializes the PHY
 * @device_reset: resets the attached UFS device via a vendor mechanism
 *
 * All callbacks are optional; the ufshcd_vops_*() wrappers below check
 * for NULL before dispatching.
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	void	(*device_reset)(struct ufs_hba *hba);
};
332
333
/* Clock gating state machine states (see struct ufs_clk_gating). */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating requested, gate_work pending */
	REQ_CLKS_ON,	/* ungating requested, ungate_work pending */
};
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
/**
 * struct ufs_clk_gating - clock gating related info
 * @gate_work: delayed work to gate clocks after the idle period elapses
 * @ungate_work: work to turn the clocks back on
 * @state: current state (see enum clk_gating_state)
 * @delay_ms: idle period in ms after which clocks are gated
 * @is_suspended: gating is suspended (e.g. during error handling)
 * @delay_attr: sysfs attribute exposing @delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: whether clock gating is currently enabled
 * @active_reqs: number of holders keeping the clocks on
 * @clk_gating_workq: dedicated workqueue for gate/ungate work
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};
369
/* Power mode saved before clock scaling, restored when scaling back up. */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
/**
 * struct ufs_clk_scaling - clock scaling (devfreq) bookkeeping
 * @active_reqs: outstanding requests counted for busy-time tracking
 * @tot_busy_t: total busy time in the current monitoring window (us)
 * @window_start_t: start time of the current monitoring window
 * @busy_start_t: time stamp at which the controller last became busy
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: power mode to restore after scaling back up
 * @workq: workqueue running @suspend_work and @resume_work
 * @suspend_work: work to suspend devfreq monitoring
 * @resume_work: work to resume devfreq monitoring
 * @is_allowed: clock scaling currently permitted
 * @is_busy_started: busy-time accounting is in progress
 * @is_suspended: devfreq monitoring is suspended
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};
407
408
409
410
411
412
/* Data prefetched during initialization; @icc_level: active ICC level. */
struct ufs_init_prefetch {
	u32 icc_level;
};
416
/* Number of entries kept per error-history ring buffer. */
#define UFS_ERR_REG_HIST_LENGTH 8

/**
 * struct ufs_err_reg_hist - ring buffer of recent error values
 * @pos: index of the next entry to overwrite
 * @reg: recorded register/error values
 * @tstamp: time stamp matching each @reg entry
 */
struct ufs_err_reg_hist {
	int pos;
	u32 reg[UFS_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
/**
 * struct ufs_stats - statistics and error histories kept for debugging
 * @hibern8_exit_cnt: number of hibern8 exits
 * @last_hibern8_exit_tstamp: time stamp of the most recent hibern8 exit
 * @pa_err..@dme_err: UIC error histories, one per UniPro layer
 * @auto_hibern8_err..@suspend_err: fatal/flow error histories
 * @dev_reset..@task_abort: abort/reset event histories
 */
struct ufs_stats {
	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;

	/* UIC error histories, one per UniPro layer */
	struct ufs_err_reg_hist pa_err;
	struct ufs_err_reg_hist dl_err;
	struct ufs_err_reg_hist nl_err;
	struct ufs_err_reg_hist tl_err;
	struct ufs_err_reg_hist dme_err;

	/* fatal and flow error histories */
	struct ufs_err_reg_hist auto_hibern8_err;
	struct ufs_err_reg_hist fatal_err;
	struct ufs_err_reg_hist link_startup_err;
	struct ufs_err_reg_hist resume_err;
	struct ufs_err_reg_hist suspend_err;

	/* abort and reset event histories */
	struct ufs_err_reg_hist dev_reset;
	struct ufs_err_reg_hist host_reset;
	struct ufs_err_reg_hist task_abort;
};
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
/**
 * struct ufs_hba - per-adapter private structure
 * @mmio_base: UFSHCI register base address
 * @ucdl_base_addr: UFS command descriptor list, virtual address
 * @utrdl_base_addr: UTP transfer request descriptor list, virtual address
 * @utmrdl_base_addr: UTP task management descriptor list, virtual address
 * @ucdl_dma_addr: DMA address of @ucdl_base_addr
 * @utrdl_dma_addr: DMA address of @utrdl_base_addr
 * @utmrdl_dma_addr: DMA address of @utmrdl_base_addr
 * @host: SCSI host handle
 * @dev: device handle
 * @sdev_ufs_device: the UFS device W-LU scsi_device
 * @curr_dev_pwr_mode: current device power mode
 * @uic_link_state: current UIC link state
 * @rpm_lvl: PM level selected for runtime PM
 * @spm_lvl: PM level selected for system PM
 * @pm_op_in_progress: non-zero while a PM operation is running
 * @ahit: value of the AUTO_HIBERNATE_IDLE_TIMER register
 * @lrb: array of local reference blocks, one per transfer request slot
 * @lrb_in_use: bitmap of occupied transfer request slots
 * @outstanding_tasks: bitmap of issued task management requests
 * @outstanding_reqs: bitmap of issued transfer requests
 * @capabilities: controller capabilities register value
 * @nutrs: number of UTP transfer request slots
 * @nutmrs: number of UTP task management request slots
 * @ufs_version: UFS HCI version
 * @vops: vendor-specific callbacks, may be NULL
 * @priv: vendor-specific private data (see ufshcd_{set,get}_variant())
 * @irq: controller IRQ number
 * @is_irq_enabled: whether the IRQ is currently enabled
 * @dev_ref_clk_freq: reference clock frequency supplied to the device
 * @quirks: host controller quirks (UFSHCD_QUIRK_* / UFSHCI_QUIRK_* below)
 * @dev_quirks: attached-device quirks
 * @ufshcd_state: driver operational state
 * @eh_flags: error-handling flags
 * @intr_mask: interrupt mask the driver operates with
 * @ee_ctrl_mask: exception event control mask
 * @is_powered: regulators/clocks have been brought up
 * @eh_work: error handling work item
 * @eeh_work: exception event handling work item
 * @errors: error bits reported by the controller
 * @uic_error: UIC layer error code
 * @saved_err: errors saved for the error handler
 * @saved_uic_err: UIC errors saved for the error handler
 * @dev_cmd: device-management command state
 * @last_dme_cmd_tstamp: completion time of the most recent DME command
 * @auto_bkops_enabled: device auto-BKOPS is currently enabled
 * @clk_list_head: list of struct ufs_clk_info for all controller clocks
 * @wlun_dev_clr_ua: device W-LU unit attention needs clearing
 * @req_abort_count: aborts issued since the last reset
 * @lanes_per_direction: number of lanes per link direction
 * @caps: driver capability flags (UFSHCD_CAP_* below)
 * @urgent_bkops_lvl: BKOPS level considered urgent
 * @desc_size: actual descriptor sizes read from the device
 * @bsg_dev: bsg parent device
 * @bsg_queue: bsg request queue
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory references */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory references */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;
	unsigned long lrb_in_use;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	/* Interrupt aggregation support is broken */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			0x1

	/* Delay before each DME command is required by the controller */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		0x2

	/* LCC (Line Control Command) handling is broken on the host */
	#define UFSHCD_QUIRK_BROKEN_LCC				0x4

	/* PA_RXHSUNTERMCAP attribute handling is broken */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		0x8

	/* DME peer attribute access requires AUTO mode */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		0x10

	/* Controller misreports its HCI version register */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		0x20

	/* PRDT data byte count uses byte granularity, not DWORD */
	#define UFSHCD_QUIRK_PRDT_BYTE_GRAN			0x80

	/* UTRLCLR/UTMRLCLR registers do not work as specified */
	#define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		0x100

	/* Skip resetting interrupt aggregation counters */
	#define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		0x200

	/* HCE (Host Controller Enable) register is broken */
	#define UFSHCI_QUIRK_BROKEN_HCE				0x400
	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;
	wait_queue_head_t tm_tag_wq;
	unsigned long tm_condition;
	unsigned long tm_slots_in_use;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_init_prefetch;
	struct ufs_init_prefetch init_prefetch_data;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborted */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;

	/* Control to enable/disable host capabilities */
	u32 caps;

	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING	(1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
	/* Allow auto bkops to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/* Allow interrupt aggregation when supported and not quirked off */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
	/* Keep auto-BKOPS enabled except during suspend */
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	struct ufs_desc_size desc_size;
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
};
734
735
736static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
737{
738 return hba->caps & UFSHCD_CAP_CLK_GATING;
739}
740static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
741{
742 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
743}
744static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
745{
746 return hba->caps & UFSHCD_CAP_CLK_SCALING;
747}
748static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
749{
750 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
751}
752
753static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
754{
755
756#ifndef CONFIG_SCSI_UFS_DWC
757 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
758 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
759 return true;
760 else
761 return false;
762#else
763return true;
764#endif
765}
766
767static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
768{
769 return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
770}
771
/* MMIO register accessors, offset relative to hba->mmio_base. */
#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
776
777
778
779
780
781
782
783
784static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
785{
786 u32 tmp;
787
788 tmp = ufshcd_readl(hba, reg);
789 tmp &= ~mask;
790 tmp |= (val & mask);
791 ufshcd_writel(hba, tmp, reg);
792}
793
794int ufshcd_alloc_host(struct device *, struct ufs_hba **);
795void ufshcd_dealloc_host(struct ufs_hba *);
796int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
797void ufshcd_remove(struct ufs_hba *);
798int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
799 u32 val, unsigned long interval_us,
800 unsigned long timeout_ms, bool can_sleep);
801void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
802
/*
 * Compile-time check that an aligned UPIU slot is large enough to hold a
 * general UPIU request plus the largest query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
808
809
810
811
812
813
/**
 * ufshcd_set_variant - attach vendor-specific private data to the host
 * @hba: per adapter instance
 * @variant: pointer to the variant-specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
819
820
821
822
823
/**
 * ufshcd_get_variant - retrieve vendor-specific private data
 * @hba: per adapter instance
 *
 * Returns the pointer previously stored with ufshcd_set_variant().
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
/* True when auto-BKOPS should stay enabled except across suspend. */
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
834
835extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
836extern int ufshcd_runtime_resume(struct ufs_hba *hba);
837extern int ufshcd_runtime_idle(struct ufs_hba *hba);
838extern int ufshcd_system_suspend(struct ufs_hba *hba);
839extern int ufshcd_system_resume(struct ufs_hba *hba);
840extern int ufshcd_shutdown(struct ufs_hba *hba);
841extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
842 u8 attr_set, u32 mib_val, u8 peer);
843extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
844 u32 *mib_val, u8 peer);
845extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
846 struct ufs_pa_layer_attr *desired_pwr_mode);
847
848
/* Target of a DME attribute access: this host or the peer device. */
#define DME_LOCAL	0
#define DME_PEER	1
/* Attribute set type: normal value or static (non-volatile) value. */
#define ATTR_SET_NOR	0
#define ATTR_SET_ST	1
853
854static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
855 u32 mib_val)
856{
857 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
858 mib_val, DME_LOCAL);
859}
860
861static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
862 u32 mib_val)
863{
864 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
865 mib_val, DME_LOCAL);
866}
867
868static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
869 u32 mib_val)
870{
871 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
872 mib_val, DME_PEER);
873}
874
875static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
876 u32 mib_val)
877{
878 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
879 mib_val, DME_PEER);
880}
881
882static inline int ufshcd_dme_get(struct ufs_hba *hba,
883 u32 attr_sel, u32 *mib_val)
884{
885 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
886}
887
888static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
889 u32 attr_sel, u32 *mib_val)
890{
891 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
892}
893
894static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
895{
896 return (pwr_info->pwr_rx == FAST_MODE ||
897 pwr_info->pwr_rx == FASTAUTO_MODE) &&
898 (pwr_info->pwr_tx == FAST_MODE ||
899 pwr_info->pwr_tx == FASTAUTO_MODE);
900}
901
902
903int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
904 enum query_opcode opcode,
905 enum desc_idn idn, u8 index,
906 u8 selector,
907 u8 *desc_buf, int *buf_len);
908int ufshcd_read_desc_param(struct ufs_hba *hba,
909 enum desc_idn desc_id,
910 int desc_index,
911 u8 param_offset,
912 u8 *param_read_buf,
913 u8 param_size);
914int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
915 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
916int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
917 enum flag_idn idn, bool *flag_res);
918
919#define SD_ASCII_STD true
920#define SD_RAW false
921int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
922 u8 **buf, bool ascii);
923
924int ufshcd_hold(struct ufs_hba *hba, bool async);
925void ufshcd_release(struct ufs_hba *hba);
926
927int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
928 int *desc_length);
929
930u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
931
932int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
933
934int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
935 struct utp_upiu_req *req_upiu,
936 struct utp_upiu_req *rsp_upiu,
937 int msgcode,
938 u8 *desc_buff, int *buff_len,
939 enum query_opcode desc_op);
940
941
942static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
943{
944 if (hba->vops)
945 return hba->vops->name;
946 return "";
947}
948
949static inline int ufshcd_vops_init(struct ufs_hba *hba)
950{
951 if (hba->vops && hba->vops->init)
952 return hba->vops->init(hba);
953
954 return 0;
955}
956
957static inline void ufshcd_vops_exit(struct ufs_hba *hba)
958{
959 if (hba->vops && hba->vops->exit)
960 return hba->vops->exit(hba);
961}
962
963static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
964{
965 if (hba->vops && hba->vops->get_ufs_hci_version)
966 return hba->vops->get_ufs_hci_version(hba);
967
968 return ufshcd_readl(hba, REG_UFS_VERSION);
969}
970
971static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
972 bool up, enum ufs_notify_change_status status)
973{
974 if (hba->vops && hba->vops->clk_scale_notify)
975 return hba->vops->clk_scale_notify(hba, up, status);
976 return 0;
977}
978
979static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
980 enum ufs_notify_change_status status)
981{
982 if (hba->vops && hba->vops->setup_clocks)
983 return hba->vops->setup_clocks(hba, on, status);
984 return 0;
985}
986
987static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
988{
989 if (hba->vops && hba->vops->setup_regulators)
990 return hba->vops->setup_regulators(hba, status);
991
992 return 0;
993}
994
995static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
996 bool status)
997{
998 if (hba->vops && hba->vops->hce_enable_notify)
999 return hba->vops->hce_enable_notify(hba, status);
1000
1001 return 0;
1002}
1003static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
1004 bool status)
1005{
1006 if (hba->vops && hba->vops->link_startup_notify)
1007 return hba->vops->link_startup_notify(hba, status);
1008
1009 return 0;
1010}
1011
1012static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
1013 bool status,
1014 struct ufs_pa_layer_attr *dev_max_params,
1015 struct ufs_pa_layer_attr *dev_req_params)
1016{
1017 if (hba->vops && hba->vops->pwr_change_notify)
1018 return hba->vops->pwr_change_notify(hba, status,
1019 dev_max_params, dev_req_params);
1020
1021 return -ENOTSUPP;
1022}
1023
1024static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
1025 bool is_scsi_cmd)
1026{
1027 if (hba->vops && hba->vops->setup_xfer_req)
1028 return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
1029}
1030
1031static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
1032 int tag, u8 tm_function)
1033{
1034 if (hba->vops && hba->vops->setup_task_mgmt)
1035 return hba->vops->setup_task_mgmt(hba, tag, tm_function);
1036}
1037
1038static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
1039 enum uic_cmd_dme cmd,
1040 enum ufs_notify_change_status status)
1041{
1042 if (hba->vops && hba->vops->hibern8_notify)
1043 return hba->vops->hibern8_notify(hba, cmd, status);
1044}
1045
1046static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
1047{
1048 if (hba->vops && hba->vops->apply_dev_quirks)
1049 return hba->vops->apply_dev_quirks(hba);
1050 return 0;
1051}
1052
1053static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
1054{
1055 if (hba->vops && hba->vops->suspend)
1056 return hba->vops->suspend(hba, op);
1057
1058 return 0;
1059}
1060
1061static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
1062{
1063 if (hba->vops && hba->vops->resume)
1064 return hba->vops->resume(hba, op);
1065
1066 return 0;
1067}
1068
1069static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
1070{
1071 if (hba->vops && hba->vops->dbg_register_dump)
1072 hba->vops->dbg_register_dump(hba);
1073}
1074
1075static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
1076{
1077 if (hba->vops && hba->vops->device_reset)
1078 hba->vops->device_reset(hba);
1079}
1080
1081extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
1082
1083
1084
1085
1086
1087
1088
1089static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1090{
1091 if (scsi_is_wlun(scsi_lun))
1092 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1093 | UFS_UPIU_WLUN_ID;
1094 else
1095 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1096}
1097
1098int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
1099 const char *prefix);
1100
1101#endif
1102