/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Universal Flash Storage Host controller driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */
#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/keyslot-manager.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufs_quirks.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	struct completion done;
};
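
/*
 * Example (illustrative sketch, not part of this header): a raw DME_GET
 * issued through ufshcd_send_uic_cmd(), which is declared further down.
 * UIC_CMD_DME_GET, UIC_ARG_MIB() and PA_AVAILTXDATALANES come from
 * ufshci.h/unipro.h; hba is assumed to be a valid adapter instance.
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES),
 *	};
 *
 *	ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	// on completion, argument3 carries the attribute value
 */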

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0,	/* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1,	/* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2,	/* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3,	/* Link is in broken state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
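
/*
 * Example (illustrative): a PM level is an index into the
 * ufs_pm_lvl_states[] table declared at the end of this header; each entry
 * pairs a device power mode with a UniPro link state, so a suspend path can
 * derive both targets from a single level:
 *
 *	struct ufs_pm_lvl_states state = ufs_pm_lvl_states[hba->rpm_lvl];
 *	bool need_hibern8 = state.link_state == UIC_LINK_HIBERN8_STATE;
 *	bool send_ssu_sleep = state.dev_state == UFS_SLEEP_PWR_MODE;
 *
 * Which (dev_state, link_state) pair each UFS_PM_LVL_* maps to is defined
 * by the ufs_pm_lvl_states[] table in ufshcd.c.
 */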

/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD DMA address for debug
 * @ucd_req_dma_addr: UPIU request DMA address for debug
 * @ucd_rsp_dma_addr: UPIU response DMA address for debug
 * @ucd_prdt_dma_addr: PRDT DMA address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};

/**
 * struct ufs_query - params for query requests
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: Query request data
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
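
/*
 * Example (illustrative): every controller clock is linked into
 * hba->clk_list_head, so walking them is a plain list iteration. Error
 * handling is trimmed for brevity:
 *
 *	struct ufs_clk_info *clki;
 *
 *	list_for_each_entry(clki, &hba->clk_list_head, list) {
 *		if (clki->enabled)
 *			clk_disable_unprepare(clki->clk);
 *	}
 */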

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called when a transfer request is issued
 * @setup_task_mgmt: called when a task management request is issued
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	void	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					void *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
};
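
/*
 * Example (hypothetical vendor glue, not defined in this header): a
 * platform driver fills in only the callbacks it needs; every name below
 * except the ufs_hba_variant_ops members is made up for illustration.
 *
 *	static int example_ufs_init(struct ufs_hba *hba)
 *	{
 *		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 *		return 0;
 *	}
 *
 *	static const struct ufs_hba_variant_ops example_ufs_vops = {
 *		.name = "example-ufs",
 *		.init = example_ufs_init,
 *	};
 *
 * Unset callbacks are fine: the ufshcd_vops wrappers at the bottom of this
 * file test each function pointer before dispatching.
 */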

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks
 * @clk_gating_workq: workqueue to schedule clock gating work
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};
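
/*
 * Example (illustrative): gating is reference counted via ufshcd_hold() and
 * ufshcd_release(), declared later in this header. Code that touches
 * controller registers brackets the access:
 *
 *	ufshcd_hold(hba, false);	// sync, waits until state == CLKS_ON
 *	ufshcd_writel(hba, val, reg);
 *	ufshcd_release(hba);		// may arm gate_work after delay_ms
 */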

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_ERR_REG_HIST_LENGTH 8
/**
 * struct ufs_err_reg_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @reg: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 */
struct ufs_err_reg_hist {
	int pos;
	u32 reg[UFS_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};
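
/*
 * The history is a fixed-size ring: @pos names the slot the next sample
 * overwrites. A minimal sketch of what ufshcd_update_reg_hist() (declared
 * below) has to do; this body is inferred from the structure layout, not
 * copied from the implementation:
 *
 *	reg_hist->reg[reg_hist->pos] = reg;
 *	reg_hist->tstamp[reg_hist->pos] = ktime_get();
 *	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
 */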

/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status
 * @last_intr_ts: record the last interrupt timestamp
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @pa_err: tracks pa-uic errors
 * @dl_err: tracks dl-uic errors
 * @nl_err: tracks nl-uic errors
 * @tl_err: tracks tl-uic errors
 * @dme_err: tracks dme errors
 * @auto_hibern8_err: tracks auto-hibernate errors
 * @fatal_err: tracks fatal errors
 * @link_startup_err: tracks link-startup errors
 * @resume_err: tracks resume errors
 * @suspend_err: tracks suspend errors
 * @dev_reset: tracks device reset events
 * @host_reset: tracks host reset events
 * @task_abort: tracks task abort events
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;

	/* uic specific errors */
	struct ufs_err_reg_hist pa_err;
	struct ufs_err_reg_hist dl_err;
	struct ufs_err_reg_hist nl_err;
	struct ufs_err_reg_hist tl_err;
	struct ufs_err_reg_hist dme_err;

	/* fatal errors */
	struct ufs_err_reg_hist auto_hibern8_err;
	struct ufs_err_reg_hist fatal_err;
	struct ufs_err_reg_hist link_startup_err;
	struct ufs_err_reg_hist resume_err;
	struct ufs_err_reg_hist suspend_err;

	/* abnormal events */
	struct ufs_err_reg_hist dev_reset;
	struct ufs_err_reg_hist host_reset;
	struct ufs_err_reg_hist task_abort;
};

enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has issue with processing back to back dme commands
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data.
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,

	/*
	 * This quirk needs to disable manual flush for write booster.
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
};

enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING = 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING = 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR = 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,

	/*
	 * This capability allows the host controller driver to turn on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN = 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present.
	 */
	UFSHCD_CAP_CRYPTO = 1 << 8,
};
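
/*
 * Example (illustrative): quirks and caps are plain bitmasks, so a variant
 * driver normally ORs its selections into hba->quirks and hba->caps from
 * its ->init() callback, before the host controller is enabled:
 *
 *	hba->quirks |= UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
 *		       UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR;
 *	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_WB_EN;
 */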

struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set
 * @lrb: local reference block
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @tmf_tag_set: TMF tag set
 * @tmf_queue: Used to allocate TMF tags
 * @ufshcd_state: UFSHCD state
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @force_reset: flag to force eh_work perform a full reset
 * @force_pmc: flag to force a power mode change
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @req_abort_count: number of times ufshcd_abort() has been called
 * @lanes_per_direction: number of lanes per data direction
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not.
 * @desc_size: descriptor sizes reported by device
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @ksm: the keyslot manager tied to this hba
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference blocks */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference blocks */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	bool wb_buf_flush_enabled;
	bool wb_enabled;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_keyslot_manager ksm;
#endif
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
/* DWC UFS Core has the Interrupt aggregation feature but is not detectable */
#ifndef CONFIG_SCSI_UFS_DWC
	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
		return true;
	else
		return false;
#else
	return true;
#endif
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}

static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

/**
 * ufshcd_rmwl - read modify write into a register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}
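
/*
 * Example: update only the masked bits of a register and leave the rest
 * alone. REG_EXAMPLE_CTRL is a placeholder name, not a real register
 * offset from ufshci.h:
 *
 *	// program field [1:0] of the register to 0x2
 *	ufshcd_rmwl(hba, 0x3, 0x2, REG_EXAMPLE_CTRL);
 *
 * i.e. the helper reads the register, clears the mask bits, ORs in
 * (val & mask) and writes the result back.
 */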

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
			     u32 val, unsigned long interval_us,
			     unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
			    u32 reg);

static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}

static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
	if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
		return hba->dev_info.wb_dedicated_lu;
	return 0;
}
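
/*
 * Example (illustrative): the value returned above is used as the @index
 * argument of the query helpers declared below, so WriteBooster queries
 * land either on the device (index 0) or on the dedicated LU:
 *
 *	u32 avail_buf;
 *	u8 index = ufshcd_wb_get_query_index(hba);
 *
 *	ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			  QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, index, 0,
 *			  &avail_buf);
 */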

extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
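
/*
 * Example (illustrative): the wrappers above differ only in the ATTR_SET_x
 * and DME_x constants they forward. Reading a local PA-layer attribute and
 * mirroring it to the peer might look like this (error handling trimmed):
 *
 *	u32 granularity;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
 *	ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_GRANULARITY), granularity);
 */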

static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
	       (pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, u8 index, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				  int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  bool status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					      bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req)
		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					       int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					enum uic_cmd_dme cmd,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->fixup_dev_quirks)
		hba->vops->fixup_dev_quirks(hba);
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset) {
		hba->vops->device_reset(hba);
		ufshcd_set_ufs_dev_active(hba);
		ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
	}
}

static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
						    struct devfreq_dev_profile
						    *profile, void *data)
{
	if (hba->vops && hba->vops->config_scaling_param)
		hba->vops->config_scaling_param(hba, profile, data);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
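
/*
 * Worked example (values assume the ufs.h definitions where
 * UFS_UPIU_MAX_UNIT_NUM_ID is 0x7f and UFS_UPIU_WLUN_ID is bit 7): a
 * regular LU passes straight through, while a SCSI W-LUN keeps the W-LUN
 * flag bit set in the UPIU LUN field:
 *
 *	ufshcd_scsi_to_upiu_lun(0x02);   // == 0x02, plain LU 2
 *	ufshcd_scsi_to_upiu_lun(0xc150); // == 0xd0, UFS Device W-LU
 */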

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

#endif