/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Universal Flash Storage Host controller driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/keyslot-manager.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufs_quirks.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};

enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,
	UFS_EVT_WL_SUSP_ERR,
	UFS_EVT_WL_RES_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	UFS_EVT_CNT,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_set_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings, except DeepSleep
 * which is lower than PowerDown with link off and device low power mode.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_6,
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
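
/*
 * For reference, each PM level pairs a device power mode with a link state
 * via the ufs_pm_lvl_states[] table in ufshcd.c (declared near the end of
 * this header). A sketch of that mapping:
 *
 *	UFS_PM_LVL_0: UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_1: UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_2: UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_3: UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_4: UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_5: UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE
 *	UFS_PM_LVL_6: UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE
 */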

/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};

/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all assorted requests linked to device management flow
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: Device management query request data
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @keep_link_active: indicates that the clk should not be disabled if
 *		      link is active
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool keep_link_active;
	bool enabled;
};

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to set some things
 * @setup_task_mgmt: called before any task management request is issued
 *                   to set some things
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 * @event_notify: called to notify important events
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	int	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					void *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
	void	(*event_notify)(struct ufs_hba *hba,
				enum ufs_event_type evt, void *data);
};
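
/*
 * Illustrative sketch of minimal vendor glue (all names here are
 * hypothetical). A variant driver fills in only the callbacks it needs;
 * every hook is optional and NULL-checked by the ufshcd_vops_* wrappers
 * near the end of this header:
 *
 *	static int example_init(struct ufs_hba *hba)
 *	{
 *		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
 *		return 0;
 *	}
 *
 *	static const struct ufs_hba_variant_ops example_hba_vops = {
 *		.name = "example",
 *		.init = example_init,
 *	};
 *
 * Platform glue (e.g. users of ufshcd-pltfrm.c) hands such a table to the
 * core before calling ufshcd_init().
 */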

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 *	delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 *	interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 *	during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to control enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @is_initialized: Indicates whether clock gating is initialized or not
 * @active_reqs: number of requests that are pending and should be waited for
 *	completion before gating clocks
 * @clk_gating_workq: workqueue for clock gating work
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 *	devfreq ->target() function is called then schedule "suspend_work" to
 *	suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 *	one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @min_gear: lowest HS gear to scale down to
 * @is_enabled: tracks if scaling is currently enabled or not, controlled by
 *	clkscale_enable sysfs node
 * @is_allowed: tracks if scaling is currently allowed or not, used to block
 *	clock scaling which is not invoked from devfreq governor
 * @is_initialized: Indicates whether clock scaling is initialized or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	u32 min_gear;
	bool is_enabled;
	bool is_allowed;
	bool is_initialized;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_EVENT_HIST_LENGTH 8
/**
 * struct ufs_event_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @val: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 * @cnt: error counter
 */
struct ufs_event_hist {
	int pos;
	u32 val[UFS_EVENT_HIST_LENGTH];
	ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
	unsigned long long cnt;
};

/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status
 * @last_intr_ts: record the last interrupt timestamp
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *	reset this after link-startup
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *	Clear after the first successful command completion.
 * @event: array with event history
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_event_hist event[UFS_EVT_CNT];
};

/**
 * enum ufshcd_state - UFS host controller state
 * @UFSHCD_STATE_RESET: Link is not operational
 * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
 *	SCSI commands.
 * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
 *	SCSI commands may be submitted to the controller.
 * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
 *	newly submitted SCSI commands with error code DID_BAD_TARGET.
 * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
 *	failed. Fail all SCSI commands with error code DID_ERROR.
 */
enum ufshcd_state {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
	UFSHCD_STATE_EH_SCHEDULED_FATAL,
	UFSHCD_STATE_ERROR,
};

enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data.
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to disable manual flush for write booster.
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,

	/*
	 * This quirk needs to disable unipro timeout values
	 * before power mode change.
	 */
	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING	= 1 << 13,

	/*
	 * This quirk allows only sg entries aligned with page size.
	 */
	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
};

enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and system suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present.
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,

	/*
	 * This capability allows the controller regulators to be put into
	 * lpm mode aggressively during clock gating.
	 * This would increase power savings.
	 */
	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,

	/*
	 * This capability allows the host controller driver to use DeepSleep,
	 * if it is supported by the UFS device. The host controller driver
	 * must support device hardware reset via the ->device_reset() vop,
	 * in order to exit DeepSleep state.
	 */
	UFSHCD_CAP_DEEPSLEEP				= 1 << 10,
};
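
/*
 * Illustrative sketch (hypothetical): a variant driver usually opts into
 * capabilities from its ->init() callback, e.g.:
 *
 *	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_RPM_AUTOSUSPEND;
 *	if (host_has_inline_crypto)
 *		hba->caps |= UFSHCD_CAP_CRYPTO;
 *
 * Core code then consults these bits through helpers such as
 * ufshcd_is_clkgating_allowed() defined later in this header.
 */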

struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};

#ifdef CONFIG_SCSI_UFS_HPB
/**
 * struct ufshpb_dev_info - UFSHPB device related info
 * @num_lu: the number of user logical units to check whether all lu finished
 *	initialization
 * @rgn_size: device reported HPB region size
 * @srgn_size: device reported HPB sub-region size
 * @slave_conf_cnt: counter to check all lu finished initialization
 * @hpb_disabled: flag to check if HPB is disabled
 * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
 * @is_legacy: flag to check HPB 1.0
 * @control_mode: either host or device
 */
struct ufshpb_dev_info {
	int num_lu;
	int rgn_size;
	int srgn_size;
	atomic_t slave_conf_cnt;
	bool hpb_disabled;
	u8 max_hpb_single_cmd;
	bool is_legacy;
	u8 control_mode;
};
#endif

struct ufs_hba_monitor {
	unsigned long chunk_size;

	/* Two-element arrays are indexed by direction: 0 = read, 1 = write */
	unsigned long nr_sec_rw[2];
	ktime_t total_busy[2];

	unsigned long nr_req[2];
	/* latencies */
	ktime_t lat_sum[2];
	ktime_t lat_max[2];
	ktime_t lat_min[2];

	u32 nr_queued[2];
	ktime_t busy_start_ts[2];

	ktime_t enabled_ts;
	bool enabled;
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set
 * @sdev_ufs_device: scsi_device for the "UFS device" W-LU
 * @sdev_rpmb: scsi_device for the RPMB W-LU
 * @curr_dev_pwr_mode: active device power mode
 * @uic_link_state: active UniPro link state
 * @rpm_lvl: desired UFS power management level during runtime PM
 * @spm_lvl: desired UFS power management level during system PM
 * @pm_op_in_progress: whether or not a PM operation is in progress
 * @ahit: value of Auto-Hibernate Idle Timer register
 * @lrb: local reference block
 * @outstanding_tasks: bits representing outstanding task requests
 * @outstanding_lock: protects @outstanding_reqs
 * @outstanding_reqs: bits representing outstanding transfer requests
 * @capabilities: UFS controller capabilities
 * @nutrs: transfer request queue depth supported by controller
 * @nutmrs: task management queue depth supported by controller
 * @ufs_version: UFS version to which controller complies
 * @vops: pointer to variant specific operations
 * @vps: pointer to variant specific parameters
 * @priv: pointer to variant specific private data
 * @irq: IRQ number of the controller
 * @quirks: deviations from the UFSHCI standard (UFSHCD_QUIRK_*)
 * @dev_quirks: deviations from the UFS device standard
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @ufshcd_state: UFSHCD state
 * @eh_flags: error handling flags
 * @intr_mask: interrupt mask bits
 * @ee_ctrl_mask: exception event control mask
 * @host_sem: semaphore used to serialize concurrent contexts
 * @eh_wq: workqueue that eh_work works on
 * @eh_work: worker to handle UFS errors that require s/w attention
 * @eeh_work: worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @ufs_stats: various error counters
 * @force_reset: flag to force eh_work to perform a full reset
 * @force_pmc: flag to force a power mode change
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @dev_info: information about the UFS device connected to this host
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: clock gating related info
 * @caps: UFSHCD_CAP_* capabilities enabled on this host
 * @devfreq: devfreq instance for clock scaling
 * @clk_scaling: clock scaling related info
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *	device is known or not
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @crypto_capabilities: content of crypto capabilities register (0x100)
 * @crypto_cap_array: array of crypto capabilities
 * @crypto_cfg_register: start of the crypto cfg array
 * @ksm: the keyslot manager tied to this hba
 * @luns_avail: number of regular and well known LUNs supported by the UFS
 *	device
 * @complete_put: whether or not to call ufshcd_rpm_put() from inside
 *	ufshcd_resume_complete()
 * @rpmb_complete_put: same as @complete_put, but for the RPMB W-LU
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;
	struct scsi_device *sdev_rpmb;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	spinlock_t outstanding_lock;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;
	struct request **tmf_rqs;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	enum ufshcd_state ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;	/* Exception event mask */
	u16 ee_drv_mask;	/* Exception event mask for driver */
	u16 ee_usr_mask;	/* Exception event mask for user */
	struct mutex ee_ctrl_mutex;
	bool is_powered;
	bool shutting_down;
	struct semaphore host_sem;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;
	int nop_out_timeout;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;
	bool wlun_rpmb_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_HPB
	struct ufshpb_dev_info ufshpb_dev;
#endif

	struct ufs_hba_monitor monitor;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_keyslot_manager ksm;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_root;
	struct delayed_work debugfs_ee_work;
	u32 debugfs_ee_rate_limit_ms;
#endif
	u32 luns_avail;
	bool complete_put;
	bool rpmb_complete_put;
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
	return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
}

static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
{
	return !!(ufshcd_is_link_hibern8(hba) &&
		  (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return !!FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
}

static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}

static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
	return !hba->shutting_down;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

/**
 * ufshcd_rmwl - perform read/modify/write for a controller register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register to update
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}
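
/*
 * Illustrative sketch (not part of the driver): updating just the timer
 * field of the Auto-Hibernate register with ufshcd_rmwl(). The register
 * offset and mask are from ufshci.h; see ufshcd_auto_hibern8_update() in
 * ufshcd.c for how the driver actually updates this register.
 */
static inline void ufshcd_example_set_ahit_timer(struct ufs_hba *hba, u32 timer)
{
	/* Read-modify-write: only the bits covered by the mask are touched */
	ufshcd_rmwl(hba, UFSHCI_AHIBERN8_TIMER_MASK,
		    FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer),
		    REG_AUTO_HIBERNATE_IDLE_TIMER);
}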

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
			     u32 val, unsigned long interval_us,
			     unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
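
/*
 * Illustrative probe-time sequence for a host driver (hypothetical names,
 * error handling elided; see ufshcd-pltfrm.c and ufshcd-pci.c for the real
 * flows):
 *
 *	struct ufs_hba *hba;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	hba->vops = &example_hba_vops;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	...
 *	ufshcd_remove(hba);
 *	ufshcd_dealloc_host(hba);
 */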

static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
	if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
		return hba->dev_info.wb_dedicated_lu;
	return 0;
}

#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
#endif
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
				      int agreed_gear,
				      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
	       (pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
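
/*
 * Illustrative sketch (not part of the driver): reading the locally
 * negotiated TX gear through the DME accessors above. UIC_ARG_MIB() is
 * from ufshci.h and PA_TXGEAR from unipro.h; real callers also log or
 * propagate the error code.
 */
static inline int ufshcd_example_read_tx_gear(struct ufs_hba *hba, u32 *gear)
{
	return ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), gear);
}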

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
			    enum attr_idn idn, u8 index, u8 selector,
			    u32 *attr_val);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, u8 index, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				  int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
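
/*
 * Illustrative sketch (not part of the driver): issuing a bare
 * DME_LINKSTARTUP through the raw UIC command interface. Most callers
 * should prefer the ufshcd_dme_* wrappers above; UIC_CMD_DME_LINK_STARTUP
 * comes from ufshci.h.
 */
static inline int ufshcd_example_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_LINK_STARTUP,
	};

	return ufshcd_send_uic_cmd(hba, &uic_cmd);
}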

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
void ufshcd_resume_complete(struct device *dev);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		hba->vops->exit(hba);
}

static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
					    enum ufs_event_type evt,
					    void *data)
{
	if (hba->vops && hba->vops->event_notify)
		hba->vops->event_notify(hba, evt, data);
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->phy_initialization)
		return hba->vops->phy_initialization(hba);

	return 0;
}

static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					       int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					      enum uic_cmd_dme cmd,
					      enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->fixup_dev_quirks)
		hba->vops->fixup_dev_quirks(hba);
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset)
		return hba->vops->device_reset(hba);

	return -EOPNOTSUPP;
}

static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
						    struct devfreq_dev_profile *profile,
						    void *data)
{
	if (hba->vops && hba->vops->config_scaling_param)
		hba->vops->config_scaling_param(hba, profile, data);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
			     u16 set, u16 clr);

static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
					&hba->ee_usr_mask, set, clr);
}

static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
					&hba->ee_drv_mask, set, clr);
}

static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
{
	return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
{
	return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpm_put(struct ufs_hba *hba)
{
	return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
{
	return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
}

static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
{
	return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
}

#endif /* End of Header */