1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef _UFSHCD_H
38#define _UFSHCD_H
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/init.h>
43#include <linux/interrupt.h>
44#include <linux/io.h>
45#include <linux/delay.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/workqueue.h>
49#include <linux/errno.h>
50#include <linux/types.h>
51#include <linux/wait.h>
52#include <linux/bitops.h>
53#include <linux/pm_runtime.h>
54#include <linux/clk.h>
55#include <linux/completion.h>
56#include <linux/regulator/consumer.h>
57#include "unipro.h"
58
59#include <asm/irq.h>
60#include <asm/byteorder.h>
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_dbg.h>
66#include <scsi/scsi_eh.h>
67
68#include "ufs.h"
69#include "ufshci.h"
70
/* Driver name and version string */
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;
75
/* Device management command types (see struct ufs_dev_cmd) */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP = 0x0,		/* NOP OUT/NOP IN ping */
	DEV_CMD_TYPE_QUERY = 0x1,	/* Query request/response */
};
80
81
82
83
84
85
86
87
88
89
90
/**
 * struct uic_command - UIC command structure
 * @command: UIC command opcode
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: indicates if the UIC command is outstanding
 * @result: UIC command result
 * @done: completion signalled when the command finishes
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
100
101
/* Identifies which PM path a suspend/resume request came from */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Convenience tests for an enum ufs_pm_op value */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
111
112
/* Host <-> device UniPro link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE = 0,		/* link is down/off */
	UIC_LINK_ACTIVE_STATE = 1,	/* link is up and active */
	UIC_LINK_HIBERN8_STATE = 2,	/* link is in hibernate (HIBERN8) state */
};

/* Test helpers for hba->uic_link_state */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
/* Setters for hba->uic_link_state */
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
129
130
131
132
133
/*
 * UFS power management levels. Each level selects one (device power mode,
 * UniPro link state) pair — see struct ufs_pm_lvl_states below; the actual
 * per-level table lives in the implementation (ufshcd.c).
 * NOTE(review): conventionally ordered from least to most power saving —
 * confirm against that table.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX
};

/* One PM-level table entry: device power state + UniPro link state */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * struct ufshcd_lrb - local reference block, one per transfer request slot
 * @utr_descriptor_ptr: UTP Transfer Request Descriptor of the command
 * @ucd_req_ptr: request UPIU address of the command
 * @ucd_rsp_ptr: response UPIU address for this command
 * @ucd_prdt_ptr: PRDT (scatter/gather table) address of the command
 * @cmd: pointer to the SCSI command, NULL for internal (dev mgmt) commands
 * @sense_buffer: sense buffer address of the SCSI command
 * @sense_bufflen: length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS or Query command type
 * @task_tag: task tag (request slot index) of the command
 * @lun: LUN the command is addressed to
 * @intr_cmd: interrupt command — completes via interrupt, not aggregation
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
};
180
181
182
183
184
185
186
/**
 * struct ufs_query - data for one query request/response exchange
 * @request: request UPIU and query function
 * @descriptor: buffer for sending/receiving a descriptor, may be NULL
 * @response: response UPIU and response data
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
192
193
194
195
196
197
198
199
/**
 * struct ufs_dev_cmd - state associated with device management commands
 * @type: device management command type (Query, NOP OUT)
 * @lock: allows only one device management command at a time
 * @complete: completion for the in-flight internal command
 * @tag_wq: wait queue until a free command slot is available
 * @query: query request/response data for DEV_CMD_TYPE_QUERY
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
207
208
209
210
211
212
213
214
215
216
217
/**
 * struct ufs_clk_info - one UFS controller clock
 * @list: node in hba->clk_list_head
 * @clk: clock handle
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: minimum frequency usable for clock scaling
 * @curr_freq: frequency the clock is currently set to
 * @enabled: guards against unbalanced enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
227
/* Phase of a variant-ops notification (see struct ufs_hba_variant_ops) */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

/* UniPro PA-layer attributes describing a power mode */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode, e.g. FAST_MODE/FASTAUTO_MODE */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series */
};

/* A power mode plus a flag saying whether it has been determined yet */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to clean up everything done in @init
 * @get_ufs_hci_version: called to get the UFS HCI version (used with the
 *	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION quirk)
 * @clk_scale_notify: notifies that clocks are scaled up/down
 * @setup_clocks: called when clocks are being turned on/off
 * @setup_regulators: called when regulators are being turned on/off
 * @hce_enable_notify: called before and after HCE enable, to allow
 *	variant specific initialization
 * @link_startup_notify: called before and after link startup, to allow
 *	variant specific initialization
 * @pwr_change_notify: called before and after a power mode change, to
 *	allow vendor specific capabilities to be set
 * @suspend: called from the host controller PM suspend path
 * @resume: called from the host controller PM resume path
 * @dbg_register_dump: dumps controller debug information
 */
struct ufs_hba_variant_ops {
	const char *name;
	int (*init)(struct ufs_hba *);
	void (*exit)(struct ufs_hba *);
	u32 (*get_ufs_hci_version)(struct ufs_hba *);
	int (*clk_scale_notify)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int (*setup_clocks)(struct ufs_hba *, bool);
	int (*setup_regulators)(struct ufs_hba *, bool);
	int (*hce_enable_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status);
	int (*link_startup_notify)(struct ufs_hba *,
				   enum ufs_notify_change_status);
	int (*pwr_change_notify)(struct ufs_hba *,
				 enum ufs_notify_change_status status,
				 struct ufs_pa_layer_attr *,
				 struct ufs_pa_layer_attr *);
	int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void (*dbg_register_dump)(struct ufs_hba *hba);
};
289
290
/* Clock gating state machine (stored in struct ufs_clk_gating::state) */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* transition towards CLKS_OFF requested */
	REQ_CLKS_ON,	/* transition towards CLKS_ON requested */
};
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: delayed worker that gates the clocks after @delay_ms idle
 * @ungate_work: worker that ungates the clocks
 * @state: current gating state (enum clk_gating_state)
 * @delay_ms: gating delay in milliseconds
 * @is_suspended: set to suspend clock gating (e.g. across suspend/resume)
 * @delay_attr: sysfs attribute exposing @delay_ms
 * @active_reqs: number of pending requests that must complete before the
 *	clocks can be gated
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	int active_reqs;
};

/* Busy-time accounting used by devfreq-driven clock scaling */
struct ufs_clk_scaling {
	ktime_t busy_start_t;		/* when the current busy period began */
	bool is_busy_started;		/* a busy period is being timed */
	unsigned long tot_busy_t;	/* accumulated busy time in the window */
	unsigned long window_start_t;	/* start of the current polling window */
};
328
329
330
331
332
333
/**
 * struct ufs_init_prefetch - data pre-fetched once during initialization
 * @icc_level: ICC level read from the device during initialization
 */
struct ufs_init_prefetch {
	u32 icc_level;
};
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/**
 * struct ufs_hba - per-adapter private structure
 * @mmio_base: UFSHCI register base address
 * @ucdl_base_addr: UFS Command Descriptor list base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor list base address
 * @utmrdl_base_addr: UTP Task Management Request Descriptor list base address
 * @ucdl_dma_addr: UFS Command Descriptor list DMA address
 * @utrdl_dma_addr: UTP Transfer Request Descriptor list DMA address
 * @utmrdl_dma_addr: UTP Task Management Request Descriptor list DMA address
 * @host: SCSI host instance of this adapter
 * @dev: device handle
 * @sdev_ufs_device: SCSI device instance representing the UFS device
 * @curr_dev_pwr_mode: current UFS device power mode
 * @uic_link_state: current UniPro link state
 * @rpm_lvl: desired UFS power level during runtime PM
 * @spm_lvl: desired UFS power level during system PM
 * @pm_op_in_progress: non-zero while a PM operation is in progress
 * @lrb: array of local reference blocks, one per transfer request slot
 * @lrb_in_use: bitmap of transfer request slots in use
 * @outstanding_tasks: bitmap of outstanding task management requests
 * @outstanding_reqs: bitmap of outstanding transfer requests
 * @capabilities: UFS controller capabilities register value
 * @nutrs: transfer request queue depth supported by the controller
 * @nutmrs: task management queue depth supported by the controller
 * @ufs_version: UFS version the controller complies with
 * @vops: pointer to variant specific operations
 * @priv: variant specific private data (see ufshcd_set_variant())
 * @irq: controller IRQ number
 * @is_irq_enabled: tracks whether @irq is currently enabled
 * @quirks: bitmask of UFSHCD_QUIRK_* controller deviations
 * @dev_quirks: bitmask of UFS device (not controller) quirks
 * @tm_wq: wait queue for task management completion
 * @tm_tag_wq: wait queue for a free task management slot
 * @tm_condition: condition bitmap for task management completion
 * @tm_slots_in_use: bitmap of task management slots in use
 * @active_uic_cmd: outstanding UIC command, if any
 * @uic_cmd_mutex: serializes UIC command issue
 * @uic_async_done: completion for asynchronous UIC commands
 * @ufshcd_state: driver state
 * @eh_flags: error handling flags
 * @intr_mask: interrupt enable mask
 * @ee_ctrl_mask: exception event control mask
 * @is_powered: set once regulators/clocks have been powered up
 * @is_init_prefetch: set once @init_prefetch_data has been read
 * @init_prefetch_data: data pre-fetched once during initialization
 * @eh_work: worker handling errors that need software attention
 * @eeh_work: worker handling device exception events
 * @errors: pending controller error status
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky copy of @errors for the error handler
 * @saved_uic_err: sticky copy of @uic_error for the error handler
 * @dev_cmd: device management command bookkeeping
 * @last_dme_cmd_tstamp: timestamp of the last completed DME command
 * @dev_info: information about the attached UFS device
 * @auto_bkops_enabled: whether background ops are auto-enabled on the device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: head of the controller clock list (struct ufs_clk_info)
 * @wlun_dev_clr_ua: whether a Unit Attention still needs clearing on the
 *	device W-LU — NOTE(review): confirm exact semantics in ufshcd.c
 * @lanes_per_direction: number of UniPro lanes per direction
 * @pwr_info: current power mode settings
 * @max_pwr_info: maximum power mode supported (when @is_valid is set)
 * @clk_gating: clock gating state
 * @caps: bitmask of UFSHCD_CAP_* driver capabilities
 * @devfreq: devfreq instance used for clock scaling
 * @clk_scaling: clock scaling busy-time statistics
 * @is_sys_suspended: set while the HBA is system suspended
 * @urgent_bkops_lvl: urgent bkops level of the device
 * @is_urgent_bkops_lvl_checked: whether @urgent_bkops_lvl is known yet
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory references */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory references */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;

	enum ufs_pm_level rpm_lvl;

	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	struct ufshcd_lrb *lrb;
	unsigned long lrb_in_use;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;

	/*
	 * Interrupt aggregation is broken on this controller: when set,
	 * ufshcd_is_intr_aggr_allowed() refuses aggregation even if
	 * UFSHCD_CAP_INTR_AGGR is advertised.
	 */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)

	/*
	 * A delay is required before issuing each DME command
	 * (@last_dme_cmd_tstamp tracks the last completion for this).
	 */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)

	/*
	 * Line Control Command (LCC) handling is broken —
	 * NOTE(review): the exact workaround lives in ufshcd.c.
	 */
	#define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)

	/*
	 * Setting the PA_RXHSUNTERMCAP UniPro attribute must be avoided
	 * on this controller — presumably it breaks the link; confirm.
	 */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)

	/*
	 * DME peer attribute accesses are only reliable in AUTO mode —
	 * NOTE(review): confirm handling in ufshcd.c.
	 */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)

	/*
	 * The controller's version register reports a wrong value;
	 * the driver asks the variant via ->get_ufs_hci_version()
	 * instead (see ufshcd_vops_get_ufs_hci_version()).
	 */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)

	unsigned int quirks;

	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;
	wait_queue_head_t tm_tag_wq;
	unsigned long tm_condition;
	unsigned long tm_slots_in_use;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_init_prefetch;
	struct ufs_init_prefetch init_prefetch_data;

	/* Work queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA error handling */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Information about the attached UFS device */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;

	/* Control to enable/disable host capabilities */
	u32 caps;

	/* Allow dynamic clock gating */
#define UFSHCD_CAP_CLK_GATING (1 << 0)
	/* Allow HIBERN8 while clocks are gated */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clock scaling */
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
	/* Allow auto bkops to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * Allow interrupt aggregation; ignored when
	 * UFSHCD_QUIRK_BROKEN_INTR_AGGR is also set.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;
};
549
550
551static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
552{
553 return hba->caps & UFSHCD_CAP_CLK_GATING;
554}
555static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
556{
557 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
558}
559static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
560{
561 return hba->caps & UFSHCD_CAP_CLK_SCALING;
562}
563static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
564{
565 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
566}
567
568static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
569{
570 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
571 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
572 return true;
573 else
574 return false;
575}
576
/* MMIO accessors relative to the controller's register base (mmio_base) */
#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
581
582
583
584
585
586
587
588
589static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
590{
591 u32 tmp;
592
593 tmp = ufshcd_readl(hba, reg);
594 tmp &= ~mask;
595 tmp |= (val & mask);
596 ufshcd_writel(hba, tmp, reg);
597}
598
599int ufshcd_alloc_host(struct device *, struct ufs_hba **);
600void ufshcd_dealloc_host(struct ufs_hba *);
601int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
602void ufshcd_remove(struct ufs_hba *);
603int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
604 u32 val, unsigned long interval_us,
605 unsigned long timeout_ms, bool can_sleep);
606
/*
 * Compile-time check that a command descriptor's UPIU area is large
 * enough for a general UPIU request plus the largest query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
612
613
614
615
616
617
/**
 * ufshcd_set_variant - attach variant specific data to the hba
 * @hba: per-adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - retrieve the variant specific data from the hba
 * @hba: per-adapter instance
 *
 * Returns whatever was stored with ufshcd_set_variant().
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
633
/* Runtime/system PM and shutdown entry points */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
/* DME accessors: set/get a UniPro MIB attribute on the local or peer stack */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);

/* @peer argument of ufshcd_dme_{set,get}_attr() */
#define DME_LOCAL 0	/* access the local (host) UniPro stack */
#define DME_PEER 1	/* access the peer (device) UniPro stack */
/* @attr_set argument: normal vs static (ST) attribute set */
#define ATTR_SET_NOR 0
#define ATTR_SET_ST 1
650
/* Set a local (host-side) UniPro attribute, normal attribute set. */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

/* Set a local UniPro attribute in the static (ST) attribute set. */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

/* Set a peer (device-side) UniPro attribute, normal attribute set. */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

/* Set a peer UniPro attribute in the static (ST) attribute set. */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

/* Get a local (host-side) UniPro attribute. */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

/* Get a peer (device-side) UniPro attribute. */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
690
/* Read the device descriptor into @buf (at most @size bytes). */
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
692
693static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
694{
695 return (pwr_info->pwr_rx == FAST_MODE ||
696 pwr_info->pwr_rx == FASTAUTO_MODE) &&
697 (pwr_info->pwr_tx == FAST_MODE ||
698 pwr_info->pwr_tx == FASTAUTO_MODE);
699}
700
/* Pass as @ascii to ufshcd_read_string_desc() to request ASCII output */
#define ASCII_STD true

int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
				u32 size, bool ascii);

/* Send a query request reading or writing the flag @idn */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, bool *flag_res);
/* Take/drop a reference that keeps the controller clocks ungated */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
712
713
714static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
715{
716 if (hba->vops)
717 return hba->vops->name;
718 return "";
719}
720
721static inline int ufshcd_vops_init(struct ufs_hba *hba)
722{
723 if (hba->vops && hba->vops->init)
724 return hba->vops->init(hba);
725
726 return 0;
727}
728
729static inline void ufshcd_vops_exit(struct ufs_hba *hba)
730{
731 if (hba->vops && hba->vops->exit)
732 return hba->vops->exit(hba);
733}
734
735static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
736{
737 if (hba->vops && hba->vops->get_ufs_hci_version)
738 return hba->vops->get_ufs_hci_version(hba);
739
740 return ufshcd_readl(hba, REG_UFS_VERSION);
741}
742
743static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
744 bool up, enum ufs_notify_change_status status)
745{
746 if (hba->vops && hba->vops->clk_scale_notify)
747 return hba->vops->clk_scale_notify(hba, up, status);
748 return 0;
749}
750
751static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
752{
753 if (hba->vops && hba->vops->setup_clocks)
754 return hba->vops->setup_clocks(hba, on);
755 return 0;
756}
757
758static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
759{
760 if (hba->vops && hba->vops->setup_regulators)
761 return hba->vops->setup_regulators(hba, status);
762
763 return 0;
764}
765
766static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
767 bool status)
768{
769 if (hba->vops && hba->vops->hce_enable_notify)
770 return hba->vops->hce_enable_notify(hba, status);
771
772 return 0;
773}
774static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
775 bool status)
776{
777 if (hba->vops && hba->vops->link_startup_notify)
778 return hba->vops->link_startup_notify(hba, status);
779
780 return 0;
781}
782
783static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
784 bool status,
785 struct ufs_pa_layer_attr *dev_max_params,
786 struct ufs_pa_layer_attr *dev_req_params)
787{
788 if (hba->vops && hba->vops->pwr_change_notify)
789 return hba->vops->pwr_change_notify(hba, status,
790 dev_max_params, dev_req_params);
791
792 return -ENOTSUPP;
793}
794
795static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
796{
797 if (hba->vops && hba->vops->suspend)
798 return hba->vops->suspend(hba, op);
799
800 return 0;
801}
802
803static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
804{
805 if (hba->vops && hba->vops->resume)
806 return hba->vops->resume(hba, op);
807
808 return 0;
809}
810
811static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
812{
813 if (hba->vops && hba->vops->dbg_register_dump)
814 hba->vops->dbg_register_dump(hba);
815}
816
817#endif
818