1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef _UFSHCD_H
38#define _UFSHCD_H
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/init.h>
43#include <linux/interrupt.h>
44#include <linux/io.h>
45#include <linux/delay.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/workqueue.h>
49#include <linux/errno.h>
50#include <linux/types.h>
51#include <linux/wait.h>
52#include <linux/bitops.h>
53#include <linux/pm_runtime.h>
54#include <linux/clk.h>
55#include <linux/completion.h>
56#include <linux/regulator/consumer.h>
57#include "unipro.h"
58
59#include <asm/irq.h>
60#include <asm/byteorder.h>
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_dbg.h>
66#include <scsi/scsi_eh.h>
67
68#include "ufs.h"
69#include "ufshci.h"
70
71#define UFSHCD "ufshcd"
72#define UFSHCD_DRIVER_VERSION "0.2"
73
74struct ufs_hba;
75
/* Device management command types issued internally by the driver */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP		= 0x0,	/* NOP OUT UPIU */
	DEV_CMD_TYPE_QUERY		= 0x1,	/* QUERY REQUEST UPIU */
};
80
81
82
83
84
85
86
87
88
89
90
/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: flag indicating the command is being processed
 * @result: UIC command result
 * @done: signalled when the UIC command completion interrupt arrives
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
100
101
/* Context in which a UFS power management operation is performed */
enum ufs_pm_op {
	UFS_RUNTIME_PM,		/* runtime PM suspend/resume */
	UFS_SYSTEM_PM,		/* system-wide suspend/resume */
	UFS_SHUTDOWN_PM,	/* shutdown path */
};

/* Predicates for the PM operation context */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
111
112
/* Host <-> Device UniPro link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
};

/* Accessors for hba->uic_link_state (query the cached link state) */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				     UIC_LINK_HIBERN8_STATE)
/* Mutators for hba->uic_link_state (record the new link state) */
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
129
130
131
132
133
/*
 * UFS power management levels. Each level maps to a (device power mode,
 * UIC link state) pair via the driver's ufs_pm_lvl_states table; see
 * hba->rpm_lvl and hba->spm_lvl.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX		/* number of levels; not a valid level */
};
143
/**
 * struct ufs_pm_lvl_states - link/device states for a PM level
 * @dev_state: UFS device power mode for this level
 * @link_state: UIC link state for this level
 */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * struct ufshcd_lrb - local reference block; per-request driver state
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: response UPIU address of this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @cmd: SCSI command pointer (NULL for internal/device-management commands)
 * @sense_buffer: sense buffer address of the command
 * @sense_bufflen: length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, query, etc.
 * @task_tag: task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: interrupt-driven command (doesn't participate in interrupt
 *            aggregation)
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
};
180
181
182
183
184
185
186
/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request UPIU and function
 * @descriptor: buffer for sending/receiving a descriptor
 * @response: response UPIU and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
192
193
194
195
196
197
198
199
/**
 * struct ufs_dev_cmd - all assorted requests linked to device management
 * @type: device management command type - Query, NOP OUT
 * @lock: serializes issuing of device management commands
 * @complete: internal command completion
 * @tag_wq: wait queue until a free command slot is available
 * @query: query request/response storage
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
207
208
209
210
211
212
213
214
215
216
217
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list head on hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: minimum frequency supported by the clock
 * @curr_freq: indicates the current frequency that it is set to
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
227
/* Stage of a change being reported to a variant-ops notify callback */
enum ufs_notify_change_status {
	PRE_CHANGE,	/* called before the change takes effect */
	POST_CHANGE,	/* called after the change has taken effect */
};
232
/* UniPro PA-layer link attributes describing a power mode configuration */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode (e.g. FAST_MODE/FASTAUTO_MODE) */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series */
};
242
/**
 * struct ufs_pwr_mode_info - power mode capability holder
 * @is_valid: true when @info has been populated
 * @info: the PA-layer attributes describing the mode
 */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set
 * @link_startup_notify: called before and after link startup is carried out
 * @pwr_change_notify: called before and after a power mode change
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 *
 * All callbacks are optional (NULL-checked by the ufshcd_vops_* wrappers).
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool);
	int	(*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
};
291
292
/* Clock gating state machine states (see struct ufs_clk_gating) */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating has been requested */
	REQ_CLKS_ON,	/* ungating has been requested */
};
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: delayed worker to turn off clocks after some idle delay
 * @ungate_work: worker to turn on clocks
 * @state: the current clocks state (enum clk_gating_state)
 * @delay_ms: gating delay in milliseconds
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 *		  during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @active_reqs: number of requests that are pending and should be waited for
 *		 completion before gating clocks
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	int active_reqs;
};
323
/* Bookkeeping used by devfreq-driven clock scaling (busy-time tracking) */
struct ufs_clk_scaling {
	ktime_t  busy_start_t;		/* start of the current busy period */
	bool is_busy_started;		/* a busy period is in progress */
	unsigned long  tot_busy_t;	/* accumulated busy time in the window */
	unsigned long window_start_t;	/* start of the current monitoring window */
};
330
331
332
333
334
335
/**
 * struct ufs_init_prefetch - contains data that is pre-fetched once during
 * initialization
 * @icc_level: icc level which was read during initialization
 */
struct ufs_init_prefetch {
	u32 icc_level;
};
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: SCSI host structure
 * @dev: device handle
 * @sdev_ufs_device: associated SCSI device of the UFS device well known LU
 * @curr_dev_pwr_mode: active UFS device power mode
 * @uic_link_state: active state of the link to the UFS device
 * @rpm_lvl: desired UFS power management level during runtime PM
 * @spm_lvl: desired UFS power management level during system PM
 * @pm_op_in_progress: whether or not a PM operation is in progress
 * @lrb: local reference block array
 * @lrb_in_use: lrb in use (bitmap of busy task tags)
 * @outstanding_tasks: bits representing outstanding task requests
 * @outstanding_reqs: bits representing outstanding transfer requests
 * @capabilities: UFS controller capabilities
 * @nutrs: transfer request queue depth
 * @nutmrs: task management queue depth
 * @ufs_version: UFS version supported by the controller
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: IRQ number
 * @is_irq_enabled: whether or not the IRQ is currently enabled
 * @quirks: bitmask of UFSHCD_QUIRK_* deviations from the standard
 * @dev_quirks: bitmask of deviations in the attached UFS device
 * @tm_wq: wait queue for task management
 * @tm_tag_wq: wait queue for free task management slots
 * @tm_condition: condition variable for task management
 * @tm_slots_in_use: bitmap of task management request slots in use
 * @active_uic_cmd: pointer to the active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @uic_async_done: completion used during UIC processing
 * @ufshcd_state: UFSHCD state
 * @eh_flags: error handling flags
 * @intr_mask: interrupt mask bits
 * @ee_ctrl_mask: exception event control mask
 * @is_powered: whether or not the hba is powered
 * @is_init_prefetch: flag to check if data was pre-fetched in initialization
 * @init_prefetch_data: data pre-fetched during initialization
 * @eh_work: worker to handle UFS errors that require s/w attention
 * @eeh_work: worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @dev_info: information about the attached UFS device
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @wlun_dev_clr_ua: whether a Unit Attention must still be cleared on the
 *	device well known LU
 * @lanes_per_direction: number of lanes per link direction
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: clock gating state and workers
 * @caps: bitmask of UFSHCD_CAP_* optional driver capabilities
 * @devfreq: devfreq instance used for clock scaling
 * @clk_scaling: clock scaling bookkeeping
 * @is_sys_suspended: whether the hba is in system suspend
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory references of the descriptor lists */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA addresses of the descriptor lists */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	struct ufshcd_lrb *lrb;
	unsigned long lrb_in_use;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;

	/* Interrupt aggregation support is broken */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			UFS_BIT(0)

	/*
	 * Delay before each DME command is required as the UniPro
	 * layer has issued internal commands.
	 * NOTE(review): description inferred from the quirk name — confirm
	 * against the handler in ufshcd.c.
	 */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		UFS_BIT(1)

	/* LCC (Line Control Command) handling is broken */
	#define UFSHCD_QUIRK_BROKEN_LCC				UFS_BIT(2)

	/* The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode; "broken"
	 * here presumably means it cannot be relied upon — confirm.
	 */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		UFS_BIT(3)

	/* Auto mode must be used for DME peer attribute accesses */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		UFS_BIT(4)

	/* The UFS HCI version register reports a wrong value */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;
	wait_queue_head_t tm_tag_wq;
	unsigned long tm_condition;
	unsigned long tm_slots_in_use;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_init_prefetch;
	struct ufs_init_prefetch init_prefetch_data;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING	(1 << 0)
	/* Allow hiberb8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
	/* Allow auto bkops to enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;
};
551
552
/* Returns true if the host was configured with the clock gating capability */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
/* Returns true if the link may enter hibern8 while clocks are gated */
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
/* Returns non-zero if dynamic clock scaling is enabled for this host */
static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
/* Returns true if auto bkops may stay enabled during runtime suspend */
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
569
570static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
571{
572
573#ifndef CONFIG_SCSI_UFS_DWC
574 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
575 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
576 return true;
577 else
578 return false;
579#else
580return true;
581#endif
582}
583
/* MMIO register accessors relative to the host's UFSHCI register base */
#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
588
589
590
591
592
593
594
595
596static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
597{
598 u32 tmp;
599
600 tmp = ufshcd_readl(hba, reg);
601 tmp &= ~mask;
602 tmp |= (val & mask);
603 ufshcd_writel(hba, tmp, reg);
604}
605
/* Host allocation / initialization / teardown entry points (ufshcd.c) */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
/* Poll @reg until (value & @mask) == @val, or @timeout_ms expires */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep);
613
/*
 * Compile-time sanity check: the per-command UPIU area must be large
 * enough to hold a general UPIU request plus the largest query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
619
620
621
622
623
624
/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
630
631
632
633
634
/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
640
/* Power management entry points (ufshcd.c) */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
/* Raw DME attribute accessors; prefer the ufshcd_dme_* inline wrappers */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0	/* access the local UniPro stack */
#define DME_PEER	1	/* access the peer (device) UniPro stack */
#define ATTR_SET_NOR	0	/* Normal attribute set */
#define ATTR_SET_ST	1	/* Static attribute set */
657
/* Set a normal attribute on the local UniPro stack */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}
664
/* Set a static attribute on the local UniPro stack */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}
671
/* Set a normal attribute on the peer (device) UniPro stack */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}
678
/* Set a static attribute on the peer (device) UniPro stack */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}
685
/* Read an attribute from the local UniPro stack into *mib_val */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}
691
/* Read an attribute from the peer (device) UniPro stack into *mib_val */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
697
698int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
699
700static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
701{
702 return (pwr_info->pwr_rx == FAST_MODE ||
703 pwr_info->pwr_rx == FASTAUTO_MODE) &&
704 (pwr_info->pwr_tx == FAST_MODE ||
705 pwr_info->pwr_tx == FASTAUTO_MODE);
706}
707
708#define ASCII_STD true
709
710int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
711 u32 size, bool ascii);
712
713
714int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
715 enum flag_idn idn, bool *flag_res);
716int ufshcd_hold(struct ufs_hba *hba, bool async);
717void ufshcd_release(struct ufs_hba *hba);
718u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
719
720
721static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
722{
723 if (hba->vops)
724 return hba->vops->name;
725 return "";
726}
727
/* Invoke the optional variant init callback; 0 when not provided */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}
735
736static inline void ufshcd_vops_exit(struct ufs_hba *hba)
737{
738 if (hba->vops && hba->vops->exit)
739 return hba->vops->exit(hba);
740}
741
/*
 * Return the UFS HCI version, preferring the variant callback (used when
 * the controller's version register is unreliable) over the register read.
 */
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
749
/* Notify the variant of a clock scaling event (up/down, pre/post) */
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}
757
/* Ask the variant to set up (on=true) or tear down its clocks */
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on);
	return 0;
}
764
/* Ask the variant to configure its regulators (status=true to enable) */
static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}
772
773static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
774 bool status)
775{
776 if (hba->vops && hba->vops->hce_enable_notify)
777 return hba->vops->hce_enable_notify(hba, status);
778
779 return 0;
780}
781static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
782 bool status)
783{
784 if (hba->vops && hba->vops->link_startup_notify)
785 return hba->vops->link_startup_notify(hba, status);
786
787 return 0;
788}
789
790static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
791 bool status,
792 struct ufs_pa_layer_attr *dev_max_params,
793 struct ufs_pa_layer_attr *dev_req_params)
794{
795 if (hba->vops && hba->vops->pwr_change_notify)
796 return hba->vops->pwr_change_notify(hba, status,
797 dev_max_params, dev_req_params);
798
799 return -ENOTSUPP;
800}
801
/* Invoke the optional variant suspend callback for PM operation @op */
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}
809
/* Invoke the optional variant resume callback for PM operation @op */
static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}
817
/* Ask the variant to dump its debug registers, if it supports that */
static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}
823
824#endif
825