/*
 * ufshcd.h - Universal Flash Storage (UFS) Host Controller driver:
 * core data structures and inline register/DME helpers.
 */
36#ifndef _UFSHCD_H
37#define _UFSHCD_H
38
39#include <linux/module.h>
40#include <linux/kernel.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/io.h>
44#include <linux/delay.h>
45#include <linux/slab.h>
46#include <linux/spinlock.h>
47#include <linux/workqueue.h>
48#include <linux/errno.h>
49#include <linux/types.h>
50#include <linux/wait.h>
51#include <linux/bitops.h>
52#include <linux/pm_runtime.h>
53#include <linux/clk.h>
54#include <linux/completion.h>
55#include <linux/regulator/consumer.h>
56
57#include <asm/irq.h>
58#include <asm/byteorder.h>
59#include <scsi/scsi.h>
60#include <scsi/scsi_cmnd.h>
61#include <scsi/scsi_host.h>
62#include <scsi/scsi_tcq.h>
63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
65
66#include "ufs.h"
67#include "ufshci.h"
68
/* Driver name, used for logging and device naming */
#define UFSHCD "ufshcd"
/* Driver version string */
#define UFSHCD_DRIVER_VERSION "0.2"
71
struct ufs_hba;

/*
 * Types of internal (non-SCSI) device management commands issued
 * through the device-command path (see struct ufs_dev_cmd).
 */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP = 0x0,	/* NOP OUT UPIU, used to probe device liveness */
	DEV_CMD_TYPE_QUERY = 0x1,	/* QUERY REQUEST UPIU (descriptors/attributes/flags) */
};
78
79
80
81
82
83
84
85
86
87
88
/**
 * struct uic_command - UIC command structure
 * @command: UIC command opcode
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: non-zero while the UIC command is outstanding
 * @result: UIC command result code
 * @done: completion signalled when the command finishes
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
98
99
/* Context in which a suspend/resume operation is being performed */
enum ufs_pm_op {
	UFS_RUNTIME_PM,	/* runtime PM (autosuspend) */
	UFS_SYSTEM_PM,	/* system-wide suspend/resume */
	UFS_SHUTDOWN_PM,	/* system shutdown/poweroff */
};

/* Predicates over enum ufs_pm_op */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
109
110
/* Host <-> device UniPro link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE = 0,	/* link powered down or disabled */
	UIC_LINK_ACTIVE_STATE = 1,	/* link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE = 2,	/* link is in Hibern8 state */
};

/* Accessors for hba->uic_link_state (read) */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
 UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
 UIC_LINK_HIBERN8_STATE)
/* Accessors for hba->uic_link_state (write) */
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
 UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
 UIC_LINK_HIBERN8_STATE)
127
128
129
130
131
/*
 * UFS power management levels. Each level selects a (device power mode,
 * UIC link state) pair — see ufs_pm_lvl_states. NOTE(review): the
 * mapping of each level to concrete states lives in the .c file; the
 * presumed ordering is increasing power savings — confirm there.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX	/* number of levels; not a valid level itself */
};
141
/**
 * struct ufs_pm_lvl_states - link/device states for one PM level
 * @dev_state: UFS device power mode for this level
 * @link_state: UIC link state for this level
 */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/**
 * struct ufshcd_lrb - local reference block, one per request slot
 * @utr_descriptor_ptr: UTP Transfer Request Descriptor for this command
 * @ucd_req_ptr: request UPIU address within the command descriptor
 * @ucd_rsp_ptr: response UPIU address within the command descriptor
 * @ucd_prdt_ptr: PRD table (scatter/gather) address for this command
 * @cmd: the SCSI command this slot is serving (NULL for internal commands)
 * @sense_buffer: sense buffer of the SCSI command
 * @sense_bufflen: length of the sense buffer
 * @scsi_status: SCSI status reported for the command
 * @command_type: command category carried in the UTRD
 * @task_tag: task tag (doubles as the slot index) of the command
 * @lun: logical unit the command is addressed to
 * @intr_cmd: true for commands completed via interrupt rather than
 *	interrupt aggregation
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
};
178
179
180
181
182
183
184
/**
 * struct ufs_query - parameters of an in-flight query request
 * @request: query request UPIU content sent to the device
 * @descriptor: payload buffer for descriptor read/write queries
 * @response: query response UPIU content received from the device
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
190
191
192
193
194
195
196
197
/**
 * struct ufs_dev_cmd - state for internal device management commands
 * @type: command type currently being issued (NOP/QUERY)
 * @lock: serializes device-command issue (one at a time)
 * @complete: completion signalled when the command finishes
 * @tag_wq: wait queue for a free request slot (tag)
 * @query: request/response bookkeeping for QUERY commands
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
205
206
207
208
209
210
211
212
213
214
215
/**
 * struct ufs_clk_info - one controller clock managed by the driver
 * @list: linkage into hba->clk_list_head
 * @clk: clock framework handle
 * @name: clock name (as looked up from DT/platform data)
 * @max_freq: maximum allowed frequency, in Hz
 * @min_freq: minimum allowed frequency, in Hz (used for scaling down)
 * @curr_freq: frequency currently programmed, in Hz
 * @enabled: whether the clock is currently prepared/enabled
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
225
/* Notification stage passed to variant-ops callbacks (before/after a change) */
#define PRE_CHANGE 0
#define POST_CHANGE 1
228
/**
 * struct ufs_pa_layer_attr - UniPro PA-layer link attributes
 * @gear_rx: RX gear
 * @gear_tx: TX gear
 * @lane_rx: number of RX data lanes
 * @lane_tx: number of TX data lanes
 * @pwr_rx: RX power mode
 * @pwr_tx: TX power mode
 * @hs_rate: high-speed rate series (when in HS mode)
 */
struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};
238
/**
 * struct ufs_pwr_mode_info - cached power-mode capability
 * @is_valid: true once @info has been populated
 * @info: the PA-layer attributes describing the power mode
 */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
/**
 * struct ufs_hba_variant_ops - vendor-specific callbacks
 * @name: variant name
 * @init: called during host controller initialization
 * @exit: called during host controller teardown
 * @get_ufs_hci_version: called to get the controller's HCI version
 * @clk_scale_notify: notifies that clocks are being scaled up/down
 * @setup_clocks: called before touching controller clocks (on/off)
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before/after HCE enable (PRE_CHANGE/POST_CHANGE)
 * @link_startup_notify: called before/after link startup
 * @pwr_change_notify: called before/after a power mode change, with the
 *	desired and final PA-layer attributes
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 */
struct ufs_hba_variant_ops {
	const char *name;
	int (*init)(struct ufs_hba *);
	void (*exit)(struct ufs_hba *);
	u32 (*get_ufs_hci_version)(struct ufs_hba *);
	void (*clk_scale_notify)(struct ufs_hba *);
	int (*setup_clocks)(struct ufs_hba *, bool);
	int (*setup_regulators)(struct ufs_hba *, bool);
	int (*hce_enable_notify)(struct ufs_hba *, bool);
	int (*link_startup_notify)(struct ufs_hba *, bool);
	int (*pwr_change_notify)(struct ufs_hba *,
 bool, struct ufs_pa_layer_attr *,
 struct ufs_pa_layer_attr *);
	int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int (*resume)(struct ufs_hba *, enum ufs_pm_op);
};
279
280
/* Clock gating state machine (see struct ufs_clk_gating) */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gate_work queued to turn clocks off */
	REQ_CLKS_ON,	/* ungate_work queued to turn clocks on */
};
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
/**
 * struct ufs_clk_gating - clock gating related bookkeeping
 * @gate_work: delayed work to gate clocks after the idle period elapses
 * @ungate_work: work to turn on clocks (may need to bring link out of
 *	hibern8, hence a separate worker)
 * @state: current state of the gating state machine
 * @delay_ms: idle time before gating, in milliseconds
 * @is_suspended: clk gating is suspended while this is true
 * @delay_attr: sysfs attribute exposing @delay_ms
 * @active_reqs: number of holders currently requiring clocks on
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	int active_reqs;
};
311
/**
 * struct ufs_clk_scaling - busy-time accounting for devfreq clock scaling
 * @busy_start_t: timestamp when the controller last became busy
 * @is_busy_started: whether a busy interval is currently being measured
 * @tot_busy_t: accumulated busy time in the current window
 * @window_start_t: start of the current measurement window
 */
struct ufs_clk_scaling {
	ktime_t busy_start_t;
	bool is_busy_started;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
};
318
319
320
321
322
323
/**
 * struct ufs_init_prefetch - data read from the device during init
 * @icc_level: ICC level read during initialization, before a new value
 *	is programmed
 */
struct ufs_init_prefetch {
	u32 icc_level;
};
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
/**
 * struct ufs_hba - per-adapter private structure
 *
 * Field order is relied upon by the rest of the driver; documentation
 * only is added here. Quirk and capability bit definitions are kept
 * adjacent to the fields they describe, as in the original.
 */
struct ufs_hba {
	void __iomem *mmio_base;	/* UFSHCI register space */

	/* Virtual addresses of the descriptor lists */
	struct utp_transfer_cmd_desc *ucdl_base_addr;	/* UTP command descriptors */
	struct utp_transfer_req_desc *utrdl_base_addr;	/* UTP transfer request list */
	struct utp_task_req_desc *utmrdl_base_addr;	/* UTP task management list */

	/* DMA addresses of the same descriptor lists */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;	/* SCSI midlayer host handle */
	struct device *dev;	/* underlying platform/PCI device */

	/* "UFS device" well-known logical unit's scsi_device */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;	/* current device power mode */
	enum uic_link_state uic_link_state;	/* current UniPro link state */
	/* desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;	/* non-zero while a PM operation runs */

	struct ufshcd_lrb *lrb;	/* array of nutrs local reference blocks */
	unsigned long lrb_in_use;	/* bitmap of busy request slots */

	unsigned long outstanding_tasks;	/* bits of issued task-mgmt requests */
	unsigned long outstanding_reqs;	/* bits of issued transfer requests */

	u32 capabilities;	/* controller capabilities register value */
	int nutrs;	/* number of UTP transfer request slots */
	int nutmrs;	/* number of UTP task management slots */
	u32 ufs_version;	/* UFSHCI version of the controller */
	struct ufs_hba_variant_ops *vops;	/* vendor-specific callbacks */
	void *priv;	/* vendor-private data */
	unsigned int irq;	/* IRQ number */
	bool is_irq_enabled;	/* whether the IRQ is currently enabled */

	/* Interrupt aggregation support is broken on this controller */
	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)

	/*
	 * Controller needs a delay before each DME command;
	 * see hba->last_dme_cmd_tstamp.
	 */
	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)

	/*
	 * Controller has a broken Line Control Command (LCC)
	 * implementation.
	 */
	#define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)

	/* PA_RXHSUNTERMCAP attribute cannot be used on this controller */
	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)

	/* Peer DME attributes must be accessed in AUTO power mode */
	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)

	/*
	 * Version register is wrong; fetch the real version through
	 * the get_ufs_hci_version variant op instead.
	 */
	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)

	unsigned int quirks;	/* bitmask of UFSHCD_QUIRK_* deviations */

	wait_queue_head_t tm_wq;	/* wait for task-mgmt completion */
	wait_queue_head_t tm_tag_wq;	/* wait for a free task-mgmt slot */
	unsigned long tm_condition;	/* completion bits for task-mgmt requests */
	unsigned long tm_slots_in_use;	/* bitmap of busy task-mgmt slots */

	struct uic_command *active_uic_cmd;	/* UIC command in flight (if any) */
	struct mutex uic_cmd_mutex;	/* serializes UIC command issue */
	struct completion *uic_async_done;	/* async UIC op completion */

	u32 ufshcd_state;	/* driver internal state */
	u32 eh_flags;	/* error-handling flags */
	u32 intr_mask;	/* interrupts enabled on this controller */
	u16 ee_ctrl_mask;	/* exception event control mask */
	bool is_powered;	/* regulators/clocks are initialized */
	bool is_init_prefetch;	/* init-time data has been prefetched */
	struct ufs_init_prefetch init_prefetch_data;

	/* Work structures */
	struct work_struct eh_work;	/* error handler */
	struct work_struct eeh_work;	/* exception event handler */

	/* Error state */
	u32 errors;	/* generic error bits */
	u32 uic_error;	/* UIC-layer error bits */
	u32 saved_err;	/* errors saved for the error handler */
	u32 saved_uic_err;	/* UIC errors saved for the error handler */

	/* Internal device management command support */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;	/* for UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS */

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;	/* device auto background ops enabled */
	struct ufs_vreg_info vreg_info;	/* voltage regulators */
	struct list_head clk_list_head;	/* list of ufs_clk_info */

	/* UFS device W-LUN still needs a unit-attention clear */
	bool wlun_dev_clr_ua;

	struct ufs_pa_layer_attr pwr_info;	/* current power mode */
	struct ufs_pwr_mode_info max_pwr_info;	/* max supported power mode */

	struct ufs_clk_gating clk_gating;

	/* Control to enable/disable host capabilities */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING (1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
	/* Allow auto bkops to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * Allow interrupt aggregation; it can improve performance by
	 * reducing interrupt count, but may degrade certain scenarios.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)

	struct devfreq *devfreq;	/* devfreq instance for clk scaling */
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;	/* set while system-suspended */
};
528
529
530static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
531{
532 return hba->caps & UFSHCD_CAP_CLK_GATING;
533}
534static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
535{
536 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
537}
/*
 * Returns non-zero when dynamic clock scaling is enabled for this host.
 * NOTE(review): returns int (the raw masked value) unlike the bool
 * sibling predicates; callers must treat the result as a truth value
 * only — confirm before normalizing to bool.
 */
static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
542static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
543{
544 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
545}
546
547static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
548{
549 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
550 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
551 return true;
552 else
553 return false;
554}
555
/* MMIO accessors relative to the controller's register base */
#define ufshcd_writel(hba, val, reg) \
 writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
 readl((hba)->mmio_base + (reg))
560
561
562
563
564
565
566
567
568static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
569{
570 u32 tmp;
571
572 tmp = ufshcd_readl(hba, reg);
573 tmp &= ~mask;
574 tmp |= (val & mask);
575 ufshcd_writel(hba, tmp, reg);
576}
577
/* Host allocation, initialization and removal (implemented in ufshcd.c) */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
581
582
583
584
585
/**
 * ufshcd_hba_stop - send the controller to reset state
 * @hba: per-adapter instance
 *
 * Writes CONTROLLER_DISABLE to the host controller enable register.
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
590
/*
 * Compile-time check that one aligned UPIU slot is large enough to hold
 * the largest general UPIU request plus a maximum-size query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
 GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
596
/* Power management entry points, called from bus/platform glue code */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
/* DME attribute accessors; peer selects local vs remote UniPro node */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 u32 *mib_val, u8 peer);
607
608
/* UIC command interfaces for DME primitives */
#define DME_LOCAL 0	/* access the local (host-side) UniPro node */
#define DME_PEER 1	/* access the peer (device-side) UniPro node */
#define ATTR_SET_NOR 0	/* NORMAL type attribute */
#define ATTR_SET_ST 1	/* STATIC type attribute */
613
/* Set a NORMAL-type attribute on the local UniPro node */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
 mib_val, DME_LOCAL);
}
620
/* Set a STATIC-type attribute on the local UniPro node */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
 mib_val, DME_LOCAL);
}
627
/* Set a NORMAL-type attribute on the peer (device-side) UniPro node */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
 mib_val, DME_PEER);
}
634
/* Set a STATIC-type attribute on the peer (device-side) UniPro node */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
 mib_val, DME_PEER);
}
641
/* Read an attribute from the local UniPro node */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}
647
/* Read an attribute from the peer (device-side) UniPro node */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
653
/*
 * NOTE(review): paired reference-count entry points, presumably pinning
 * and releasing the host clocks (see struct ufs_clk_gating) — confirm
 * semantics, including the @async flag, against the ufshcd.c definitions.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
656#endif
657