1
2
3
4#include <linux/err.h>
5#include <linux/string.h>
6#include <linux/bitfield.h>
7#include <asm/unaligned.h>
8
9#include "ufs.h"
10#include "ufs-sysfs.h"
11
12static const char *ufshcd_uic_link_state_to_string(
13 enum uic_link_state state)
14{
15 switch (state) {
16 case UIC_LINK_OFF_STATE: return "OFF";
17 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
18 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
19 case UIC_LINK_BROKEN_STATE: return "BROKEN";
20 default: return "UNKNOWN";
21 }
22}
23
24static const char *ufshcd_ufs_dev_pwr_mode_to_string(
25 enum ufs_dev_pwr_mode state)
26{
27 switch (state) {
28 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
29 case UFS_SLEEP_PWR_MODE: return "SLEEP";
30 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
31 case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
32 default: return "UNKNOWN";
33 }
34}
35
36static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
37 struct device_attribute *attr,
38 const char *buf, size_t count,
39 bool rpm)
40{
41 struct ufs_hba *hba = dev_get_drvdata(dev);
42 struct ufs_dev_info *dev_info = &hba->dev_info;
43 unsigned long flags, value;
44
45 if (kstrtoul(buf, 0, &value))
46 return -EINVAL;
47
48 if (value >= UFS_PM_LVL_MAX)
49 return -EINVAL;
50
51 if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
52 (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
53 !(dev_info->wspecversion >= 0x310)))
54 return -EINVAL;
55
56 spin_lock_irqsave(hba->host->host_lock, flags);
57 if (rpm)
58 hba->rpm_lvl = value;
59 else
60 hba->spm_lvl = value;
61 spin_unlock_irqrestore(hba->host->host_lock, flags);
62 return count;
63}
64
65static ssize_t rpm_lvl_show(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 struct ufs_hba *hba = dev_get_drvdata(dev);
69
70 return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
71}
72
/* Store handler for rpm_lvl: delegate to the common helper (rpm == true). */
static ssize_t rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}
78
79static ssize_t rpm_target_dev_state_show(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct ufs_hba *hba = dev_get_drvdata(dev);
83
84 return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
85 ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
86}
87
88static ssize_t rpm_target_link_state_show(struct device *dev,
89 struct device_attribute *attr, char *buf)
90{
91 struct ufs_hba *hba = dev_get_drvdata(dev);
92
93 return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
94 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
95}
96
97static ssize_t spm_lvl_show(struct device *dev,
98 struct device_attribute *attr, char *buf)
99{
100 struct ufs_hba *hba = dev_get_drvdata(dev);
101
102 return sysfs_emit(buf, "%d\n", hba->spm_lvl);
103}
104
/* Store handler for spm_lvl: delegate to the common helper (rpm == false). */
static ssize_t spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}
110
111static ssize_t spm_target_dev_state_show(struct device *dev,
112 struct device_attribute *attr, char *buf)
113{
114 struct ufs_hba *hba = dev_get_drvdata(dev);
115
116 return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
117 ufs_pm_lvl_states[hba->spm_lvl].dev_state));
118}
119
120static ssize_t spm_target_link_state_show(struct device *dev,
121 struct device_attribute *attr, char *buf)
122{
123 struct ufs_hba *hba = dev_get_drvdata(dev);
124
125 return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
126 ufs_pm_lvl_states[hba->spm_lvl].link_state));
127}
128
129
130static int ufshcd_ahit_to_us(u32 ahit)
131{
132 int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
133 int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);
134
135 for (; scale > 0; --scale)
136 timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;
137
138 return timer;
139}
140
141
142static u32 ufshcd_us_to_ahit(unsigned int timer)
143{
144 unsigned int scale;
145
146 for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
147 timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
148
149 return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
150 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
151}
152
/*
 * Show the Auto-Hibernate idle timeout in microseconds, read back from
 * the controller's AHIT register. Returns -EOPNOTSUPP when the feature
 * is absent and -EBUSY when user access is currently not allowed.
 */
static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	/* Wake the device and hold clocks so the register read is valid. */
	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);
	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);

	ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));

out:
	up(&hba->host_sem);
	return ret;
}
181
/*
 * Store a new Auto-Hibernate idle timeout, given in microseconds.
 * The value is converted to the AHIT register encoding before being
 * programmed. Returns @count on success, -EOPNOTSUPP when the feature
 * is absent, -EINVAL on bad input, -EBUSY if user access is blocked.
 */
static ssize_t auto_hibern8_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int timer;
	int ret = 0;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	if (kstrtouint(buf, 0, &timer))
		return -EINVAL;

	if (timer > UFSHCI_AHIBERN8_MAX)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));

out:
	up(&hba->host_sem);
	return ret ? ret : count;
}
211
212static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
213 char *buf)
214{
215 struct ufs_hba *hba = dev_get_drvdata(dev);
216
217 return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
218}
219
/*
 * Enable or disable WriteBooster. Accepts only 0 or 1. Returns @count on
 * success; -EOPNOTSUPP when WB is not allowed or clock scaling is active,
 * -EINVAL on bad input, -EBUSY when user access is blocked.
 */
static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_enable;
	ssize_t res;

	if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
		/*
		 * NOTE(review): manual control is rejected when clock scaling
		 * is supported — presumably WB is then driven by the clock
		 * scaling logic; confirm against the clk-scaling code.
		 */
		dev_warn(dev, "To control WB through wb_on is not allowed!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &wb_enable))
		return -EINVAL;

	if (wb_enable != 0 && wb_enable != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	/* Keep the device resumed while toggling the WB flag. */
	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle(hba, wb_enable);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}
255
/* Attribute objects for the controller-level (default) sysfs group. */
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
264
/* Files exposed directly under the host controller's device node. */
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
	&dev_attr_rpm_lvl.attr,
	&dev_attr_rpm_target_dev_state.attr,
	&dev_attr_rpm_target_link_state.attr,
	&dev_attr_spm_lvl.attr,
	&dev_attr_spm_target_dev_state.attr,
	&dev_attr_spm_target_link_state.attr,
	&dev_attr_auto_hibern8.attr,
	&dev_attr_wb_on.attr,
	NULL
};

/* Unnamed group: attributes appear at the top level of the device dir. */
static const struct attribute_group ufs_sysfs_default_group = {
	.attrs = ufs_sysfs_ufshcd_attrs,
};
280
281static ssize_t monitor_enable_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct ufs_hba *hba = dev_get_drvdata(dev);
285
286 return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
287}
288
/*
 * Enable or disable I/O monitoring. Any nonzero input enables it.
 * Disabling also clears all accumulated statistics (the whole monitor
 * struct is zeroed, which resets 'enabled' as well).
 */
static ssize_t monitor_enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	value = !!value;	/* normalize to 0/1 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->monitor.enabled)
		goto out_unlock;

	if (!value) {
		/* Disabling: wipe statistics and the enabled flag together. */
		memset(&hba->monitor, 0, sizeof(hba->monitor));
	} else {
		hba->monitor.enabled = true;
		hba->monitor.enabled_ts = ktime_get();
	}

out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
315
316static ssize_t monitor_chunk_size_show(struct device *dev,
317 struct device_attribute *attr, char *buf)
318{
319 struct ufs_hba *hba = dev_get_drvdata(dev);
320
321 return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
322}
323
/*
 * Set the monitor's request size filter. The new value is silently
 * ignored while monitoring is enabled; disable the monitor first.
 */
static ssize_t monitor_chunk_size_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* Only allow chunk size change when monitor is disabled. */
	if (!hba->monitor.enabled)
		hba->monitor.chunk_size = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
341
342static ssize_t read_total_sectors_show(struct device *dev,
343 struct device_attribute *attr, char *buf)
344{
345 struct ufs_hba *hba = dev_get_drvdata(dev);
346
347 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
348}
349
350static ssize_t read_total_busy_show(struct device *dev,
351 struct device_attribute *attr, char *buf)
352{
353 struct ufs_hba *hba = dev_get_drvdata(dev);
354
355 return sysfs_emit(buf, "%llu\n",
356 ktime_to_us(hba->monitor.total_busy[READ]));
357}
358
359static ssize_t read_nr_requests_show(struct device *dev,
360 struct device_attribute *attr, char *buf)
361{
362 struct ufs_hba *hba = dev_get_drvdata(dev);
363
364 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
365}
366
367static ssize_t read_req_latency_avg_show(struct device *dev,
368 struct device_attribute *attr,
369 char *buf)
370{
371 struct ufs_hba *hba = dev_get_drvdata(dev);
372 struct ufs_hba_monitor *m = &hba->monitor;
373
374 return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
375 m->nr_req[READ]));
376}
377
378static ssize_t read_req_latency_max_show(struct device *dev,
379 struct device_attribute *attr,
380 char *buf)
381{
382 struct ufs_hba *hba = dev_get_drvdata(dev);
383
384 return sysfs_emit(buf, "%llu\n",
385 ktime_to_us(hba->monitor.lat_max[READ]));
386}
387
388static ssize_t read_req_latency_min_show(struct device *dev,
389 struct device_attribute *attr,
390 char *buf)
391{
392 struct ufs_hba *hba = dev_get_drvdata(dev);
393
394 return sysfs_emit(buf, "%llu\n",
395 ktime_to_us(hba->monitor.lat_min[READ]));
396}
397
398static ssize_t read_req_latency_sum_show(struct device *dev,
399 struct device_attribute *attr,
400 char *buf)
401{
402 struct ufs_hba *hba = dev_get_drvdata(dev);
403
404 return sysfs_emit(buf, "%llu\n",
405 ktime_to_us(hba->monitor.lat_sum[READ]));
406}
407
408static ssize_t write_total_sectors_show(struct device *dev,
409 struct device_attribute *attr,
410 char *buf)
411{
412 struct ufs_hba *hba = dev_get_drvdata(dev);
413
414 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
415}
416
417static ssize_t write_total_busy_show(struct device *dev,
418 struct device_attribute *attr, char *buf)
419{
420 struct ufs_hba *hba = dev_get_drvdata(dev);
421
422 return sysfs_emit(buf, "%llu\n",
423 ktime_to_us(hba->monitor.total_busy[WRITE]));
424}
425
426static ssize_t write_nr_requests_show(struct device *dev,
427 struct device_attribute *attr, char *buf)
428{
429 struct ufs_hba *hba = dev_get_drvdata(dev);
430
431 return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
432}
433
434static ssize_t write_req_latency_avg_show(struct device *dev,
435 struct device_attribute *attr,
436 char *buf)
437{
438 struct ufs_hba *hba = dev_get_drvdata(dev);
439 struct ufs_hba_monitor *m = &hba->monitor;
440
441 return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
442 m->nr_req[WRITE]));
443}
444
445static ssize_t write_req_latency_max_show(struct device *dev,
446 struct device_attribute *attr,
447 char *buf)
448{
449 struct ufs_hba *hba = dev_get_drvdata(dev);
450
451 return sysfs_emit(buf, "%llu\n",
452 ktime_to_us(hba->monitor.lat_max[WRITE]));
453}
454
455static ssize_t write_req_latency_min_show(struct device *dev,
456 struct device_attribute *attr,
457 char *buf)
458{
459 struct ufs_hba *hba = dev_get_drvdata(dev);
460
461 return sysfs_emit(buf, "%llu\n",
462 ktime_to_us(hba->monitor.lat_min[WRITE]));
463}
464
465static ssize_t write_req_latency_sum_show(struct device *dev,
466 struct device_attribute *attr,
467 char *buf)
468{
469 struct ufs_hba *hba = dev_get_drvdata(dev);
470
471 return sysfs_emit(buf, "%llu\n",
472 ktime_to_us(hba->monitor.lat_sum[WRITE]));
473}
474
/* Attribute objects for the I/O performance monitor group. */
static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);
491
/* Files exposed under the "monitor" subdirectory of the device node. */
static struct attribute *ufs_sysfs_monitor_attrs[] = {
	&dev_attr_monitor_enable.attr,
	&dev_attr_monitor_chunk_size.attr,
	&dev_attr_read_total_sectors.attr,
	&dev_attr_read_total_busy.attr,
	&dev_attr_read_nr_requests.attr,
	&dev_attr_read_req_latency_avg.attr,
	&dev_attr_read_req_latency_max.attr,
	&dev_attr_read_req_latency_min.attr,
	&dev_attr_read_req_latency_sum.attr,
	&dev_attr_write_total_sectors.attr,
	&dev_attr_write_total_busy.attr,
	&dev_attr_write_nr_requests.attr,
	&dev_attr_write_req_latency_avg.attr,
	&dev_attr_write_req_latency_max.attr,
	&dev_attr_write_req_latency_min.attr,
	&dev_attr_write_req_latency_sum.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_monitor_group = {
	.name = "monitor",
	.attrs = ufs_sysfs_monitor_attrs,
};
516
517static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
518 enum desc_idn desc_id,
519 u8 desc_index,
520 u8 param_offset,
521 u8 *sysfs_buf,
522 u8 param_size)
523{
524 u8 desc_buf[8] = {0};
525 int ret;
526
527 if (param_size > 8)
528 return -EINVAL;
529
530 down(&hba->host_sem);
531 if (!ufshcd_is_user_access_allowed(hba)) {
532 ret = -EBUSY;
533 goto out;
534 }
535
536 ufshcd_rpm_get_sync(hba);
537 ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
538 param_offset, desc_buf, param_size);
539 ufshcd_rpm_put_sync(hba);
540 if (ret) {
541 ret = -EINVAL;
542 goto out;
543 }
544
545 switch (param_size) {
546 case 1:
547 ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
548 break;
549 case 2:
550 ret = sysfs_emit(sysfs_buf, "0x%04X\n",
551 get_unaligned_be16(desc_buf));
552 break;
553 case 4:
554 ret = sysfs_emit(sysfs_buf, "0x%08X\n",
555 get_unaligned_be32(desc_buf));
556 break;
557 case 8:
558 ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
559 get_unaligned_be64(desc_buf));
560 break;
561 }
562
563out:
564 up(&hba->host_sem);
565 return ret;
566}
567
/*
 * Generate a read-only sysfs show function for one descriptor parameter.
 * _name is the sysfs file name, _puname/_duname build the parameter and
 * descriptor enum names, _size is the parameter width in bytes.
 */
#define UFS_DESC_PARAM(_name, _puname, _duname, _size)			\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		0, _duname##_DESC_PARAM##_puname, buf, _size);		\
}									\
static DEVICE_ATTR_RO(_name)
577
/* Device descriptor parameters, one read-only sysfs file each. */
#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, DEVICE, _size)

UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
611
/* Files exposed under the "device_descriptor" subdirectory. */
static struct attribute *ufs_sysfs_device_descriptor[] = {
	&dev_attr_device_type.attr,
	&dev_attr_device_class.attr,
	&dev_attr_device_sub_class.attr,
	&dev_attr_protocol.attr,
	&dev_attr_number_of_luns.attr,
	&dev_attr_number_of_wluns.attr,
	&dev_attr_boot_enable.attr,
	&dev_attr_descriptor_access_enable.attr,
	&dev_attr_initial_power_mode.attr,
	&dev_attr_high_priority_lun.attr,
	&dev_attr_secure_removal_type.attr,
	&dev_attr_support_security_lun.attr,
	&dev_attr_bkops_termination_latency.attr,
	&dev_attr_initial_active_icc_level.attr,
	&dev_attr_specification_version.attr,
	&dev_attr_manufacturing_date.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_rtt_capability.attr,
	&dev_attr_rtc_update.attr,
	&dev_attr_ufs_features.attr,
	&dev_attr_ffu_timeout.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_device_version.attr,
	&dev_attr_number_of_secure_wpa.attr,
	&dev_attr_psa_max_data_size.attr,
	&dev_attr_psa_state_timeout.attr,
	&dev_attr_ext_feature_sup.attr,
	&dev_attr_wb_presv_us_en.attr,
	&dev_attr_wb_type.attr,
	&dev_attr_wb_shared_alloc_units.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_device_descriptor_group = {
	.name = "device_descriptor",
	.attrs = ufs_sysfs_device_descriptor,
};
650
/* Interconnect descriptor parameters: UniPro and M-PHY versions. */
#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size)		\
	UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)

UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);

/* Files exposed under the "interconnect_descriptor" subdirectory. */
static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
	&dev_attr_unipro_version.attr,
	&dev_attr_mphy_version.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
	.name = "interconnect_descriptor",
	.attrs = ufs_sysfs_interconnect_descriptor,
};
667
/* Geometry descriptor parameters, one read-only sysfs file each. */
#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)

UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
	_SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
	_SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
	_NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
	_NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
	_ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
	_ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
	_ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
	_ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
	_ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
	_ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
	_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
	_ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
717
718
/* Files exposed under the "geometry_descriptor" subdirectory. */
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
	&dev_attr_raw_device_capacity.attr,
	&dev_attr_max_number_of_luns.attr,
	&dev_attr_segment_size.attr,
	&dev_attr_allocation_unit_size.attr,
	&dev_attr_min_addressable_block_size.attr,
	&dev_attr_optimal_read_block_size.attr,
	&dev_attr_optimal_write_block_size.attr,
	&dev_attr_max_in_buffer_size.attr,
	&dev_attr_max_out_buffer_size.attr,
	&dev_attr_rpmb_rw_size.attr,
	&dev_attr_dyn_capacity_resource_policy.attr,
	&dev_attr_data_ordering.attr,
	&dev_attr_max_number_of_contexts.attr,
	&dev_attr_sys_data_tag_unit_size.attr,
	&dev_attr_sys_data_tag_resource_size.attr,
	&dev_attr_secure_removal_types.attr,
	&dev_attr_memory_types.attr,
	&dev_attr_sys_code_memory_max_alloc_units.attr,
	&dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
	&dev_attr_non_persist_memory_max_alloc_units.attr,
	&dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh1_memory_max_alloc_units.attr,
	&dev_attr_enh1_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh2_memory_max_alloc_units.attr,
	&dev_attr_enh2_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh3_memory_max_alloc_units.attr,
	&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh4_memory_max_alloc_units.attr,
	&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
	&dev_attr_wb_max_alloc_units.attr,
	&dev_attr_wb_max_wb_luns.attr,
	&dev_attr_wb_buff_cap_adj.attr,
	&dev_attr_wb_sup_red_type.attr,
	&dev_attr_wb_sup_wb_type.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
	.name = "geometry_descriptor",
	.attrs = ufs_sysfs_geometry_descriptor,
};
761
/* Health descriptor parameters: wear-out and end-of-life estimates. */
#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, HEALTH, _size)

UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);

/* Files exposed under the "health_descriptor" subdirectory. */
static struct attribute *ufs_sysfs_health_descriptor[] = {
	&dev_attr_eol_info.attr,
	&dev_attr_life_time_estimation_a.attr,
	&dev_attr_life_time_estimation_b.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_health_descriptor_group = {
	.name = "health_descriptor",
	.attrs = ufs_sysfs_health_descriptor,
};
780
/*
 * Generate a show function for one 2-byte entry of a power descriptor
 * ICC level array; _index (0..15) selects the entry, and the token paste
 * _name##_index yields a distinct sysfs name per index.
 */
#define UFS_POWER_DESC_PARAM(_name, _uname, _index)			\
static ssize_t _name##_index##_show(struct device *dev,			\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0,	\
		PWR_DESC##_uname##_0 + _index * 2, buf, 2);		\
}									\
static DEVICE_ATTR_RO(_name##_index)
790
/* 16 ICC level entries each for VCC, VCCQ and VCCQ2 supplies. */
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);
839
/* Files exposed under the "power_descriptor" subdirectory. */
static struct attribute *ufs_sysfs_power_descriptor[] = {
	&dev_attr_active_icc_levels_vcc0.attr,
	&dev_attr_active_icc_levels_vcc1.attr,
	&dev_attr_active_icc_levels_vcc2.attr,
	&dev_attr_active_icc_levels_vcc3.attr,
	&dev_attr_active_icc_levels_vcc4.attr,
	&dev_attr_active_icc_levels_vcc5.attr,
	&dev_attr_active_icc_levels_vcc6.attr,
	&dev_attr_active_icc_levels_vcc7.attr,
	&dev_attr_active_icc_levels_vcc8.attr,
	&dev_attr_active_icc_levels_vcc9.attr,
	&dev_attr_active_icc_levels_vcc10.attr,
	&dev_attr_active_icc_levels_vcc11.attr,
	&dev_attr_active_icc_levels_vcc12.attr,
	&dev_attr_active_icc_levels_vcc13.attr,
	&dev_attr_active_icc_levels_vcc14.attr,
	&dev_attr_active_icc_levels_vcc15.attr,
	&dev_attr_active_icc_levels_vccq0.attr,
	&dev_attr_active_icc_levels_vccq1.attr,
	&dev_attr_active_icc_levels_vccq2.attr,
	&dev_attr_active_icc_levels_vccq3.attr,
	&dev_attr_active_icc_levels_vccq4.attr,
	&dev_attr_active_icc_levels_vccq5.attr,
	&dev_attr_active_icc_levels_vccq6.attr,
	&dev_attr_active_icc_levels_vccq7.attr,
	&dev_attr_active_icc_levels_vccq8.attr,
	&dev_attr_active_icc_levels_vccq9.attr,
	&dev_attr_active_icc_levels_vccq10.attr,
	&dev_attr_active_icc_levels_vccq11.attr,
	&dev_attr_active_icc_levels_vccq12.attr,
	&dev_attr_active_icc_levels_vccq13.attr,
	&dev_attr_active_icc_levels_vccq14.attr,
	&dev_attr_active_icc_levels_vccq15.attr,
	&dev_attr_active_icc_levels_vccq20.attr,
	&dev_attr_active_icc_levels_vccq21.attr,
	&dev_attr_active_icc_levels_vccq22.attr,
	&dev_attr_active_icc_levels_vccq23.attr,
	&dev_attr_active_icc_levels_vccq24.attr,
	&dev_attr_active_icc_levels_vccq25.attr,
	&dev_attr_active_icc_levels_vccq26.attr,
	&dev_attr_active_icc_levels_vccq27.attr,
	&dev_attr_active_icc_levels_vccq28.attr,
	&dev_attr_active_icc_levels_vccq29.attr,
	&dev_attr_active_icc_levels_vccq210.attr,
	&dev_attr_active_icc_levels_vccq211.attr,
	&dev_attr_active_icc_levels_vccq212.attr,
	&dev_attr_active_icc_levels_vccq213.attr,
	&dev_attr_active_icc_levels_vccq214.attr,
	&dev_attr_active_icc_levels_vccq215.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_power_descriptor_group = {
	.name = "power_descriptor",
	.attrs = ufs_sysfs_power_descriptor,
};
896
/*
 * UFS_STRING_DESCRIPTOR - generate a read-only sysfs show function for one
 * string descriptor.
 * @_name:  sysfs attribute name
 * @_pname: suffix of the DEVICE_DESC_PARAM* field in the device descriptor
 *          that holds the string descriptor's index
 *
 * The generated function reads the device descriptor to obtain the string
 * descriptor index, then reads the string descriptor itself in ASCII form
 * and emits it. host_sem is held across the whole operation and a runtime
 * PM reference is taken around the device accesses.
 *
 * Note: the allocation uses GFP_KERNEL, not GFP_ATOMIC — a sysfs show
 * callback runs in process context and this path already sleeps (down(),
 * ufshcd_rpm_get_sync()), so dipping into the atomic emergency reserves
 * would be both unnecessary and harmful under memory pressure.
 */
#define UFS_STRING_DESCRIPTOR(_name, _pname)				\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	u8 index;							\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	int ret;							\
	int desc_len = QUERY_DESC_MAX_SIZE;				\
	u8 *desc_buf;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);		\
	if (!desc_buf) {						\
		up(&hba->host_sem);					\
		return -ENOMEM;						\
	}								\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_descriptor_retry(hba,			\
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
		0, 0, desc_buf, &desc_len);				\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	index = desc_buf[DEVICE_DESC_PARAM##_pname];			\
	kfree(desc_buf);						\
	desc_buf = NULL;						\
	ret = ufshcd_read_string_desc(hba, index, &desc_buf,		\
				      SD_ASCII_STD);			\
	if (ret < 0)							\
		goto out;						\
	ret = sysfs_emit(buf, "%s\n", desc_buf);			\
out:									\
	ufshcd_rpm_put_sync(hba);					\
	kfree(desc_buf);						\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
940
/* One show function per string descriptor indexed from the device descriptor. */
UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);
946
/* NULL-terminated attribute list for the "string_descriptors" group below. */
static struct attribute *ufs_sysfs_string_descriptors[] = {
	&dev_attr_manufacturer_name.attr,
	&dev_attr_product_name.attr,
	&dev_attr_oem_id.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_product_revision.attr,
	NULL,
};
955
/* Exposes the string descriptor attributes under <dev>/string_descriptors. */
static const struct attribute_group ufs_sysfs_string_descriptors_group = {
	.name = "string_descriptors",
	.attrs = ufs_sysfs_string_descriptors,
};
960
961static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
962{
963 return idn >= QUERY_FLAG_IDN_WB_EN &&
964 idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
965}
966
/*
 * UFS_FLAG - generate a read-only sysfs show function for one query flag.
 * @_name:  sysfs attribute name
 * @_uname: suffix of the QUERY_FLAG_IDN* identifier to read
 *
 * The generated function reads the flag via a READ_FLAG query and emits
 * "true"/"false". WriteBooster flags additionally get their query index
 * from ufshcd_wb_get_query_index(). host_sem guards against concurrent
 * shutdown; a runtime PM reference is held around the query.
 */
#define UFS_FLAG(_name, _uname)						\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	bool flag;							\
	u8 index = 0;							\
	int ret;							\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
		QUERY_FLAG_IDN##_uname, index, &flag);			\
	ufshcd_rpm_put_sync(hba);					\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false");		\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
997
/* One show function per exposed device query flag. */
UFS_FLAG(device_init, _FDEVICEINIT);
UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
UFS_FLAG(bkops_enable, _BKOPS_EN);
UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
1009
/* NULL-terminated attribute list for the "flags" group below. */
static struct attribute *ufs_sysfs_device_flags[] = {
	&dev_attr_device_init.attr,
	&dev_attr_permanent_wpe.attr,
	&dev_attr_power_on_wpe.attr,
	&dev_attr_bkops_enable.attr,
	&dev_attr_life_span_mode_enable.attr,
	&dev_attr_phy_resource_removal.attr,
	&dev_attr_busy_rtc.attr,
	&dev_attr_disable_fw_update.attr,
	&dev_attr_wb_enable.attr,
	&dev_attr_wb_flush_en.attr,
	&dev_attr_wb_flush_during_h8.attr,
	NULL,
};
1024
/* Exposes the device query flags under <dev>/flags. */
static const struct attribute_group ufs_sysfs_flags_group = {
	.name = "flags",
	.attrs = ufs_sysfs_device_flags,
};
1029
1030static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
1031{
1032 return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
1033 idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
1034}
1035
/*
 * UFS_ATTRIBUTE - generate a read-only sysfs show function for one query
 * attribute.
 * @_name:  sysfs attribute name
 * @_uname: suffix of the QUERY_ATTR_IDN* identifier to read
 *
 * The generated function reads the attribute via a READ_ATTR query and
 * emits it as "0x%08X". WriteBooster attributes additionally get their
 * query index from ufshcd_wb_get_query_index(). host_sem guards against
 * concurrent shutdown; a runtime PM reference is held around the query.
 */
#define UFS_ATTRIBUTE(_name, _uname)					\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	u32 value;							\
	int ret;							\
	u8 index = 0;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
		QUERY_ATTR_IDN##_uname, index, 0, &value);		\
	ufshcd_rpm_put_sync(hba);					\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "0x%08X\n", value);			\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
1066
/* One show function per exposed device query attribute. */
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
1087
1088
/* NULL-terminated attribute list for the "attributes" group below. */
static struct attribute *ufs_sysfs_attributes[] = {
	&dev_attr_boot_lun_enabled.attr,
	&dev_attr_current_power_mode.attr,
	&dev_attr_active_icc_level.attr,
	&dev_attr_ooo_data_enabled.attr,
	&dev_attr_bkops_status.attr,
	&dev_attr_purge_status.attr,
	&dev_attr_max_data_in_size.attr,
	&dev_attr_max_data_out_size.attr,
	&dev_attr_reference_clock_frequency.attr,
	&dev_attr_configuration_descriptor_lock.attr,
	&dev_attr_max_number_of_rtt.attr,
	&dev_attr_exception_event_control.attr,
	&dev_attr_exception_event_status.attr,
	&dev_attr_ffu_status.attr,
	&dev_attr_psa_state.attr,
	&dev_attr_psa_data_size.attr,
	&dev_attr_wb_flush_status.attr,
	&dev_attr_wb_avail_buf.attr,
	&dev_attr_wb_life_time_est.attr,
	&dev_attr_wb_cur_buf.attr,
	NULL,
};
1112
/* Exposes the device query attributes under <dev>/attributes. */
static const struct attribute_group ufs_sysfs_attributes_group = {
	.name = "attributes",
	.attrs = ufs_sysfs_attributes,
};
1117
/* All device-level sysfs groups; registered by ufs_sysfs_add_nodes(). */
static const struct attribute_group *ufs_sysfs_groups[] = {
	&ufs_sysfs_default_group,
	&ufs_sysfs_monitor_group,
	&ufs_sysfs_device_descriptor_group,
	&ufs_sysfs_interconnect_descriptor_group,
	&ufs_sysfs_geometry_descriptor_group,
	&ufs_sysfs_health_descriptor_group,
	&ufs_sysfs_power_descriptor_group,
	&ufs_sysfs_string_descriptors_group,
	&ufs_sysfs_flags_group,
	&ufs_sysfs_attributes_group,
	NULL,
};
1131
/*
 * UFS_LUN_DESC_PARAM - generate a per-LUN show function for one descriptor
 * parameter.
 * @_pname:  sysfs attribute name
 * @_puname: parameter-name suffix (appended to <desc>_DESC_PARAM)
 * @_duname: descriptor-name suffix (e.g. UNIT)
 * @_size:   parameter size in bytes
 *
 * The generated function resolves the UPIU LUN from the scsi_device and
 * rejects (lun, offset) combinations that are not valid for this device
 * before reading the parameter via ufs_sysfs_read_desc_param().
 */
#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size)		\
static ssize_t _pname##_show(struct device *dev,			\
	struct device_attribute *attr, char *buf)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufs_hba *hba = shost_priv(sdev->host);			\
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun,		\
				_duname##_DESC_PARAM##_puname))		\
		return -EINVAL;						\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
}									\
static DEVICE_ATTR_RO(_pname)

/* Convenience wrapper for unit descriptor parameters. */
#define UFS_UNIT_DESC_PARAM(_name, _uname, _size)			\
	UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
1149
/*
 * Unit descriptor parameters exposed per LUN.
 *
 * NOTE(review): "physical_memory_resourse_count" misspells "resource",
 * but the attribute name is user-visible sysfs ABI and must stay as-is.
 */
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
1164
1165
/* NULL-terminated attribute list for the per-LUN "unit_descriptor" group. */
static struct attribute *ufs_sysfs_unit_descriptor[] = {
	&dev_attr_boot_lun_id.attr,
	&dev_attr_lun_write_protect.attr,
	&dev_attr_lun_queue_depth.attr,
	&dev_attr_psa_sensitive.attr,
	&dev_attr_lun_memory_type.attr,
	&dev_attr_data_reliability.attr,
	&dev_attr_logical_block_size.attr,
	&dev_attr_logical_block_count.attr,
	&dev_attr_erase_block_size.attr,
	&dev_attr_provisioning_type.attr,
	&dev_attr_physical_memory_resourse_count.attr,
	&dev_attr_context_capabilities.attr,
	&dev_attr_large_unit_granularity.attr,
	&dev_attr_wb_buf_alloc_units.attr,
	NULL,
};
1183
/* Per-LUN group (non-static: attached to scsi_device elsewhere). */
const struct attribute_group ufs_sysfs_unit_descriptor_group = {
	.name = "unit_descriptor",
	.attrs = ufs_sysfs_unit_descriptor,
};
1188
1189static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
1190 struct device_attribute *attr, char *buf)
1191{
1192 u32 value;
1193 struct scsi_device *sdev = to_scsi_device(dev);
1194 struct ufs_hba *hba = shost_priv(sdev->host);
1195 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
1196 int ret;
1197
1198 down(&hba->host_sem);
1199 if (!ufshcd_is_user_access_allowed(hba)) {
1200 ret = -EBUSY;
1201 goto out;
1202 }
1203
1204 ufshcd_rpm_get_sync(hba);
1205 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
1206 QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
1207 ufshcd_rpm_put_sync(hba);
1208 if (ret) {
1209 ret = -EINVAL;
1210 goto out;
1211 }
1212
1213 ret = sysfs_emit(buf, "0x%08X\n", value);
1214
1215out:
1216 up(&hba->host_sem);
1217 return ret;
1218}
1219static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
1220
/* Per-LUN query attributes (currently only dDynCapNeeded). */
static struct attribute *ufs_sysfs_lun_attributes[] = {
	&dev_attr_dyn_cap_needed_attribute.attr,
	NULL,
};

/* Unnamed per-LUN group (non-static: attached to scsi_device elsewhere). */
const struct attribute_group ufs_sysfs_lun_attributes_group = {
	.attrs = ufs_sysfs_lun_attributes,
};
1229
1230void ufs_sysfs_add_nodes(struct device *dev)
1231{
1232 int ret;
1233
1234 ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
1235 if (ret)
1236 dev_err(dev,
1237 "%s: sysfs groups creation failed (err = %d)\n",
1238 __func__, ret);
1239}
1240
/*
 * ufs_sysfs_remove_nodes - remove the groups created by ufs_sysfs_add_nodes()
 * @dev: device the groups were attached to
 */
void ufs_sysfs_remove_nodes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}
1245