// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Western Digital Corporation

#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>

#include <ufs/ufs.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"

static const char *ufs_pa_pwr_mode_to_string(enum ufs_pa_pwr_mode mode)
{
	switch (mode) {
	case FAST_MODE:		return "FAST_MODE";
	case SLOW_MODE:		return "SLOW_MODE";
	case FASTAUTO_MODE:	return "FASTAUTO_MODE";
	case SLOWAUTO_MODE:	return "SLOWAUTO_MODE";
	default:		return "UNKNOWN";
	}
}

static const char *ufs_hs_gear_rate_to_string(enum ufs_hs_gear_rate rate)
{
	switch (rate) {
	case PA_HS_MODE_A:	return "HS_RATE_A";
	case PA_HS_MODE_B:	return "HS_RATE_B";
	default:		return "UNKNOWN";
	}
}

static const char *ufs_pwm_gear_to_string(enum ufs_pwm_gear_tag gear)
{
	switch (gear) {
	case UFS_PWM_G1:	return "PWM_GEAR1";
	case UFS_PWM_G2:	return "PWM_GEAR2";
	case UFS_PWM_G3:	return "PWM_GEAR3";
	case UFS_PWM_G4:	return "PWM_GEAR4";
	case UFS_PWM_G5:	return "PWM_GEAR5";
	case UFS_PWM_G6:	return "PWM_GEAR6";
	case UFS_PWM_G7:	return "PWM_GEAR7";
	default:		return "UNKNOWN";
	}
}

static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
{
	switch (gear) {
	case UFS_HS_G1:	return "HS_GEAR1";
	case UFS_HS_G2:	return "HS_GEAR2";
	case UFS_HS_G3:	return "HS_GEAR3";
	case UFS_HS_G4:	return "HS_GEAR4";
	case UFS_HS_G5:	return "HS_GEAR5";
	default:	return "UNKNOWN";
	}
}

static const char * const ufs_hid_states[] = {
	[HID_IDLE]		= "idle",
	[ANALYSIS_IN_PROGRESS]	= "analysis_in_progress",
	[DEFRAG_REQUIRED]	= "defrag_required",
	[DEFRAG_IN_PROGRESS]	= "defrag_in_progress",
	[DEFRAG_COMPLETED]	= "defrag_completed",
	[DEFRAG_NOT_REQUIRED]	= "defrag_not_required",
};

static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
{
	if (state < NUM_UFS_HID_STATES)
		return ufs_hid_states[state];

	return "unknown";
}

static const char *ufshcd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	case UIC_LINK_BROKEN_STATE:	return "BROKEN";
	default:			return "UNKNOWN";
	}
}

static const char *ufshcd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	case UFS_DEEPSLEEP_PWR_MODE:	return "DEEPSLEEP";
	default:			return "UNKNOWN";
	}
}

static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count,
					     bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_dev_info *dev_info = &hba->dev_info;
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX)
		return -EINVAL;

	if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
	    (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
	     !(dev_info->wspecversion >= 0x310)))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t rpm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}

static ssize_t rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}

static ssize_t rpm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}

static ssize_t rpm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}

static ssize_t spm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}

static ssize_t spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}

static ssize_t spm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
			ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}

static ssize_t spm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
			ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
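
/*
 * Example (illustrative; the sysfs path depends on the platform): select a
 * runtime PM level and inspect the device/link states it maps to:
 *
 *   # echo 5 > /sys/bus/platform/devices/<ufshc>/rpm_lvl
 *   # cat /sys/bus/platform/devices/<ufshc>/rpm_target_dev_state
 *   # cat /sys/bus/platform/devices/<ufshc>/rpm_target_link_state
 *
 * The level-to-state mapping is defined by ufs_pm_lvl_states[] in the core
 * driver; see Documentation/ABI/testing/sysfs-driver-ufs for the ABI.
 */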

/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
	int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
	int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);

	for (; scale > 0; --scale)
		timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return timer;
}

/* Convert microseconds to Auto-Hibernate Idle Timer register value */
static u32 ufshcd_us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
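
/*
 * Worked example of the conversions above, assuming the usual UFSHCI field
 * layout (10-bit timer value, 3-bit scale, scale factor 10):
 *
 *   ufshcd_us_to_ahit(150000): 150000 does not fit in the timer field, so
 *   it is divided down 150000 -> 15000 -> 1500 -> 150 with scale = 3, i.e.
 *   the register encodes "150 * 10^3 us". ufshcd_ahit_to_us() multiplies
 *   back out to 150000. Durations that are not exact multiples of the
 *   chosen scale are truncated: 1234 us is stored as 123 * 10^1 = 1230 us.
 */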

static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
{
	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);
	*val = ufshcd_readl(hba, reg);
	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return 0;
}

static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}

static ssize_t auto_hibern8_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int timer;
	int ret = 0;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	if (kstrtouint(buf, 0, &timer))
		return -EINVAL;

	if (timer > UFSHCI_AHIBERN8_MAX)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));

out:
	up(&hba->host_sem);
	return ret ? ret : count;
}
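
/*
 * Illustrative usage (path varies by platform); the value is the idle time
 * in microseconds before the host autonomously enters hibernate:
 *
 *   # cat /sys/devices/.../auto_hibern8
 *   # echo 150000 > /sys/devices/.../auto_hibern8
 *
 * Writing 0 disables the Auto-Hibernate timer per UFSHCI; values above
 * UFSHCI_AHIBERN8_MAX are rejected with -EINVAL.
 */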

static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
}

static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_enable;
	ssize_t res;

	if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
		&& ufshcd_enable_wb_if_scaling_up(hba))) {
		/*
		 * If the platform supports UFSHCD_CAP_CLK_SCALING, WB is
		 * toggled on/off as part of clock scaling up/down, so user
		 * control of it is not allowed here.
		 */
		dev_warn(dev, "It is not allowed to configure WB!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &wb_enable))
		return -EINVAL;

	if (wb_enable != 0 && wb_enable != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle(hba, wb_enable);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}

static ssize_t rtc_update_ms_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.rtc_update_period);
}

static ssize_t rtc_update_ms_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int ms;
	bool resume_period_update = false;

	if (kstrtouint(buf, 0, &ms))
		return -EINVAL;

	if (!hba->dev_info.rtc_update_period && ms > 0)
		resume_period_update = true;
	/* Minimum and maximum update frequency should be synchronized with all UFS vendors */
	hba->dev_info.rtc_update_period = ms;

	if (resume_period_update)
		schedule_delayed_work(&hba->ufs_rtc_update_work,
				      msecs_to_jiffies(hba->dev_info.rtc_update_period));
	return count;
}

static ssize_t enable_wb_buf_flush_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
}

static ssize_t enable_wb_buf_flush_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int enable_wb_buf_flush;
	ssize_t res;

	if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
		dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &enable_wb_buf_flush))
		return -EINVAL;

	if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}

static ssize_t wb_flush_threshold_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold);
}

static ssize_t wb_flush_threshold_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_flush_threshold;

	if (kstrtouint(buf, 0, &wb_flush_threshold))
		return -EINVAL;

	/* The range of values for wb_flush_threshold is (0,10] */
	if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) ||
	    wb_flush_threshold == 0) {
		dev_err(dev, "The value of wb_flush_threshold is invalid!\n");
		return -EINVAL;
	}

	hba->vps->wb_flush_threshold = wb_flush_threshold;

	return count;
}
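
/*
 * A note on units, inferred from the bounds check above: since
 * UFS_WB_BUF_REMAIN_PERCENT(100) caps the value at 10, one step of
 * wb_flush_threshold corresponds to roughly 10% of the available
 * WriteBooster buffer, e.g. a threshold of 4 asks for a flush once less
 * than about 40% of the buffer remains available.
 */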

/**
 * pm_qos_enable_show - sysfs handler to show pm qos enable value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 *
 * Print 1 if PM QoS feature is enabled, 0 if disabled.
 *
 * Returns number of characters written to @buf.
 */
static ssize_t pm_qos_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	guard(mutex)(&to_hba_priv(hba)->pm_qos_mutex);

	return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}

/**
 * pm_qos_enable_store - sysfs handler to store pm qos enable value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 * @count: number of characters in @buf
 *
 * Write 0 to disable PM QoS, 1 to enable it.
 * Default state: 1
 *
 * Return: @count on success, < 0 upon failure.
 */
static ssize_t pm_qos_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	bool value;

	if (kstrtobool(buf, &value))
		return -EINVAL;

	if (value)
		ufshcd_pm_qos_init(hba);
	else
		ufshcd_pm_qos_exit(hba);

	return count;
}

static ssize_t critical_health_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}

static ssize_t device_lvl_exception_count_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->dev_info.wspecversion < 0x410)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
}

static ssize_t device_lvl_exception_count_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int value;

	if (kstrtouint(buf, 0, &value))
		return -EINVAL;

	/* the only supported use case is to reset the dev_lvl_exception_count */
	if (value)
		return -EINVAL;

	atomic_set(&hba->dev_lvl_exception_count, 0);

	return count;
}

static ssize_t device_lvl_exception_id_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u64 exception_id;
	int err;

	ufshcd_rpm_get_sync(hba);
	err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
	ufshcd_rpm_put_sync(hba);

	if (err)
		return err;

	hba->dev_lvl_exception_id = exception_id;
	return sysfs_emit(buf, "%llu\n", exception_id);
}

static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
static DEVICE_ATTR_RW(device_lvl_exception_count);
static DEVICE_ATTR_RO(device_lvl_exception_id);

static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
	&dev_attr_rpm_lvl.attr,
	&dev_attr_rpm_target_dev_state.attr,
	&dev_attr_rpm_target_link_state.attr,
	&dev_attr_spm_lvl.attr,
	&dev_attr_spm_target_dev_state.attr,
	&dev_attr_spm_target_link_state.attr,
	&dev_attr_auto_hibern8.attr,
	&dev_attr_wb_on.attr,
	&dev_attr_enable_wb_buf_flush.attr,
	&dev_attr_wb_flush_threshold.attr,
	&dev_attr_rtc_update_ms.attr,
	&dev_attr_pm_qos_enable.attr,
	&dev_attr_critical_health.attr,
	&dev_attr_device_lvl_exception_count.attr,
	&dev_attr_device_lvl_exception_id.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_default_group = {
	.attrs = ufs_sysfs_ufshcd_attrs,
};

static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
}

static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
}

static DEVICE_ATTR_RO(clock_scaling);
static DEVICE_ATTR_RO(write_booster);

/*
 * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
 * group.
 */
static struct attribute *ufs_sysfs_capabilities_attrs[] = {
	&dev_attr_clock_scaling.attr,
	&dev_attr_write_booster.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_capabilities_group = {
	.name = "capabilities",
	.attrs = ufs_sysfs_capabilities_attrs,
};

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
}

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret;
	u32 val;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%x\n", val);
}

static ssize_t man_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int ret;
	u32 val;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%x\n", val);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(product_id);
static DEVICE_ATTR_RO(man_id);

static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_product_id.attr,
	&dev_attr_man_id.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_ufshci_group = {
	.name = "ufshci_capabilities",
	.attrs = ufs_sysfs_ufshci_cap_attrs,
};

static ssize_t monitor_enable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
}

static ssize_t monitor_enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->monitor.enabled)
		goto out_unlock;

	if (!value) {
		memset(&hba->monitor, 0, sizeof(hba->monitor));
	} else {
		hba->monitor.enabled = true;
		hba->monitor.enabled_ts = ktime_get();
	}

out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t monitor_chunk_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
}

static ssize_t monitor_chunk_size_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* Only allow chunk size change when monitor is disabled */
	if (!hba->monitor.enabled)
		hba->monitor.chunk_size = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t read_total_sectors_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
}

static ssize_t read_total_busy_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[READ]));
}

static ssize_t read_nr_requests_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
}

static ssize_t read_req_latency_avg_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	if (!m->nr_req[READ])
		return sysfs_emit(buf, "0\n");

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
						 m->nr_req[READ]));
}

static ssize_t read_req_latency_max_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[READ]));
}

static ssize_t read_req_latency_min_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[READ]));
}

static ssize_t read_req_latency_sum_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[READ]));
}

static ssize_t write_total_sectors_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
}

static ssize_t write_total_busy_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[WRITE]));
}

static ssize_t write_nr_requests_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
}

static ssize_t write_req_latency_avg_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	if (!m->nr_req[WRITE])
		return sysfs_emit(buf, "0\n");

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
						 m->nr_req[WRITE]));
}

static ssize_t write_req_latency_max_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[WRITE]));
}

static ssize_t write_req_latency_min_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[WRITE]));
}

static ssize_t write_req_latency_sum_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[WRITE]));
}

static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);

static struct attribute *ufs_sysfs_monitor_attrs[] = {
	&dev_attr_monitor_enable.attr,
	&dev_attr_monitor_chunk_size.attr,
	&dev_attr_read_total_sectors.attr,
	&dev_attr_read_total_busy.attr,
	&dev_attr_read_nr_requests.attr,
	&dev_attr_read_req_latency_avg.attr,
	&dev_attr_read_req_latency_max.attr,
	&dev_attr_read_req_latency_min.attr,
	&dev_attr_read_req_latency_sum.attr,
	&dev_attr_write_total_sectors.attr,
	&dev_attr_write_total_busy.attr,
	&dev_attr_write_nr_requests.attr,
	&dev_attr_write_req_latency_avg.attr,
	&dev_attr_write_req_latency_max.attr,
	&dev_attr_write_req_latency_min.attr,
	&dev_attr_write_req_latency_sum.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_monitor_group = {
	.name = "monitor",
	.attrs = ufs_sysfs_monitor_attrs,
};
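
/*
 * Illustrative monitor workflow (paths and sizes are examples only):
 *
 *   # echo 4096 > .../monitor/monitor_chunk_size    # only track 4 KiB I/Os
 *   # echo 1 > .../monitor/monitor_enable
 *   ... run a workload ...
 *   # cat .../monitor/read_req_latency_avg          # average latency in us
 *
 * Writing 0 to monitor_enable memsets the whole monitor structure, so read
 * any statistics you need before disabling it.
 */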

static ssize_t lane_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", hba->pwr_info.lane_rx);
}

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufs_pa_pwr_mode_to_string(hba->pwr_info.pwr_rx));
}

static ssize_t rate_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufs_hs_gear_rate_to_string(hba->pwr_info.hs_rate));
}

static ssize_t gear_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", hba->pwr_info.hs_rate ?
			  ufs_hs_gear_to_string(hba->pwr_info.gear_rx) :
			  ufs_pwm_gear_to_string(hba->pwr_info.gear_rx));
}

static ssize_t dev_pm_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(hba->curr_dev_pwr_mode));
}

static ssize_t link_state_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(hba->uic_link_state));
}

static DEVICE_ATTR_RO(lane);
static DEVICE_ATTR_RO(mode);
static DEVICE_ATTR_RO(rate);
static DEVICE_ATTR_RO(gear);
static DEVICE_ATTR_RO(dev_pm);
static DEVICE_ATTR_RO(link_state);

static struct attribute *ufs_power_info_attrs[] = {
	&dev_attr_lane.attr,
	&dev_attr_mode.attr,
	&dev_attr_rate.attr,
	&dev_attr_gear.attr,
	&dev_attr_dev_pm.attr,
	&dev_attr_link_state.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_power_info_group = {
	.name = "power_info",
	.attrs = ufs_power_info_attrs,
};

static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
					 enum desc_idn desc_id,
					 u8 desc_index,
					 u8 param_offset,
					 u8 *sysfs_buf,
					 u8 param_size)
{
	u8 desc_buf[8] = {0};
	int ret;

	if (param_size > 8)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
				     param_offset, desc_buf, param_size);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	switch (param_size) {
	case 1:
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
		break;
	case 2:
		ret = sysfs_emit(sysfs_buf, "0x%04X\n",
				 get_unaligned_be16(desc_buf));
		break;
	case 4:
		ret = sysfs_emit(sysfs_buf, "0x%08X\n",
				 get_unaligned_be32(desc_buf));
		break;
	case 8:
		ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
				 get_unaligned_be64(desc_buf));
		break;
	}

out:
	up(&hba->host_sem);
	return ret;
}

#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
static ssize_t _name##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
		0, _duname##_DESC_PARAM##_puname, buf, _size); \
} \
static DEVICE_ATTR_RO(_name)
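
/*
 * For reference, UFS_DESC_PARAM(device_type, _DEVICE_TYPE, DEVICE, 1)
 * expands (modulo whitespace) to:
 *
 *   static ssize_t device_type_show(struct device *dev,
 *           struct device_attribute *attr, char *buf)
 *   {
 *           struct ufs_hba *hba = dev_get_drvdata(dev);
 *           return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE,
 *                   0, DEVICE_DESC_PARAM_DEVICE_TYPE, buf, 1);
 *   }
 *   static DEVICE_ATTR_RO(device_type);
 *
 * so each invocation below mints one read-only attribute that reads a
 * single descriptor field.
 */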

#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \
	UFS_DESC_PARAM(_name, _uname, DEVICE, _size)

UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_device_descriptor[] = {
	&dev_attr_device_type.attr,
	&dev_attr_device_class.attr,
	&dev_attr_device_sub_class.attr,
	&dev_attr_protocol.attr,
	&dev_attr_number_of_luns.attr,
	&dev_attr_number_of_wluns.attr,
	&dev_attr_boot_enable.attr,
	&dev_attr_descriptor_access_enable.attr,
	&dev_attr_initial_power_mode.attr,
	&dev_attr_high_priority_lun.attr,
	&dev_attr_secure_removal_type.attr,
	&dev_attr_support_security_lun.attr,
	&dev_attr_bkops_termination_latency.attr,
	&dev_attr_initial_active_icc_level.attr,
	&dev_attr_specification_version.attr,
	&dev_attr_manufacturing_date.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_rtt_capability.attr,
	&dev_attr_rtc_update.attr,
	&dev_attr_ufs_features.attr,
	&dev_attr_ffu_timeout.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_device_version.attr,
	&dev_attr_number_of_secure_wpa.attr,
	&dev_attr_psa_max_data_size.attr,
	&dev_attr_psa_state_timeout.attr,
	&dev_attr_ext_feature_sup.attr,
	&dev_attr_wb_presv_us_en.attr,
	&dev_attr_wb_type.attr,
	&dev_attr_wb_shared_alloc_units.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_device_descriptor_group = {
	.name = "device_descriptor",
	.attrs = ufs_sysfs_device_descriptor,
};

#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \
	UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)

UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);

static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
	&dev_attr_unipro_version.attr,
	&dev_attr_mphy_version.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
	.name = "interconnect_descriptor",
	.attrs = ufs_sysfs_interconnect_descriptor,
};

#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \
	UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)

UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
	_SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
	_SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
	_NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
	_NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
	_ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
	_ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
	_ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
	_ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
	_ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
	_ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
	_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
	_ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);

static struct attribute *ufs_sysfs_geometry_descriptor[] = {
	&dev_attr_raw_device_capacity.attr,
	&dev_attr_max_number_of_luns.attr,
	&dev_attr_segment_size.attr,
	&dev_attr_allocation_unit_size.attr,
	&dev_attr_min_addressable_block_size.attr,
	&dev_attr_optimal_read_block_size.attr,
	&dev_attr_optimal_write_block_size.attr,
	&dev_attr_max_in_buffer_size.attr,
	&dev_attr_max_out_buffer_size.attr,
	&dev_attr_rpmb_rw_size.attr,
	&dev_attr_dyn_capacity_resource_policy.attr,
	&dev_attr_data_ordering.attr,
	&dev_attr_max_number_of_contexts.attr,
	&dev_attr_sys_data_tag_unit_size.attr,
	&dev_attr_sys_data_tag_resource_size.attr,
	&dev_attr_secure_removal_types.attr,
	&dev_attr_memory_types.attr,
	&dev_attr_sys_code_memory_max_alloc_units.attr,
	&dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
	&dev_attr_non_persist_memory_max_alloc_units.attr,
	&dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh1_memory_max_alloc_units.attr,
	&dev_attr_enh1_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh2_memory_max_alloc_units.attr,
	&dev_attr_enh2_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh3_memory_max_alloc_units.attr,
	&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh4_memory_max_alloc_units.attr,
	&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
	&dev_attr_wb_max_alloc_units.attr,
	&dev_attr_wb_max_wb_luns.attr,
	&dev_attr_wb_buff_cap_adj.attr,
	&dev_attr_wb_sup_red_type.attr,
	&dev_attr_wb_sup_wb_type.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
	.name = "geometry_descriptor",
	.attrs = ufs_sysfs_geometry_descriptor,
};

#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
	UFS_DESC_PARAM(_name, _uname, HEALTH, _size)

UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);

static struct attribute *ufs_sysfs_health_descriptor[] = {
	&dev_attr_eol_info.attr,
	&dev_attr_life_time_estimation_a.attr,
	&dev_attr_life_time_estimation_b.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_health_descriptor_group = {
	.name = "health_descriptor",
	.attrs = ufs_sysfs_health_descriptor,
};

#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \
static ssize_t _name##_index##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \
		PWR_DESC##_uname##_0 + _index * 2, buf, 2); \
} \
static DEVICE_ATTR_RO(_name##_index)
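
/*
 * _name##_index pastes both macro arguments, so
 * UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0) defines
 * active_icc_levels_vcc0_show(), which reads the 2-byte entry at offset
 * PWR_DESC_ACTIVE_LVLS_VCC_0 + 0 * 2. The invocations below stamp out one
 * attribute per ICC level (0-15) for each of VCC, VCCQ and VCCQ2.
 */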

UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);

static struct attribute *ufs_sysfs_power_descriptor[] = {
	&dev_attr_active_icc_levels_vcc0.attr,
	&dev_attr_active_icc_levels_vcc1.attr,
	&dev_attr_active_icc_levels_vcc2.attr,
	&dev_attr_active_icc_levels_vcc3.attr,
	&dev_attr_active_icc_levels_vcc4.attr,
	&dev_attr_active_icc_levels_vcc5.attr,
	&dev_attr_active_icc_levels_vcc6.attr,
	&dev_attr_active_icc_levels_vcc7.attr,
	&dev_attr_active_icc_levels_vcc8.attr,
	&dev_attr_active_icc_levels_vcc9.attr,
	&dev_attr_active_icc_levels_vcc10.attr,
	&dev_attr_active_icc_levels_vcc11.attr,
	&dev_attr_active_icc_levels_vcc12.attr,
	&dev_attr_active_icc_levels_vcc13.attr,
	&dev_attr_active_icc_levels_vcc14.attr,
	&dev_attr_active_icc_levels_vcc15.attr,
	&dev_attr_active_icc_levels_vccq0.attr,
	&dev_attr_active_icc_levels_vccq1.attr,
	&dev_attr_active_icc_levels_vccq2.attr,
	&dev_attr_active_icc_levels_vccq3.attr,
	&dev_attr_active_icc_levels_vccq4.attr,
	&dev_attr_active_icc_levels_vccq5.attr,
	&dev_attr_active_icc_levels_vccq6.attr,
	&dev_attr_active_icc_levels_vccq7.attr,
	&dev_attr_active_icc_levels_vccq8.attr,
	&dev_attr_active_icc_levels_vccq9.attr,
	&dev_attr_active_icc_levels_vccq10.attr,
	&dev_attr_active_icc_levels_vccq11.attr,
	&dev_attr_active_icc_levels_vccq12.attr,
	&dev_attr_active_icc_levels_vccq13.attr,
	&dev_attr_active_icc_levels_vccq14.attr,
	&dev_attr_active_icc_levels_vccq15.attr,
	&dev_attr_active_icc_levels_vccq20.attr,
	&dev_attr_active_icc_levels_vccq21.attr,
	&dev_attr_active_icc_levels_vccq22.attr,
	&dev_attr_active_icc_levels_vccq23.attr,
	&dev_attr_active_icc_levels_vccq24.attr,
	&dev_attr_active_icc_levels_vccq25.attr,
	&dev_attr_active_icc_levels_vccq26.attr,
	&dev_attr_active_icc_levels_vccq27.attr,
	&dev_attr_active_icc_levels_vccq28.attr,
	&dev_attr_active_icc_levels_vccq29.attr,
	&dev_attr_active_icc_levels_vccq210.attr,
	&dev_attr_active_icc_levels_vccq211.attr,
	&dev_attr_active_icc_levels_vccq212.attr,
	&dev_attr_active_icc_levels_vccq213.attr,
	&dev_attr_active_icc_levels_vccq214.attr,
	&dev_attr_active_icc_levels_vccq215.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_power_descriptor_group = {
	.name = "power_descriptor",
	.attrs = ufs_sysfs_power_descriptor,
};

#define UFS_STRING_DESCRIPTOR(_name, _pname) \
static ssize_t _name##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	u8 index; \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
	int ret; \
	int desc_len = QUERY_DESC_MAX_SIZE; \
	u8 *desc_buf; \
 \
	down(&hba->host_sem); \
	if (!ufshcd_is_user_access_allowed(hba)) { \
		up(&hba->host_sem); \
		return -EBUSY; \
	} \
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
	if (!desc_buf) { \
		up(&hba->host_sem); \
		return -ENOMEM; \
	} \
	ufshcd_rpm_get_sync(hba); \
	ret = ufshcd_query_descriptor_retry(hba, \
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
		0, 0, desc_buf, &desc_len); \
	if (ret) { \
		ret = -EINVAL; \
		goto out; \
	} \
	index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
	kfree(desc_buf); \
	desc_buf = NULL; \
	ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
				      SD_ASCII_STD); \
	if (ret < 0) \
		goto out; \
	ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
	ufshcd_rpm_put_sync(hba); \
	kfree(desc_buf); \
	up(&hba->host_sem); \
	return ret; \
} \
static DEVICE_ATTR_RO(_name)

UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);

static struct attribute *ufs_sysfs_string_descriptors[] = {
	&dev_attr_manufacturer_name.attr,
	&dev_attr_product_name.attr,
	&dev_attr_oem_id.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_product_revision.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_string_descriptors_group = {
	.name = "string_descriptors",
	.attrs = ufs_sysfs_string_descriptors,
};

static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
	return idn >= QUERY_FLAG_IDN_WB_EN &&
	       idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}

#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	bool flag; \
	u8 index = 0; \
	int ret; \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
 \
	down(&hba->host_sem); \
	if (!ufshcd_is_user_access_allowed(hba)) { \
		up(&hba->host_sem); \
		return -EBUSY; \
	} \
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
		index = ufshcd_wb_get_query_index(hba); \
	ufshcd_rpm_get_sync(hba); \
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
		QUERY_FLAG_IDN##_uname, index, &flag); \
	ufshcd_rpm_put_sync(hba); \
	if (ret) { \
		ret = -EINVAL; \
		goto out; \
	} \
	ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
out: \
	up(&hba->host_sem); \
	return ret; \
} \
static DEVICE_ATTR_RO(_name)
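
/*
 * Example: UFS_FLAG(device_init, _FDEVICEINIT) defines a read-only
 * "device_init" attribute that issues a READ_FLAG query for
 * QUERY_FLAG_IDN_FDEVICEINIT and prints "true" or "false". For the
 * WriteBooster flags, ufshcd_is_wb_flags() routes the query to the right
 * index (per-LUN or shared buffer) via ufshcd_wb_get_query_index().
 */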
1457
1458 UFS_FLAG(device_init, _FDEVICEINIT);
1459 UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
1460 UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
1461 UFS_FLAG(bkops_enable, _BKOPS_EN);
1462 UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
1463 UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
1464 UFS_FLAG(busy_rtc, _BUSY_RTC);
1465 UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
1466 UFS_FLAG(wb_enable, _WB_EN);
1467 UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
1468 UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
1469
1470 static struct attribute *ufs_sysfs_device_flags[] = {
1471 &dev_attr_device_init.attr,
1472 &dev_attr_permanent_wpe.attr,
1473 &dev_attr_power_on_wpe.attr,
1474 &dev_attr_bkops_enable.attr,
1475 &dev_attr_life_span_mode_enable.attr,
1476 &dev_attr_phy_resource_removal.attr,
1477 &dev_attr_busy_rtc.attr,
1478 &dev_attr_disable_fw_update.attr,
1479 &dev_attr_wb_enable.attr,
1480 &dev_attr_wb_flush_en.attr,
1481 &dev_attr_wb_flush_during_h8.attr,
1482 NULL,
1483 };
1484
1485 static const struct attribute_group ufs_sysfs_flags_group = {
1486 .name = "flags",
1487 .attrs = ufs_sysfs_device_flags,
1488 };
1489
max_number_of_rtt_show(struct device * dev,struct device_attribute * attr,char * buf)1490 static ssize_t max_number_of_rtt_show(struct device *dev,
1491 struct device_attribute *attr, char *buf)
1492 {
1493 struct ufs_hba *hba = dev_get_drvdata(dev);
1494 u32 rtt;
1495 int ret;
1496
1497 down(&hba->host_sem);
1498 if (!ufshcd_is_user_access_allowed(hba)) {
1499 up(&hba->host_sem);
1500 return -EBUSY;
1501 }
1502
1503 ufshcd_rpm_get_sync(hba);
1504 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
1505 QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
1506 ufshcd_rpm_put_sync(hba);
1507
1508 if (ret)
1509 goto out;
1510
1511 ret = sysfs_emit(buf, "0x%08X\n", rtt);
1512
1513 out:
1514 up(&hba->host_sem);
1515 return ret;
1516 }
1517
max_number_of_rtt_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1518 static ssize_t max_number_of_rtt_store(struct device *dev,
1519 struct device_attribute *attr,
1520 const char *buf, size_t count)
1521 {
1522 struct ufs_hba *hba = dev_get_drvdata(dev);
1523 struct ufs_dev_info *dev_info = &hba->dev_info;
1524 struct scsi_device *sdev;
1525 unsigned int rtt;
1526 int ret;
1527
1528 if (kstrtouint(buf, 0, &rtt))
1529 return -EINVAL;
1530
1531 if (rtt > dev_info->rtt_cap) {
1532 dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
1533 return -EINVAL;
1534 }
1535
1536 down(&hba->host_sem);
1537 if (!ufshcd_is_user_access_allowed(hba)) {
1538 ret = -EBUSY;
1539 goto out;
1540 }
1541
1542 ufshcd_rpm_get_sync(hba);
1543
1544 shost_for_each_device(sdev, hba->host)
1545 blk_mq_freeze_queue(sdev->request_queue);
1546
1547 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
1548 QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
1549
1550 shost_for_each_device(sdev, hba->host)
1551 blk_mq_unfreeze_queue(sdev->request_queue);
1552
1553 ufshcd_rpm_put_sync(hba);
1554
1555 out:
1556 up(&hba->host_sem);
1557 return ret < 0 ? ret : count;
1558 }
1559
1560 static DEVICE_ATTR_RW(max_number_of_rtt);
1561
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
	return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
		idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}

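/*
 * Generate a read-only sysfs file that reports a device attribute through a
 * READ_ATTR query, printed as a 32-bit hexadecimal value.
 */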
#define UFS_ATTRIBUTE(_name, _uname)					\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	u32 value;							\
	int ret;							\
	u8 index = 0;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
		QUERY_ATTR_IDN##_uname, index, 0, &value);		\
	ufshcd_rpm_put_sync(hba);					\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "0x%08X\n", value);			\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)

UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);

static struct attribute *ufs_sysfs_attributes[] = {
	&dev_attr_boot_lun_enabled.attr,
	&dev_attr_current_power_mode.attr,
	&dev_attr_active_icc_level.attr,
	&dev_attr_ooo_data_enabled.attr,
	&dev_attr_bkops_status.attr,
	&dev_attr_purge_status.attr,
	&dev_attr_max_data_in_size.attr,
	&dev_attr_max_data_out_size.attr,
	&dev_attr_reference_clock_frequency.attr,
	&dev_attr_configuration_descriptor_lock.attr,
	&dev_attr_max_number_of_rtt.attr,
	&dev_attr_exception_event_control.attr,
	&dev_attr_exception_event_status.attr,
	&dev_attr_ffu_status.attr,
	&dev_attr_psa_state.attr,
	&dev_attr_psa_data_size.attr,
	&dev_attr_wb_flush_status.attr,
	&dev_attr_wb_avail_buf.attr,
	&dev_attr_wb_life_time_est.attr,
	&dev_attr_wb_cur_buf.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_attributes_group = {
	.name = "attributes",
	.attrs = ufs_sysfs_attributes,
};

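/*
 * Helper for the HID (host-initiated defrag) attribute files: run one
 * attribute query with host_sem held and the device runtime-resumed.
 */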
static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			  enum attr_idn idn, u32 *attr_val)
{
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return ret;
}

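/* Writing "enable"/"disable" starts or stops HID fragmentation analysis. */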
static ssize_t analysis_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_WO(analysis_trigger);

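/* Writing "enable"/"disable" starts or stops analysis plus defragmentation. */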
static ssize_t defrag_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_WO(defrag_trigger);

static ssize_t fragmented_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(fragmented_size);

static ssize_t defrag_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_SIZE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t defrag_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_SIZE, &value);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_RW(defrag_size);

static ssize_t progress_ratio_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(progress_ratio);

static ssize_t state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		(enum attr_idn)QUERY_ATTR_IDN_HID_STATE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
}

static DEVICE_ATTR_RO(state);

static struct attribute *ufs_sysfs_hid[] = {
	&dev_attr_analysis_trigger.attr,
	&dev_attr_defrag_trigger.attr,
	&dev_attr_fragmented_size.attr,
	&dev_attr_defrag_size.attr,
	&dev_attr_progress_ratio.attr,
	&dev_attr_state.attr,
	NULL,
};

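/* The hid/ group is only visible when the device advertises HID support. */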
static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return to_hba_priv(hba)->hid_sup ? attr->mode : 0;
}

static const struct attribute_group ufs_sysfs_hid_group = {
	.name = "hid",
	.attrs = ufs_sysfs_hid,
	.is_visible = ufs_sysfs_hid_is_visible,
};

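/* All HBA-level groups, registered by ufs_sysfs_add_nodes(). */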
static const struct attribute_group *ufs_sysfs_groups[] = {
	&ufs_sysfs_default_group,
	&ufs_sysfs_capabilities_group,
	&ufs_sysfs_ufshci_group,
	&ufs_sysfs_monitor_group,
	&ufs_sysfs_power_info_group,
	&ufs_sysfs_device_descriptor_group,
	&ufs_sysfs_interconnect_descriptor_group,
	&ufs_sysfs_geometry_descriptor_group,
	&ufs_sysfs_health_descriptor_group,
	&ufs_sysfs_power_descriptor_group,
	&ufs_sysfs_string_descriptors_group,
	&ufs_sysfs_flags_group,
	&ufs_sysfs_attributes_group,
	&ufs_sysfs_hid_group,
	NULL,
};

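/*
 * Generate a per-LUN sysfs file that reads one parameter out of the unit
 * descriptor of the LUN behind the SCSI device.
 */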
#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size)		\
static ssize_t _pname##_show(struct device *dev,			\
	struct device_attribute *attr, char *buf)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufs_hba *hba = shost_priv(sdev->host);			\
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))		\
		return -EINVAL;						\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
}									\
static DEVICE_ATTR_RO(_pname)

#define UFS_UNIT_DESC_PARAM(_name, _uname, _size)			\
	UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)

UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_unit_descriptor[] = {
	&dev_attr_lu_enable.attr,
	&dev_attr_boot_lun_id.attr,
	&dev_attr_lun_write_protect.attr,
	&dev_attr_lun_queue_depth.attr,
	&dev_attr_psa_sensitive.attr,
	&dev_attr_lun_memory_type.attr,
	&dev_attr_data_reliability.attr,
	&dev_attr_logical_block_size.attr,
	&dev_attr_logical_block_count.attr,
	&dev_attr_erase_block_size.attr,
	&dev_attr_provisioning_type.attr,
	&dev_attr_physical_memory_resource_count.attr,
	&dev_attr_context_capabilities.attr,
	&dev_attr_large_unit_granularity.attr,
	&dev_attr_wb_buf_alloc_units.attr,
	NULL,
};

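/*
 * Hide the whole group for WLUNs that have no unit descriptor, and hide the
 * WriteBooster allocation field for the RPMB WLUN.
 */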
static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	umode_t mode = attr->mode;

	if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN)
		/* Boot and device WLUN have no unit descriptors */
		mode = 0;
	if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr)
		mode = 0;

	return mode;
}

const struct attribute_group ufs_sysfs_unit_descriptor_group = {
	.name = "unit_descriptor",
	.attrs = ufs_sysfs_unit_descriptor,
	.is_visible = ufs_unit_descriptor_is_visible,
};

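/* Read the DynCapNeeded attribute for the LUN behind this SCSI device. */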
static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 value;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);

out:
	up(&hba->host_sem);
	return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);

static struct attribute *ufs_sysfs_lun_attributes[] = {
	&dev_attr_dyn_cap_needed_attribute.attr,
	NULL,
};

const struct attribute_group ufs_sysfs_lun_attributes_group = {
	.attrs = ufs_sysfs_lun_attributes,
};

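/* Register/unregister all HBA-level sysfs groups on the controller device. */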
void ufs_sysfs_add_nodes(struct device *dev)
{
	int ret;

	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
	if (ret)
		dev_err(dev,
			"%s: sysfs groups creation failed (err = %d)\n",
			__func__, ret);
}

void ufs_sysfs_remove_nodes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}