1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Universal Flash Storage Host controller driver
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 */
11
12 #ifndef _UFSHCD_H
13 #define _UFSHCD_H
14
15 #include <linux/bitfield.h>
16 #include <linux/blk-crypto-profile.h>
17 #include <linux/blk-mq.h>
18 #include <linux/devfreq.h>
19 #include <linux/msi.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/dma-direction.h>
22 #include <scsi/scsi_device.h>
23 #include <linux/android_kabi.h>
24 #include <ufs/unipro.h>
25 #include <ufs/ufs.h>
26 #include <ufs/ufs_quirks.h>
27 #include <ufs/ufshci.h>
28
29 #define UFSHCD "ufshcd"
30
31 struct ufs_hba;
32
/* Types of internal (device management) commands issued by the driver. */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,	/* NOP OUT */
	DEV_CMD_TYPE_QUERY	= 0x1,	/* Query Request */
	DEV_CMD_TYPE_RPMB	= 0x2,	/* RPMB request */
};
38
/*
 * Categories of errors/events recorded in the per-event history rings
 * (see struct ufs_event_hist and struct ufs_stats below).
 */
enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,
	UFS_EVT_WL_SUSP_ERR,
	UFS_EVT_WL_RES_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	/* number of event types; used to size history arrays - keep last */
	UFS_EVT_CNT,
};
63
/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	struct completion done;
};
81
/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,		/* runtime PM suspend/resume */
	UFS_SYSTEM_PM,		/* system-wide suspend/resume */
	UFS_SHUTDOWN_PM,	/* shutdown */
};
88
/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};
96
/* Test/set helpers for hba->uic_link_state */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

/* Test/set helpers for hba->curr_dev_pwr_mode (UFS device power mode) */
#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_set_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
128
/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings, except DeepSleep
 * which is lower than PowerDown with power on but not PowerDown with
 * power off.
 * The (device state, link state) pair each level maps to is defined by
 * a lookup table in the core driver (not in this header).
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_6,
	UFS_PM_LVL_MAX	/* number of levels - keep last */
};
145
/**
 * struct ufs_pm_lvl_states - device/link state pair for one PM level
 * @dev_state: UFS device power mode for this level
 * @link_state: UniPro link state for this level
 */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
150
/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @cmd: pointer to SCSI command
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
 * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
 * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
 * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	u64 issue_time_stamp_local_clock;
	ktime_t compl_time_stamp;
	u64 compl_time_stamp_local_clock;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;

	ANDROID_KABI_RESERVE(1);
};
206
/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
218
/**
 * struct ufs_dev_cmd - all fields associated with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: Device management query information
 * @cqe: completion queue entry associated with this command
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
	struct cq_entry *cqe;
};
233
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @keep_link_active: indicates that the clk should not be disabled if
 *		      link is active
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool keep_link_active;
	bool enabled;
};
256
/* Whether a variant-ops notification is issued before or after the change. */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};
261
/* PA-layer (physical adapter) attributes describing a power/link mode. */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high-speed rate series */
};
271
/* A PA-layer power mode plus a validity flag. */
struct ufs_pwr_mode_info {
	bool is_valid;			/* true if @info holds a valid mode */
	struct ufs_pa_layer_attr info;	/* the power mode attributes */
};
276
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to set some things
 * @setup_task_mgmt: called before any task management request is issued
 *                  to set some things
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 * @event_notify: called to notify important events
 * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
 * @mcq_config_resource: called to configure MCQ platform resources
 * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
 * @op_runtime_config: called to config Operation and runtime regs Pointers
 * @get_outstanding_cqs: called to get outstanding completion queues
 * @config_esi: called to config Event Specific Interrupt
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void    (*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *,
					struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *hba, int tag,
				  bool is_scsi_cmd);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
					enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op,
					enum ufs_notify_change_status);
	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	int	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
	void	(*event_notify)(struct ufs_hba *hba,
				enum ufs_event_type evt, void *data);
	void	(*reinit_notify)(struct ufs_hba *);
	int	(*mcq_config_resource)(struct ufs_hba *hba);
	int	(*get_hba_mac)(struct ufs_hba *hba);
	int	(*op_runtime_config)(struct ufs_hba *hba);
	int	(*get_outstanding_cqs)(struct ufs_hba *hba,
				       unsigned long *ocqs);
	int	(*config_esi)(struct ufs_hba *hba);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
364
/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated (off) */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating has been requested */
	REQ_CLKS_ON,	/* ungating has been requested */
};
372
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @is_initialized: Indicates whether clock gating is initialized or not
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks.
 * @clk_gating_workq: workqueue for clock gating work.
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;

	ANDROID_KABI_RESERVE(1);
};
406
/* Power mode saved across clock scaling, with a validity flag. */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;	/* saved power mode attributes */
	bool is_valid;			/* true if @info holds a valid mode */
};
411
/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @min_gear: lowest HS gear to scale down to
 * @is_enabled: tracks if scaling is currently enabled or not, controlled by
 * clkscale_enable sysfs node
 * @is_allowed: tracks if scaling is currently allowed or not, used to block
 * clock scaling which is not invoked from devfreq governor
 * @is_initialized: Indicates whether clock scaling is initialized or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	u32 min_gear;
	bool is_enabled;
	bool is_allowed;
	bool is_initialized;
	bool is_busy_started;
	bool is_suspended;

	ANDROID_KABI_RESERVE(1);
};
454
/* Depth of each per-event-type history ring buffer. */
#define UFS_EVENT_HIST_LENGTH 8
/**
 * struct ufs_event_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @val: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 * @cnt: error counter
 */
struct ufs_event_hist {
	int pos;
	u32 val[UFS_EVENT_HIST_LENGTH];
	u64 tstamp[UFS_EVENT_HIST_LENGTH];
	unsigned long long cnt;
};
469
/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status.
 * @last_intr_ts: record the last interrupt timestamp.
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @event: array with event history.
 */
struct ufs_stats {
	u32 last_intr_status;
	u64 last_intr_ts;

	u32 hibern8_exit_cnt;
	u64 last_hibern8_exit_tstamp;
	struct ufs_event_hist event[UFS_EVT_CNT];
};
488
/**
 * enum ufshcd_state - UFS host controller state
 * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
 *	processing.
 * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
 *	SCSI commands.
 * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
 *	SCSI commands may be submitted to the controller.
 * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
 *	newly submitted SCSI commands with error code DID_BAD_TARGET.
 * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
 *	failed. Fail all SCSI commands with error code DID_ERROR.
 */
enum ufshcd_state {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
	UFSHCD_STATE_EH_SCHEDULED_FATAL,
	UFSHCD_STATE_ERROR,
};
509
/* Bitmask values for hba->quirks: deviations from the UFSHCI standard. */
enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to disable manual flush for write booster
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,

	/*
	 * This quirk needs to disable unipro timeout values
	 * before power mode change
	 */
	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING	= 1 << 13,

	/*
	 * Align DMA SG entries on a 4 KiB boundary.
	 */
	UFSHCD_QUIRK_4KB_DMA_ALIGNMENT			= 1 << 14,

	/*
	 * This quirk needs to be enabled if the host controller does not
	 * support UIC command
	 */
	UFSHCD_QUIRK_BROKEN_UIC_CMD			= 1 << 15,

	/*
	 * This quirk needs to be enabled if the host controller cannot
	 * support physical host configuration.
	 */
	UFSHCD_QUIRK_SKIP_PH_CONFIGURATION		= 1 << 16,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * 64-bit addressing supported capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS		= 1 << 17,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it's FASTAUTO only.
	 */
	UFSHCD_QUIRK_HIBERN_FASTAUTO			= 1 << 18,

	/*
	 * This quirk needs to be enabled if the host controller needs
	 * to reinit the device after switching to maximum gear.
	 */
	UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH       = 1 << 19,
};
632
/* Bitmask values for hba->android_quirks (Android-specific, kept separate). */
enum ufshcd_android_quirks {

	/*
	 * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
	 *
	 * This quirk needs to be enabled if the host controller supports inline
	 * encryption, but it needs to initialize the crypto capabilities in a
	 * nonstandard way and/or it needs to override blk_crypto_ll_ops.  If
	 * enabled, the standard code won't initialize the blk_crypto_profile;
	 * ufs_hba_variant_ops::init() must do it instead.
	 */
	UFSHCD_ANDROID_QUIRK_CUSTOM_CRYPTO_PROFILE	= 1 << 0,

	/*
	 * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
	 *
	 * This quirk needs to be enabled if the host controller supports inline
	 * encryption, but the CRYPTO_GENERAL_ENABLE bit is not implemented and
	 * breaks the HCE sequence if used.
	 */
	UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE	= 1 << 1,

	/*
	 * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
	 *
	 * This quirk needs to be enabled if the host controller requires that
	 * the PRDT be cleared after each encrypted request because encryption
	 * keys were stored in it.
	 */
	UFSHCD_ANDROID_QUIRK_KEYS_IN_PRDT		= 1 << 2,
};
664
/* Bitmask values for hba->caps: optional driver capabilities. */
enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn-on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,

	/*
	 * This capability allows the controller regulators to be put into
	 * lpm mode aggressively during clock gating.
	 * This would increase power savings.
	 */
	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,

	/*
	 * This capability allows the host controller driver to use DeepSleep,
	 * if it is supported by the UFS device. The host controller driver must
	 * support device hardware reset via the hba->device_reset() callback,
	 * in order to exit DeepSleep state.
	 */
	UFSHCD_CAP_DEEPSLEEP				= 1 << 10,

	/*
	 * This capability allows the host controller driver to use temperature
	 * notification if it is supported by the UFS device.
	 */
	UFSHCD_CAP_TEMP_NOTIF				= 1 << 11,

	/*
	 * Enable WriteBooster when scaling up the clock and disable
	 * WriteBooster when scaling the clock down.
	 */
	UFSHCD_CAP_WB_WITH_CLK_SCALING			= 1 << 12,
};
741
/**
 * struct ufs_hba_variant_params - variant specific tunable parameters
 * @devfreq_profile: devfreq profile used for clock scaling
 * @ondemand_data: devfreq simple-ondemand governor data
 * @hba_enable_delay_us: delay in microseconds used while enabling the host
 *	controller (NOTE(review): exact polling semantics defined by the core
 *	driver - confirm there)
 * @wb_flush_threshold: WriteBooster flush threshold
 */
struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};
748
#ifdef CONFIG_SCSI_UFS_HPB
/**
 * struct ufshpb_dev_info - UFSHPB device related info
 * @num_lu: the number of user logical unit to check whether all lu finished
 *          initialization
 * @rgn_size: device reported HPB region size
 * @srgn_size: device reported HPB sub-region size
 * @slave_conf_cnt: counter to check all lu finished initialization
 * @hpb_disabled: flag to check if HPB is disabled
 * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
 * @is_legacy: flag to check HPB 1.0
 * @control_mode: either host or device
 */
struct ufshpb_dev_info {
	int num_lu;
	int rgn_size;
	int srgn_size;
	atomic_t slave_conf_cnt;
	bool hpb_disabled;
	u8 max_hpb_single_cmd;
	bool is_legacy;
	u8 control_mode;
};
#endif
773
/*
 * Per-HBA I/O monitoring statistics. The two-element arrays appear to be
 * indexed by data direction (read vs. write) -- NOTE(review): confirm the
 * index convention against the code that updates these counters.
 */
struct ufs_hba_monitor {
	unsigned long chunk_size;	/* request size selected for monitoring */

	unsigned long nr_sec_rw[2];	/* sectors transferred */
	ktime_t total_busy[2];		/* accumulated busy time */

	unsigned long nr_req[2];	/* number of requests */
	/* latencies */
	ktime_t lat_sum[2];
	ktime_t lat_max[2];
	ktime_t lat_min[2];

	u32 nr_queued[2];		/* requests currently queued */
	ktime_t busy_start_ts[2];	/* start of current busy period */

	ktime_t enabled_ts;		/* when monitoring was enabled */
	bool enabled;			/* monitoring on/off */
};
792
/**
 * struct ufshcd_res_info - MCQ related resource regions
 *
 * @name: resource name
 * @resource: pointer to resource region
 * @base: register base address
 */
struct ufshcd_res_info {
	const char *name;
	struct resource *resource;
	void __iomem *base;
};
805
/* Indices into the MCQ resource-region table (struct ufshcd_res_info). */
enum ufshcd_res {
	RES_UFS,	/* standard UFSHCI register space */
	RES_MCQ,	/* MCQ register space */
	RES_MCQ_SQD,	/* MCQ submission queue doorbell */
	RES_MCQ_SQIS,	/* MCQ submission queue interrupt status */
	RES_MCQ_CQD,	/* MCQ completion queue doorbell */
	RES_MCQ_CQIS,	/* MCQ completion queue interrupt status */
	RES_MCQ_VS,	/* MCQ vendor specific */
	RES_MAX,	/* number of resource regions - keep last */
};
816
/**
 * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
 *
 * @offset: Doorbell Address Offset
 * @stride: Steps proportional to queue [0...31]
 * @base: base address
 */
struct ufshcd_mcq_opr_info_t {
	unsigned long offset;
	unsigned long stride;
	void __iomem *base;
};
829
/* Indices into the MCQ operation/runtime register table. */
enum ufshcd_mcq_opr {
	OPR_SQD,	/* submission queue doorbell */
	OPR_SQIS,	/* submission queue interrupt status */
	OPR_CQD,	/* completion queue doorbell */
	OPR_CQIS,	/* completion queue interrupt status */
	OPR_MAX,	/* number of register groups - keep last */
};
837
838 /**
839 * struct ufs_hba - per adapter private structure
840 * @mmio_base: UFSHCI base register address
841 * @ucdl_base_addr: UFS Command Descriptor base address
842 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
843 * @utmrdl_base_addr: UTP Task Management Descriptor base address
844 * @ucdl_dma_addr: UFS Command Descriptor DMA address
845 * @utrdl_dma_addr: UTRDL DMA address
846 * @utmrdl_dma_addr: UTMRDL DMA address
847 * @host: Scsi_Host instance of the driver
848 * @dev: device handle
849 * @ufs_device_wlun: WLUN that controls the entire UFS device.
850 * @hwmon_device: device instance registered with the hwmon core.
851 * @curr_dev_pwr_mode: active UFS device power mode.
852 * @uic_link_state: active state of the link to the UFS device.
853 * @rpm_lvl: desired UFS power management level during runtime PM.
854 * @spm_lvl: desired UFS power management level during system PM.
855 * @pm_op_in_progress: whether or not a PM operation is in progress.
856 * @ahit: value of Auto-Hibernate Idle Timer register.
857 * @lrb: local reference block
858 * @outstanding_tasks: Bits representing outstanding task requests
859 * @outstanding_lock: Protects @outstanding_reqs.
860 * @outstanding_reqs: Bits representing outstanding transfer requests
861 * @capabilities: UFS Controller Capabilities
862 * @mcq_capabilities: UFS Multi Circular Queue capabilities
863 * @nutrs: Transfer Request Queue depth supported by controller
864 * @nutmrs: Task Management Queue depth supported by controller
865 * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
866 * @ufs_version: UFS Version to which controller complies
867 * @vops: pointer to variant specific operations
868 * @vps: pointer to variant specific parameters
869 * @priv: pointer to variant specific private data
870 * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
871 * @irq: Irq number of the controller
872 * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
873 * @dev_ref_clk_freq: reference clock frequency
874 * @quirks: bitmask with information about deviations from the UFSHCI standard.
875 * @dev_quirks: bitmask with information about deviations from the UFS standard.
876 * @tmf_tag_set: TMF tag set.
877 * @tmf_queue: Used to allocate TMF tags.
878 * @tmf_rqs: array with pointers to TMF requests while these are in progress.
879 * @active_uic_cmd: handle of active UIC command
880 * @uic_cmd_mutex: mutex for UIC command
881 * @uic_async_done: completion used during UIC processing
882 * @ufshcd_state: UFSHCD state
883 * @eh_flags: Error handling flags
884 * @intr_mask: Interrupt Mask Bits
885 * @ee_ctrl_mask: Exception event control mask
886 * @ee_drv_mask: Exception event mask for driver
887 * @ee_usr_mask: Exception event mask for user (set via debugfs)
888 * @ee_ctrl_mutex: Used to serialize exception event information.
889 * @is_powered: flag to check if HBA is powered
890 * @shutting_down: flag to check if shutdown has been invoked
891 * @host_sem: semaphore used to serialize concurrent contexts
892 * @eh_wq: Workqueue that eh_work works on
893 * @eh_work: Worker to handle UFS errors that require s/w attention
894 * @eeh_work: Worker to handle exception events
895 * @errors: HBA errors
896 * @uic_error: UFS interconnect layer error status
897 * @saved_err: sticky error mask
898 * @saved_uic_err: sticky UIC error mask
899 * @ufs_stats: various error counters
900 * @force_reset: flag to force eh_work perform a full reset
901 * @force_pmc: flag to force a power mode change
902 * @silence_err_logs: flag to silence error logs
903 * @dev_cmd: ufs device management command information
904 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
905 * @nop_out_timeout: NOP OUT timeout value
906 * @dev_info: information about the UFS device
907 * @auto_bkops_enabled: to track whether bkops is enabled in device
908 * @vreg_info: UFS device voltage regulator information
909 * @clk_list_head: UFS host controller clocks list node head
910 * @req_abort_count: number of times ufshcd_abort() has been called
911 * @lanes_per_direction: number of lanes per data direction between the UFS
912 * controller and the UFS device.
913 * @pwr_info: holds current power mode
914 * @max_pwr_info: keeps the device max valid pwm
915 * @clk_gating: information related to clock gating
916 * @caps: bitmask with information about UFS controller capabilities
917 * @devfreq: frequency scaling information owned by the devfreq core
918 * @clk_scaling: frequency scaling information owned by the UFS driver
919 * @system_suspending: system suspend has been started and system resume has
920 * not yet finished.
921 * @is_sys_suspended: UFS device has been suspended because of system suspend
922 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
923 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
924 * device is known or not.
925 * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
926 * @clk_scaling_lock: used to serialize device commands and clock scaling
927 * @scsi_block_reqs_cnt: reference counting for scsi block requests
928 * @bsg_dev: struct device associated with the BSG queue
929 * @bsg_queue: BSG queue associated with the UFS controller
930 * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
931 * management) after the UFS device has finished a WriteBooster buffer
932 * flush or auto BKOP.
933 * @ufshpb_dev: information related to HPB (Host Performance Booster).
934 * @monitor: statistics about UFS commands
935 * @crypto_capabilities: Content of crypto capabilities register (0x100)
936 * @crypto_cap_array: Array of crypto capabilities
937 * @crypto_cfg_register: Start of the crypto cfg array
938 * @crypto_profile: the crypto profile of this hba (if applicable)
939 * @debugfs_root: UFS controller debugfs root directory
940 * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
941 * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
942 * ee_ctrl_mask
943 * @luns_avail: number of regular and well known LUNs supported by the UFS
944 * device
945 * @nr_hw_queues: number of hardware queues configured
946 * @nr_queues: number of Queues of different queue types
947 * @complete_put: whether or not to call ufshcd_rpm_put() from inside
948 * ufshcd_resume_complete()
 * @ext_iid_sup: is EXT_IID supported by UFSHC
950 * @mcq_sup: is mcq supported by UFSHC
951 * @mcq_enabled: is mcq ready to accept requests
952 * @res: array of resource info of MCQ registers
953 * @mcq_base: Multi circular queue registers base address
 * @uhq: array of supported hardware queues
 * @dev_cmd_queue: Queue for issuing device management commands
 * @android_quirks: bitmask for UFSHCD_ANDROID_QUIRK_* deviations
 * @scsi_host_added: whether the SCSI host has been added
 * @mcq_opr: MCQ operation and runtime registers
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct scsi_device *ufs_device_wlun;

#ifdef CONFIG_SCSI_UFS_HWMON
	struct device *hwmon_device;
#endif

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	spinlock_t outstanding_lock;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	u32 mcq_capabilities;
	int nutmrs;
	u32 reserved_slot;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
	size_t sg_entry_size;
#endif
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	unsigned int android_quirks; /* for UFSHCD_ANDROID_QUIRK_* flags */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;
	struct request **tmf_rqs;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	enum ufshcd_state ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	u16 ee_drv_mask;
	u16 ee_usr_mask;
	struct mutex ee_ctrl_mutex;
	bool is_powered;
	bool shutting_down;
	struct semaphore host_sem;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;
	int nop_out_timeout;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool system_suspending;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct mutex wb_mutex;
	struct rw_semaphore clk_scaling_lock;
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_HPB
	struct ufshpb_dev_info ufshpb_dev;
#endif

	struct ufs_hba_monitor monitor;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_crypto_profile crypto_profile;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_root;
	struct delayed_work debugfs_ee_work;
	u32 debugfs_ee_rate_limit_ms;
#endif
	u32 luns_avail;
	unsigned int nr_hw_queues;
	unsigned int nr_queues[HCTX_MAX_TYPES];
	bool complete_put;
	bool ext_iid_sup;
	/* NOTE(review): presumably set once the SCSI host has been registered — confirm against ufshcd.c */
	bool scsi_host_added;
	bool mcq_sup;
	bool mcq_enabled;
	struct ufshcd_res_info res[RES_MAX];
	void __iomem *mcq_base;
	struct ufs_hw_queue *uhq;
	struct ufs_hw_queue *dev_cmd_queue;
	/* Per-operation MCQ register group info, one entry per OPR_* index */
	struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];

	/* Android vendor/OEM extension slots and kABI padding */
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA_ARRAY(1, 2);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
1130
1131 /**
1132 * struct ufs_hw_queue - per hardware queue structure
1133 * @mcq_sq_head: base address of submission queue head pointer
1134 * @mcq_sq_tail: base address of submission queue tail pointer
1135 * @mcq_cq_head: base address of completion queue head pointer
1136 * @mcq_cq_tail: base address of completion queue tail pointer
1137 * @sqe_base_addr: submission queue entry base address
1138 * @sqe_dma_addr: submission queue dma address
1139 * @cqe_base_addr: completion queue base address
1140 * @cqe_dma_addr: completion queue dma address
1141 * @max_entries: max number of slots in this hardware queue
1142 * @id: hardware queue ID
 * @sq_tail_slot: current slot to which SQ tail pointer is pointing
1144 * @sq_lock: serialize submission queue access
1145 * @cq_tail_slot: current slot to which CQ tail pointer is pointing
1146 * @cq_head_slot: current slot to which CQ head pointer is pointing
1147 * @cq_lock: Synchronize between multiple polling instances
1148 */
struct ufs_hw_queue {
	/* MMIO addresses of the SQ/CQ head and tail pointer registers */
	void __iomem *mcq_sq_head;
	void __iomem *mcq_sq_tail;
	void __iomem *mcq_cq_head;
	void __iomem *mcq_cq_tail;

	/* SQ/CQ ring memory (CPU pointers) and their DMA addresses */
	void *sqe_base_addr;
	dma_addr_t sqe_dma_addr;
	struct cq_entry *cqe_base_addr;
	dma_addr_t cqe_dma_addr;
	u32 max_entries;
	u32 id;
	u32 sq_tail_slot;
	spinlock_t sq_lock;
	u32 cq_tail_slot;
	u32 cq_head_slot;
	spinlock_t cq_lock;
};
1167
/* Returns true if the controller is currently operating in MCQ mode */
static inline bool is_mcq_enabled(struct ufs_hba *hba)
{
	return hba->mcq_enabled;
}
1172
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
/* Scatter-gather entry size is configurable per host */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return hba->sg_entry_size;
}

static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
{
	/* An entry smaller than the standard descriptor cannot hold one */
	WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
	hba->sg_entry_size = sg_entry_size;
}
#else
/* Fixed configuration: the SG entry size is always the standard size */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return sizeof(struct ufshcd_sg_entry);
}

/* Compile-time check that only the standard size is ever requested */
#define ufshcd_set_sg_entry_size(hba, sg_entry_size)                   \
	({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
1193
sizeof_utp_transfer_cmd_desc(const struct ufs_hba * hba)1194 static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
1195 {
1196 return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
1197 }
1198
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
/* Returns true if the link may enter hibern8 while clocks are gated */
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
/* Non-zero if clock scaling is supported (note: returns int, not bool) */
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
/* Returns true if auto-BKOPS may remain enabled across suspend */
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
/* Returns true if runtime-PM autosuspend is allowed for this host */
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}
1220
ufshcd_is_intr_aggr_allowed(struct ufs_hba * hba)1221 static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
1222 {
1223 return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
1224 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
1225 }
1226
ufshcd_can_aggressive_pc(struct ufs_hba * hba)1227 static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
1228 {
1229 return !!(ufshcd_is_link_hibern8(hba) &&
1230 (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
1231 }
1232
ufshcd_is_auto_hibern8_supported(struct ufs_hba * hba)1233 static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
1234 {
1235 return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
1236 !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
1237 }
1238
/* Auto-hibern8 is enabled iff the idle-timer field of 'ahit' is non-zero */
static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
}
1243
/* Returns true if WriteBooster is allowed on this host */
static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}
1248
/* Returns true if WriteBooster should be toggled along with clock scaling */
static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
}
1253
/* MCQ register accessors, relative to the MCQ register base */
#define ufsmcq_writel(hba, val, reg) \
	writel((val), (hba)->mcq_base + (reg))
#define ufsmcq_readl(hba, reg) \
	readl((hba)->mcq_base + (reg))

/* Relaxed (non-ordering) variants of the MCQ accessors */
#define ufsmcq_writelx(hba, val, reg) \
	writel_relaxed((val), (hba)->mcq_base + (reg))
#define ufsmcq_readlx(hba, reg) \
	readl_relaxed((hba)->mcq_base + (reg))

/* UFSHCI register accessors, relative to the standard MMIO base */
#define ufshcd_writel(hba, val, reg) \
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
	readl((hba)->mmio_base + (reg))
1268
1269 /**
1270 * ufshcd_rmwl - perform read/modify/write for a controller register
1271 * @hba: per adapter instance
1272 * @mask: mask to apply on read value
1273 * @val: actual value to write
1274 * @reg: register address
1275 */
ufshcd_rmwl(struct ufs_hba * hba,u32 mask,u32 val,u32 reg)1276 static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
1277 {
1278 u32 tmp;
1279
1280 tmp = ufshcd_readl(hba, reg);
1281 tmp &= ~mask;
1282 tmp |= (val & mask);
1283 ufshcd_writel(hba, tmp, reg);
1284 }
1285
/* Host controller allocation, init/teardown and low-level control */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
/* MCQ (multi-circular-queue) helpers */
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
1303
/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	/* A NULL hba here is a driver bug, hence BUG_ON rather than a return */
	BUG_ON(!hba);
	hba->priv = variant;
}
1314
/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 *
 * Return: the pointer previously stored with ufshcd_set_variant().
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
1324
/* Runtime / system power management entry points */
#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
#endif
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
extern int ufshcd_system_freeze(struct device *dev);
extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);

/* DME attribute access and power-mode change helpers */
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
				      int agreed_gear,
				      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *desired_pwr_mode);
extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */
1354
/* Set a local attribute (NORMAL set type) */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

/* Set a local attribute (STATIC set type) */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

/* Set a peer attribute (NORMAL set type) */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

/* Set a peer attribute (STATIC set type) */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

/* Read a local attribute */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

/* Read a peer attribute */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
1394
ufshcd_is_hs_mode(struct ufs_pa_layer_attr * pwr_info)1395 static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
1396 {
1397 return (pwr_info->pwr_rx == FAST_MODE ||
1398 pwr_info->pwr_rx == FASTAUTO_MODE) &&
1399 (pwr_info->pwr_tx == FAST_MODE ||
1400 pwr_info->pwr_tx == FASTAUTO_MODE);
1401 }
1402
/* Disable LCC on the host side by writing 0 to PA_LOCAL_TX_LCC_ENABLE */
static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
1407
1408 /* Expose Query-Request API */
1409 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
1410 enum query_opcode opcode,
1411 enum desc_idn idn, u8 index,
1412 u8 selector,
1413 u8 *desc_buf, int *buf_len);
1414 int ufshcd_read_desc_param(struct ufs_hba *hba,
1415 enum desc_idn desc_id,
1416 int desc_index,
1417 u8 param_offset,
1418 u8 *param_read_buf,
1419 u8 param_size);
1420 int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
1421 enum attr_idn idn, u8 index, u8 selector,
1422 u32 *attr_val);
1423 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1424 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
1425 int ufshcd_query_attr_retry(struct ufs_hba *hba,
1426 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
1427 u32 *attr_val);
1428 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1429 enum flag_idn idn, u8 index, bool *flag_res);
1430 int ufshcd_query_flag_retry(struct ufs_hba *hba,
1431 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);
1432 int ufshcd_bkops_ctrl(struct ufs_hba *hba, enum bkops_status status);
1433
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups);
/* 'ascii' argument values for ufshcd_read_string_desc() */
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

/* Clock-gating reference counting */
void ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);

int ufshcd_freeze_scsi_devs(struct ufs_hba *hba, u64 timeout_us);
void ufshcd_unfreeze_scsi_devs(struct ufs_hba *hba);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

/* Raw UPIU passthrough (used e.g. by the BSG interface) */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
				     struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
				     struct ufs_ehs *ehs_rsp, int sg_cnt,
				     struct scatterlist *sg_list, enum dma_data_direction dir);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
1472
1473 /* Wrapper functions for safely calling variant operations */
ufshcd_vops_init(struct ufs_hba * hba)1474 static inline int ufshcd_vops_init(struct ufs_hba *hba)
1475 {
1476 if (hba->vops && hba->vops->init)
1477 return hba->vops->init(hba);
1478
1479 return 0;
1480 }
1481
ufshcd_vops_phy_initialization(struct ufs_hba * hba)1482 static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
1483 {
1484 if (hba->vops && hba->vops->phy_initialization)
1485 return hba->vops->phy_initialization(hba);
1486
1487 return 0;
1488 }
1489
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

/* Exception-event (EE) control mask helpers */
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr);
1499
1500 #endif /* End of Header */
1501