1 /*
2 * Universal Flash Storage Host controller driver Core
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
7 *
8 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
35 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
38 */
39
40 #include <linux/async.h>
41 #include <linux/devfreq.h>
42 #include <linux/blkdev.h>
43
44 #include "ufshcd.h"
45 #include "unipro.h"
46
47 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
48 UTP_TASK_REQ_COMPL |\
49 UFSHCD_ERROR_MASK)
50 /* UIC command timeout, unit: ms */
51 #define UIC_CMD_TIMEOUT 500
52
53 /* NOP OUT retries waiting for NOP IN response */
54 #define NOP_OUT_RETRIES 10
55 /* Timeout after 30 msecs if NOP OUT hangs without response */
56 #define NOP_OUT_TIMEOUT 30 /* msecs */
57
58 /* Query request retries */
59 #define QUERY_REQ_RETRIES 10
60 /* Query request timeout */
61 #define QUERY_REQ_TIMEOUT 30 /* msec */
62
63 /* Task management command timeout */
64 #define TM_CMD_TIMEOUT 100 /* msecs */
65
66 /* maximum number of link-startup retries */
67 #define DME_LINKSTARTUP_RETRIES 3
68
69 /* maximum number of reset retries before giving up */
70 #define MAX_HOST_RESET_RETRIES 5
71
72 /* Expose the flag value from utp_upiu_query.value */
73 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
74
75 /* Interrupt aggregation default timeout, unit: 40us */
76 #define INT_AGGR_DEF_TO 0x02
77
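/* Enable or disable a voltage regulator depending on the @_on flag */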
78 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
79 ({ \
80 int _ret; \
81 if (_on) \
82 _ret = ufshcd_enable_vreg(_dev, _vreg); \
83 else \
84 _ret = ufshcd_disable_vreg(_dev, _vreg); \
85 _ret; \
86 })
87
88 static u32 ufs_query_desc_max_size[] = {
89 QUERY_DESC_DEVICE_MAX_SIZE,
90 QUERY_DESC_CONFIGURAION_MAX_SIZE,
91 QUERY_DESC_UNIT_MAX_SIZE,
92 QUERY_DESC_RFU_MAX_SIZE,
93 QUERY_DESC_INTERCONNECT_MAX_SIZE,
94 QUERY_DESC_STRING_MAX_SIZE,
95 QUERY_DESC_RFU_MAX_SIZE,
96 QUERY_DESC_GEOMETRY_MAZ_SIZE,
97 QUERY_DESC_POWER_MAX_SIZE,
98 QUERY_DESC_RFU_MAX_SIZE,
99 };
100
101 enum {
102 UFSHCD_MAX_CHANNEL = 0,
103 UFSHCD_MAX_ID = 1,
104 UFSHCD_CMD_PER_LUN = 32,
105 UFSHCD_CAN_QUEUE = 32,
106 };
107
108 /* UFSHCD states */
109 enum {
110 UFSHCD_STATE_RESET,
111 UFSHCD_STATE_ERROR,
112 UFSHCD_STATE_OPERATIONAL,
113 };
114
115 /* UFSHCD error handling flags */
116 enum {
117 UFSHCD_EH_IN_PROGRESS = (1 << 0),
118 };
119
120 /* UFSHCD UIC layer error flags */
121 enum {
122 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
123 UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
124 UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
125 UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
126 };
127
128 /* Interrupt configuration options */
129 enum {
130 UFSHCD_INT_DISABLE,
131 UFSHCD_INT_ENABLE,
132 UFSHCD_INT_CLEAR,
133 };
134
135 #define ufshcd_set_eh_in_progress(h) \
136 (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
137 #define ufshcd_eh_in_progress(h) \
138 (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
139 #define ufshcd_clear_eh_in_progress(h) \
140 (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
141
142 #define ufshcd_set_ufs_dev_active(h) \
143 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
144 #define ufshcd_set_ufs_dev_sleep(h) \
145 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
146 #define ufshcd_set_ufs_dev_poweroff(h) \
147 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
148 #define ufshcd_is_ufs_dev_active(h) \
149 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
150 #define ufshcd_is_ufs_dev_sleep(h) \
151 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
152 #define ufshcd_is_ufs_dev_poweroff(h) \
153 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
154
155 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
156 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
157 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
158 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
159 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
160 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
161 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
162 };
163
164 static inline enum ufs_dev_pwr_mode
165 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
166 {
167 return ufs_pm_lvl_states[lvl].dev_state;
168 }
169
170 static inline enum uic_link_state
171 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
172 {
173 return ufs_pm_lvl_states[lvl].link_state;
174 }
175
176 static void ufshcd_tmc_handler(struct ufs_hba *hba);
177 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
178 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
179 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
180 static void ufshcd_hba_exit(struct ufs_hba *hba);
181 static int ufshcd_probe_hba(struct ufs_hba *hba);
182 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
183 bool skip_ref_clk);
184 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
185 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
186 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
187 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
188 static irqreturn_t ufshcd_intr(int irq, void *__hba);
189 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
190 struct ufs_pa_layer_attr *desired_pwr_mode);
191
192 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
193 {
194 int ret = 0;
195
196 if (!hba->is_irq_enabled) {
197 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
198 hba);
199 if (ret)
200 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
201 __func__, ret);
202 hba->is_irq_enabled = true;
203 }
204
205 return ret;
206 }
207
208 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
209 {
210 if (hba->is_irq_enabled) {
211 free_irq(hba->irq, hba);
212 hba->is_irq_enabled = false;
213 }
214 }
215
216 /*
217 * ufshcd_wait_for_register - wait for register value to change
218 * @hba - per-adapter interface
219 * @reg - mmio register offset
220 * @mask - mask to apply to read register value
221 * @val - wait condition
222 * @interval_us - polling interval in microsecs
223 * @timeout_ms - timeout in millisecs
224 *
225 * Returns -ETIMEDOUT on error, zero on success
226 */
227 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
228 u32 val, unsigned long interval_us, unsigned long timeout_ms)
229 {
230 int err = 0;
231 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
232
233 /* ignore bits that we don't intend to wait on */
234 val = val & mask;
235
236 while ((ufshcd_readl(hba, reg) & mask) != val) {
237 /* wakeup within 50us of expiry */
238 usleep_range(interval_us, interval_us + 50);
239
240 if (time_after(jiffies, timeout)) {
241 if ((ufshcd_readl(hba, reg) & mask) != val)
242 err = -ETIMEDOUT;
243 break;
244 }
245 }
246
247 return err;
248 }
249
250 /**
251 * ufshcd_get_intr_mask - Get the interrupt bit mask
252 * @hba - Pointer to adapter instance
253 *
254 * Returns interrupt bit mask per version
255 */
256 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
257 {
258 if (hba->ufs_version == UFSHCI_VERSION_10)
259 return INTERRUPT_MASK_ALL_VER_10;
260 else
261 return INTERRUPT_MASK_ALL_VER_11;
262 }
263
264 /**
265 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
266 * @hba - Pointer to adapter instance
267 *
268 * Returns UFSHCI version supported by the controller
269 */
270 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
271 {
272 return ufshcd_readl(hba, REG_UFS_VERSION);
273 }
274
275 /**
276 * ufshcd_is_device_present - Check if any device is connected to
277 * the host controller
278 * @hba: pointer to adapter instance
279 *
280 * Returns 1 if device present, 0 if no device detected
281 */
282 static inline int ufshcd_is_device_present(struct ufs_hba *hba)
283 {
284 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
285 DEVICE_PRESENT) ? 1 : 0;
286 }
287
288 /**
289 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
290 * @lrb: pointer to local command reference block
291 *
292 * This function is used to get the OCS field from UTRD
293 * Returns the OCS field in the UTRD
294 */
295 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
296 {
297 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
298 }
299
300 /**
301 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
302 * @task_req_descp: pointer to utp_task_req_desc structure
303 *
304 * This function is used to get the OCS field from UTMRD
305 * Returns the OCS field in the UTMRD
306 */
307 static inline int
308 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
309 {
310 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
311 }
312
313 /**
314 * ufshcd_get_tm_free_slot - get a free slot for task management request
315 * @hba: per adapter instance
316 * @free_slot: pointer to variable with available slot value
317 *
318 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
319 * Returns false if a free slot is not available, else returns true with the
320 * tag value in @free_slot.
321 */
322 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
323 {
324 int tag;
325 bool ret = false;
326
327 if (!free_slot)
328 goto out;
329
330 do {
331 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
332 if (tag >= hba->nutmrs)
333 goto out;
334 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
335
336 *free_slot = tag;
337 ret = true;
338 out:
339 return ret;
340 }
341
342 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
343 {
344 clear_bit_unlock(slot, &hba->tm_slots_in_use);
345 }
346
347 /**
348 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
349 * @hba: per adapter instance
350 * @pos: position of the bit to be cleared
351 */
352 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
353 {
354 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
355 }
356
357 /**
358 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
359 * @reg: Register value of host controller status
360 *
361 * Returns 0 on success and a positive value on failure
362 */
363 static inline int ufshcd_get_lists_status(u32 reg)
364 {
365 /*
366 * The mask 0xFF is for the following HCS register bits
367 * Bit Description
368 * 0 Device Present
369 * 1 UTRLRDY
370 * 2 UTMRLRDY
371 * 3 UCRDY
372 * 4 HEI
373 * 5 DEI
374 * 6-7 reserved
375 */
376 return (((reg) & (0xFF)) >> 1) ^ (0x07);
377 }
378
379 /**
380 * ufshcd_get_uic_cmd_result - Get the UIC command result
381 * @hba: Pointer to adapter instance
382 *
383 * This function gets the result of UIC command completion
384 * Returns 0 on success, non zero value on error
385 */
386 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
387 {
388 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
389 MASK_UIC_COMMAND_RESULT;
390 }
391
392 /**
393 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
394 * @hba: Pointer to adapter instance
395 *
396 * This function gets UIC command argument3
397 * Returns the value of UIC command argument3
398 */
399 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
400 {
401 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
402 }
403
404 /**
405 * ufshcd_get_req_rsp - returns the TR response transaction type
406 * @ucd_rsp_ptr: pointer to response UPIU
407 */
408 static inline int
409 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
410 {
411 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
412 }
413
414 /**
415 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
416 * @ucd_rsp_ptr: pointer to response UPIU
417 *
418 * This function gets the response status and scsi_status from response UPIU
419 * Returns the response result code.
420 */
421 static inline int
422 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
423 {
424 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
425 }
426
427 /*
428 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
429 * from response UPIU
430 * @ucd_rsp_ptr: pointer to response UPIU
431 *
432 * Return the data segment length.
433 */
434 static inline unsigned int
435 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
436 {
437 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
438 MASK_RSP_UPIU_DATA_SEG_LEN;
439 }
440
441 /**
442 * ufshcd_is_exception_event - Check if the device raised an exception event
443 * @ucd_rsp_ptr: pointer to response UPIU
444 *
445 * The function checks if the device raised an exception event indicated in
446 * the Device Information field of response UPIU.
447 *
448 * Returns true if exception is raised, false otherwise.
449 */
450 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
451 {
452 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
453 MASK_RSP_EXCEPTION_EVENT ? true : false;
454 }
455
456 /**
457 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
458 * @hba: per adapter instance
459 */
460 static inline void
461 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
462 {
463 ufshcd_writel(hba, INT_AGGR_ENABLE |
464 INT_AGGR_COUNTER_AND_TIMER_RESET,
465 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
466 }
467
468 /**
469 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
470 * @hba: per adapter instance
471 * @cnt: Interrupt aggregation counter threshold
472 * @tmout: Interrupt aggregation timeout value
473 */
474 static inline void
475 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
476 {
477 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
478 INT_AGGR_COUNTER_THLD_VAL(cnt) |
479 INT_AGGR_TIMEOUT_VAL(tmout),
480 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
481 }
482
483 /**
484 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
485 * When the run-stop registers are set to 1, it indicates to the
486 * host controller that it can process the requests
487 * @hba: per adapter instance
488 */
489 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
490 {
491 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
492 REG_UTP_TASK_REQ_LIST_RUN_STOP);
493 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
494 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
495 }
496
497 /**
498 * ufshcd_hba_start - Start controller initialization sequence
499 * @hba: per adapter instance
500 */
501 static inline void ufshcd_hba_start(struct ufs_hba *hba)
502 {
503 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
504 }
505
506 /**
507 * ufshcd_is_hba_active - Get controller state
508 * @hba: per adapter instance
509 *
510 * Returns zero if controller is active, 1 otherwise
511 */
512 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
513 {
514 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
515 }
516
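/**
 * ufshcd_ungate_work - turn the gated controller clocks back on
 * @work: pointer to the clk_gating.ungate_work work item
 *
 * Re-enables the clocks, exits hibern8 if the link was put into it while
 * gating, resumes devfreq scaling and unblocks queued SCSI requests.
 */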
517 static void ufshcd_ungate_work(struct work_struct *work)
518 {
519 int ret;
520 unsigned long flags;
521 struct ufs_hba *hba = container_of(work, struct ufs_hba,
522 clk_gating.ungate_work);
523
524 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
525
526 spin_lock_irqsave(hba->host->host_lock, flags);
527 if (hba->clk_gating.state == CLKS_ON) {
528 spin_unlock_irqrestore(hba->host->host_lock, flags);
529 goto unblock_reqs;
530 }
531
532 spin_unlock_irqrestore(hba->host->host_lock, flags);
533 ufshcd_setup_clocks(hba, true);
534
535 /* Exit from hibern8 */
536 if (ufshcd_can_hibern8_during_gating(hba)) {
537 /* Prevent gating in this path */
538 hba->clk_gating.is_suspended = true;
539 if (ufshcd_is_link_hibern8(hba)) {
540 ret = ufshcd_uic_hibern8_exit(hba);
541 if (ret)
542 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
543 __func__, ret);
544 else
545 ufshcd_set_link_active(hba);
546 }
547 hba->clk_gating.is_suspended = false;
548 }
549 unblock_reqs:
550 if (ufshcd_is_clkscaling_enabled(hba))
551 devfreq_resume_device(hba->devfreq);
552 scsi_unblock_requests(hba->host);
553 }
554
555 /**
556 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
557 * Also, exit from hibern8 mode and set the link as active.
558 * @hba: per adapter instance
559 * @async: This indicates whether caller should ungate clocks asynchronously.
560 */
561 int ufshcd_hold(struct ufs_hba *hba, bool async)
562 {
563 int rc = 0;
564 unsigned long flags;
565
566 if (!ufshcd_is_clkgating_allowed(hba))
567 goto out;
568 spin_lock_irqsave(hba->host->host_lock, flags);
569 hba->clk_gating.active_reqs++;
570
571 start:
572 switch (hba->clk_gating.state) {
573 case CLKS_ON:
574 break;
575 case REQ_CLKS_OFF:
576 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
577 hba->clk_gating.state = CLKS_ON;
578 break;
579 }
580 /*
581 * If we are here, it means gating work is either done or
582 * currently running. Hence, fall through to cancel gating
583 * work and to enable clocks.
584 */
585 case CLKS_OFF:
586 scsi_block_requests(hba->host);
587 hba->clk_gating.state = REQ_CLKS_ON;
588 schedule_work(&hba->clk_gating.ungate_work);
589 /*
590 * fall through to check if we should wait for this
591 * work to be done or not.
592 */
593 case REQ_CLKS_ON:
594 if (async) {
595 rc = -EAGAIN;
596 hba->clk_gating.active_reqs--;
597 break;
598 }
599
600 spin_unlock_irqrestore(hba->host->host_lock, flags);
601 flush_work(&hba->clk_gating.ungate_work);
602 /* Make sure state is CLKS_ON before returning */
603 spin_lock_irqsave(hba->host->host_lock, flags);
604 goto start;
605 default:
606 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
607 __func__, hba->clk_gating.state);
608 break;
609 }
610 spin_unlock_irqrestore(hba->host->host_lock, flags);
611 out:
612 return rc;
613 }
614
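/**
 * ufshcd_gate_work - gate (switch off) the controller clocks when idle
 * @work: pointer to the clk_gating.gate_work delayed work item
 *
 * If no requests or UIC commands are pending, puts the link into hibern8
 * (when allowed), suspends devfreq scaling and switches off the clocks.
 * The device ref_clk is kept on while the link is still active.
 */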
615 static void ufshcd_gate_work(struct work_struct *work)
616 {
617 struct ufs_hba *hba = container_of(work, struct ufs_hba,
618 clk_gating.gate_work.work);
619 unsigned long flags;
620
621 spin_lock_irqsave(hba->host->host_lock, flags);
622 if (hba->clk_gating.is_suspended) {
623 hba->clk_gating.state = CLKS_ON;
624 goto rel_lock;
625 }
626
627 if (hba->clk_gating.active_reqs
628 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
629 || hba->lrb_in_use || hba->outstanding_tasks
630 || hba->active_uic_cmd || hba->uic_async_done)
631 goto rel_lock;
632
633 spin_unlock_irqrestore(hba->host->host_lock, flags);
634
635 /* put the link into hibern8 mode before turning off clocks */
636 if (ufshcd_can_hibern8_during_gating(hba)) {
637 if (ufshcd_uic_hibern8_enter(hba)) {
638 hba->clk_gating.state = CLKS_ON;
639 goto out;
640 }
641 ufshcd_set_link_hibern8(hba);
642 }
643
644 if (ufshcd_is_clkscaling_enabled(hba)) {
645 devfreq_suspend_device(hba->devfreq);
646 hba->clk_scaling.window_start_t = 0;
647 }
648
649 if (!ufshcd_is_link_active(hba))
650 ufshcd_setup_clocks(hba, false);
651 else
652 /* If link is active, device ref_clk can't be switched off */
653 __ufshcd_setup_clocks(hba, false, true);
654
655 /*
656 * In case you are here to cancel this work, the gating state
657 * would be marked as REQ_CLKS_ON. In this case keep the state
658 * as REQ_CLKS_ON, which anyway implies that clocks are off
659 * and a request to turn them on is pending. This way we keep
660 * the state machine intact, which ultimately prevents running
661 * the cancel work multiple times when new requests arrive
662 * before the current cancel work is done.
663 */
664 spin_lock_irqsave(hba->host->host_lock, flags);
665 if (hba->clk_gating.state == REQ_CLKS_OFF)
666 hba->clk_gating.state = CLKS_OFF;
667
668 rel_lock:
669 spin_unlock_irqrestore(hba->host->host_lock, flags);
670 out:
671 return;
672 }
673
674 /* host lock must be held before calling this variant */
675 static void __ufshcd_release(struct ufs_hba *hba)
676 {
677 if (!ufshcd_is_clkgating_allowed(hba))
678 return;
679
680 hba->clk_gating.active_reqs--;
681
682 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
683 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
684 || hba->lrb_in_use || hba->outstanding_tasks
685 || hba->active_uic_cmd || hba->uic_async_done)
686 return;
687
688 hba->clk_gating.state = REQ_CLKS_OFF;
689 schedule_delayed_work(&hba->clk_gating.gate_work,
690 msecs_to_jiffies(hba->clk_gating.delay_ms));
691 }
692
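/**
 * ufshcd_release - drop a clock gating reference taken by ufshcd_hold()
 * @hba: per adapter instance
 *
 * Locked wrapper around __ufshcd_release().
 */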
693 void ufshcd_release(struct ufs_hba *hba)
694 {
695 unsigned long flags;
696
697 spin_lock_irqsave(hba->host->host_lock, flags);
698 __ufshcd_release(hba);
699 spin_unlock_irqrestore(hba->host->host_lock, flags);
700 }
701
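/* sysfs show handler for the clkgate_delay_ms attribute */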
702 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
703 struct device_attribute *attr, char *buf)
704 {
705 struct ufs_hba *hba = dev_get_drvdata(dev);
706
707 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
708 }
709
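/* sysfs store handler for the clkgate_delay_ms attribute */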
710 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
711 struct device_attribute *attr, const char *buf, size_t count)
712 {
713 struct ufs_hba *hba = dev_get_drvdata(dev);
714 unsigned long flags, value;
715
716 if (kstrtoul(buf, 0, &value))
717 return -EINVAL;
718
719 spin_lock_irqsave(hba->host->host_lock, flags);
720 hba->clk_gating.delay_ms = value;
721 spin_unlock_irqrestore(hba->host->host_lock, flags);
722 return count;
723 }
724
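/*
 * Initialize clock gating state, the gate/ungate work items and the
 * clkgate_delay_ms sysfs attribute (default gating delay is 150 ms).
 */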
725 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
726 {
727 if (!ufshcd_is_clkgating_allowed(hba))
728 return;
729
730 hba->clk_gating.delay_ms = 150;
731 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
732 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
733
734 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
735 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
736 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
737 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
738 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
739 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
740 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
741 }
742
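/* Tear down clock gating: remove the sysfs attribute and cancel work */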
743 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
744 {
745 if (!ufshcd_is_clkgating_allowed(hba))
746 return;
747 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
748 cancel_work_sync(&hba->clk_gating.ungate_work);
749 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
750 }
751
752 /* Must be called with host lock acquired */
753 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
754 {
755 if (!ufshcd_is_clkscaling_enabled(hba))
756 return;
757
758 if (!hba->clk_scaling.is_busy_started) {
759 hba->clk_scaling.busy_start_t = ktime_get();
760 hba->clk_scaling.is_busy_started = true;
761 }
762 }
763
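/*
 * Account the time the controller spent busy for clock scaling; the busy
 * window is closed once all outstanding requests have completed.
 */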
764 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
765 {
766 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
767
768 if (!ufshcd_is_clkscaling_enabled(hba))
769 return;
770
771 if (!hba->outstanding_reqs && scaling->is_busy_started) {
772 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
773 scaling->busy_start_t));
774 scaling->busy_start_t = ktime_set(0, 0);
775 scaling->is_busy_started = false;
776 }
777 }
778 /**
779 * ufshcd_send_command - Send SCSI or device management commands
780 * @hba: per adapter instance
781 * @task_tag: Task tag of the command
782 */
783 static inline
784 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
785 {
786 ufshcd_clk_scaling_start_busy(hba);
787 __set_bit(task_tag, &hba->outstanding_reqs);
788 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
789 }
790
791 /**
792 * ufshcd_copy_sense_data - Copy sense data in case of check condition
793 * @lrb - pointer to local reference block
794 */
795 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
796 {
797 int len;
798 if (lrbp->sense_buffer &&
799 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
800 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
801 memcpy(lrbp->sense_buffer,
802 lrbp->ucd_rsp_ptr->sr.sense_data,
803 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
804 }
805 }
806
807 /**
808 * ufshcd_copy_query_response() - Copy the Query Response and the data
809 * descriptor
810 * @hba: per adapter instance
811 * @lrb - pointer to local reference block
812 */
813 static
814 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
815 {
816 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
817
818 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
819
820 /* Get the descriptor */
821 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
822 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
823 GENERAL_UPIU_REQUEST_SIZE;
824 u16 resp_len;
825 u16 buf_len;
826
827 /* data segment length */
828 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
829 MASK_QUERY_DATA_SEG_LEN;
830 buf_len = be16_to_cpu(
831 hba->dev_cmd.query.request.upiu_req.length);
832 if (likely(buf_len >= resp_len)) {
833 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
834 } else {
835 dev_warn(hba->dev,
836 "%s: Response size is bigger than buffer",
837 __func__);
838 return -EINVAL;
839 }
840 }
841
842 return 0;
843 }
844
845 /**
846 * ufshcd_hba_capabilities - Read controller capabilities
847 * @hba: per adapter instance
848 */
849 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
850 {
851 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
852
853 /* nutrs and nutmrs are 0 based values */
854 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
855 hba->nutmrs =
856 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
857 }
858
859 /**
860 * ufshcd_ready_for_uic_cmd - Check if controller is ready
861 * to accept UIC commands
862 * @hba: per adapter instance
863 * Return true on success, else false
864 */
865 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
866 {
867 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
868 return true;
869 else
870 return false;
871 }
872
873 /**
874 * ufshcd_get_upmcrs - Get the power mode change request status
875 * @hba: Pointer to adapter instance
876 *
877 * This function gets the UPMCRS field of HCS register
878 * Returns value of UPMCRS field
879 */
880 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
881 {
882 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
883 }
884
885 /**
886 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
887 * @hba: per adapter instance
888 * @uic_cmd: UIC command
889 *
890 * Mutex must be held.
891 */
892 static inline void
893 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
894 {
895 WARN_ON(hba->active_uic_cmd);
896
897 hba->active_uic_cmd = uic_cmd;
898
899 /* Write Args */
900 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
901 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
902 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
903
904 /* Write UIC Cmd */
905 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
906 REG_UIC_COMMAND);
907 }
908
909 /**
910 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
911 * @hba: per adapter instance
912 * @uic_command: UIC command
913 *
914 * Must be called with mutex held.
915 * Returns 0 only if success.
916 */
917 static int
918 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
919 {
920 int ret;
921 unsigned long flags;
922
923 if (wait_for_completion_timeout(&uic_cmd->done,
924 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
925 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
926 else
927 ret = -ETIMEDOUT;
928
929 spin_lock_irqsave(hba->host->host_lock, flags);
930 hba->active_uic_cmd = NULL;
931 spin_unlock_irqrestore(hba->host->host_lock, flags);
932
933 return ret;
934 }
935
936 /**
937 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
938 * @hba: per adapter instance
939 * @uic_cmd: UIC command
940 *
941 * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
942 * with mutex held and host_lock locked.
943 * Returns 0 only if success.
944 */
945 static int
946 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
947 {
948 if (!ufshcd_ready_for_uic_cmd(hba)) {
949 dev_err(hba->dev,
950 "Controller not ready to accept UIC commands\n");
951 return -EIO;
952 }
953
954 init_completion(&uic_cmd->done);
955
956 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
957
958 return 0;
959 }
960
961 /**
962 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
963 * @hba: per adapter instance
964 * @uic_cmd: UIC command
965 *
966 * Returns 0 only if success.
967 */
968 static int
969 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
970 {
971 int ret;
972 unsigned long flags;
973
974 ufshcd_hold(hba, false);
975 mutex_lock(&hba->uic_cmd_mutex);
976 spin_lock_irqsave(hba->host->host_lock, flags);
977 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
978 spin_unlock_irqrestore(hba->host->host_lock, flags);
979 if (!ret)
980 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
981
982 mutex_unlock(&hba->uic_cmd_mutex);
983
984 ufshcd_release(hba);
985 return ret;
986 }
987
988 /**
989 * ufshcd_map_sg - Map scatter-gather list to prdt
990 * @lrbp - pointer to local reference block
991 *
992 * Returns 0 in case of success, non-zero value in case of failure
993 */
994 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
995 {
996 struct ufshcd_sg_entry *prd_table;
997 struct scatterlist *sg;
998 struct scsi_cmnd *cmd;
999 int sg_segments;
1000 int i;
1001
1002 cmd = lrbp->cmd;
1003 sg_segments = scsi_dma_map(cmd);
1004 if (sg_segments < 0)
1005 return sg_segments;
1006
1007 if (sg_segments) {
1008 lrbp->utr_descriptor_ptr->prd_table_length =
1009 cpu_to_le16((u16) (sg_segments));
1010
1011 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1012
1013 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1014 prd_table[i].size =
1015 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1016 prd_table[i].base_addr =
1017 cpu_to_le32(lower_32_bits(sg->dma_address));
1018 prd_table[i].upper_addr =
1019 cpu_to_le32(upper_32_bits(sg->dma_address));
1020 }
1021 } else {
1022 lrbp->utr_descriptor_ptr->prd_table_length = 0;
1023 }
1024
1025 return 0;
1026 }
1027
1028 /**
1029 * ufshcd_enable_intr - enable interrupts
1030 * @hba: per adapter instance
1031 * @intrs: interrupt bits
1032 */
1033 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
1034 {
1035 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1036
1037 if (hba->ufs_version == UFSHCI_VERSION_10) {
1038 u32 rw;
1039 rw = set & INTERRUPT_MASK_RW_VER_10;
1040 set = rw | ((set ^ intrs) & intrs);
1041 } else {
1042 set |= intrs;
1043 }
1044
1045 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1046 }
1047
1048 /**
1049 * ufshcd_disable_intr - disable interrupts
1050 * @hba: per adapter instance
1051 * @intrs: interrupt bits
1052 */
1053 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1054 {
1055 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1056
1057 if (hba->ufs_version == UFSHCI_VERSION_10) {
1058 u32 rw;
1059 rw = (set & INTERRUPT_MASK_RW_VER_10) &
1060 ~(intrs & INTERRUPT_MASK_RW_VER_10);
1061 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
1062
1063 } else {
1064 set &= ~intrs;
1065 }
1066
1067 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1068 }
1069
1070 /**
1071 * ufshcd_prepare_req_desc_hdr() - Fill the request's header
1072 * descriptor according to the request
1073 * @lrbp: pointer to local reference block
1074 * @upiu_flags: flags required in the header
1075 * @cmd_dir: request's data direction
1076 */
1077 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
1078 u32 *upiu_flags, enum dma_data_direction cmd_dir)
1079 {
1080 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
1081 u32 data_direction;
1082 u32 dword_0;
1083
1084 if (cmd_dir == DMA_FROM_DEVICE) {
1085 data_direction = UTP_DEVICE_TO_HOST;
1086 *upiu_flags = UPIU_CMD_FLAGS_READ;
1087 } else if (cmd_dir == DMA_TO_DEVICE) {
1088 data_direction = UTP_HOST_TO_DEVICE;
1089 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
1090 } else {
1091 data_direction = UTP_NO_DATA_TRANSFER;
1092 *upiu_flags = UPIU_CMD_FLAGS_NONE;
1093 }
1094
1095 dword_0 = data_direction | (lrbp->command_type
1096 << UPIU_COMMAND_TYPE_OFFSET);
1097 if (lrbp->intr_cmd)
1098 dword_0 |= UTP_REQ_DESC_INT_CMD;
1099
1100 /* Transfer request descriptor header fields */
1101 req_desc->header.dword_0 = cpu_to_le32(dword_0);
1102
1103 /*
1104 * assigning invalid value for command status. Controller
1105 * updates OCS on command completion, with the command
1106 * status
1107 */
1108 req_desc->header.dword_2 =
1109 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1110 }
1111
1112 /**
1113 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
1114 * for scsi commands
1115 * @lrbp - local reference block pointer
1116 * @upiu_flags - flags
1117 */
1118 static
1119 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
1120 {
1121 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1122
1123 /* command descriptor fields */
1124 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1125 UPIU_TRANSACTION_COMMAND, upiu_flags,
1126 lrbp->lun, lrbp->task_tag);
1127 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1128 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1129
1130 /* Total EHS length and Data segment length will be zero */
1131 ucd_req_ptr->header.dword_2 = 0;
1132
1133 ucd_req_ptr->sc.exp_data_transfer_len =
1134 cpu_to_be32(lrbp->cmd->sdb.length);
1135
1136 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
1137 (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
1138 }
1139
1140 /**
1141 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
1142 * for query requests
1143 * @hba: UFS hba
1144 * @lrbp: local reference block pointer
1145 * @upiu_flags: flags
1146 */
1147 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1148 struct ufshcd_lrb *lrbp, u32 upiu_flags)
1149 {
1150 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1151 struct ufs_query *query = &hba->dev_cmd.query;
1152 u16 len = be16_to_cpu(query->request.upiu_req.length);
1153 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
1154
1155 /* Query request header */
1156 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
1157 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
1158 lrbp->lun, lrbp->task_tag);
1159 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
1160 0, query->request.query_func, 0, 0);
1161
1162 /* Data segment length */
1163 ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
1164 0, 0, len >> 8, (u8)len);
1165
1166 /* Copy the Query Request buffer as is */
1167 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
1168 QUERY_OSF_SIZE);
1169
1170 /* Copy the Descriptor */
1171 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
1172 memcpy(descp, query->descriptor, len);
1173
1174 }
1175
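/**
 * ufshcd_prepare_utp_nop_upiu - fill the UPIU for a NOP OUT request
 * @lrbp: local reference block pointer
 */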
1176 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
1177 {
1178 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
1179
1180 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
1181
1182 /* command descriptor fields */
1183 ucd_req_ptr->header.dword_0 =
1184 UPIU_HEADER_DWORD(
1185 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
1186 }
1187
1188 /**
1189 * ufshcd_compose_upiu - form a UFS Protocol Information Unit (UPIU)
1190 * @hba - per adapter instance
1191 * @lrb - pointer to local reference block
1192 */
1193 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1194 {
1195 u32 upiu_flags;
1196 int ret = 0;
1197
1198 switch (lrbp->command_type) {
1199 case UTP_CMD_TYPE_SCSI:
1200 if (likely(lrbp->cmd)) {
1201 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
1202 lrbp->cmd->sc_data_direction);
1203 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
1204 } else {
1205 ret = -EINVAL;
1206 }
1207 break;
1208 case UTP_CMD_TYPE_DEV_MANAGE:
1209 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
1210 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1211 ufshcd_prepare_utp_query_req_upiu(
1212 hba, lrbp, upiu_flags);
1213 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1214 ufshcd_prepare_utp_nop_upiu(lrbp);
1215 else
1216 ret = -EINVAL;
1217 break;
1218 case UTP_CMD_TYPE_UFS:
1219 /* For UFS native command implementation */
1220 ret = -ENOTSUPP;
1221 dev_err(hba->dev, "%s: UFS native command are not supported\n",
1222 __func__);
1223 break;
1224 default:
1225 ret = -ENOTSUPP;
1226 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
1227 __func__, lrbp->command_type);
1228 break;
1229 } /* end of switch */
1230
1231 return ret;
1232 }
1233
1234 /*
1235 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1236 * @scsi_lun: scsi LUN id
1237 *
1238 * Returns UPIU LUN id
1239 */
1240 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1241 {
1242 if (scsi_is_wlun(scsi_lun))
1243 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1244 | UFS_UPIU_WLUN_ID;
1245 else
1246 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1247 }
1248
1249 /**
1250 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1251 * @upiu_wlun_id: UPIU W-LUN id
1252 *
1253 * Returns SCSI W-LUN id
1254 */
1255 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1256 {
1257 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1258 }
1259
1260 /**
1261 * ufshcd_queuecommand - main entry point for SCSI requests
1262 * @host: SCSI host pointer
1263 * @cmd: command from SCSI Midlayer
1264 *
1265 * Returns 0 for success, non-zero in case of failure
1266 */
1267 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1268 {
1269 struct ufshcd_lrb *lrbp;
1270 struct ufs_hba *hba;
1271 unsigned long flags;
1272 int tag;
1273 int err = 0;
1274
1275 hba = shost_priv(host);
1276
1277 tag = cmd->request->tag;
1278
1279 spin_lock_irqsave(hba->host->host_lock, flags);
1280 switch (hba->ufshcd_state) {
1281 case UFSHCD_STATE_OPERATIONAL:
1282 break;
1283 case UFSHCD_STATE_RESET:
1284 err = SCSI_MLQUEUE_HOST_BUSY;
1285 goto out_unlock;
1286 case UFSHCD_STATE_ERROR:
1287 set_host_byte(cmd, DID_ERROR);
1288 cmd->scsi_done(cmd);
1289 goto out_unlock;
1290 default:
1291 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1292 __func__, hba->ufshcd_state);
1293 set_host_byte(cmd, DID_BAD_TARGET);
1294 cmd->scsi_done(cmd);
1295 goto out_unlock;
1296 }
1297 spin_unlock_irqrestore(hba->host->host_lock, flags);
1298
1299 /* acquire the tag to make sure device cmds don't use it */
1300 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1301 /*
1302 * Dev manage command in progress, requeue the command.
1303 * Requeuing the command helps in cases where the request *may*
1304 * find a different tag instead of waiting for dev manage command
1305 * completion.
1306 */
1307 err = SCSI_MLQUEUE_HOST_BUSY;
1308 goto out;
1309 }
1310
1311 err = ufshcd_hold(hba, true);
1312 if (err) {
1313 err = SCSI_MLQUEUE_HOST_BUSY;
1314 clear_bit_unlock(tag, &hba->lrb_in_use);
1315 goto out;
1316 }
1317 /* IO svc time latency histogram */
1318 if (hba != NULL && cmd->request != NULL) {
1319 if (hba->latency_hist_enabled &&
1320 (cmd->request->cmd_type == REQ_TYPE_FS)) {
1321 cmd->request->lat_hist_io_start = ktime_get();
1322 cmd->request->lat_hist_enabled = 1;
1323 } else
1324 cmd->request->lat_hist_enabled = 0;
1325 }
1326
1327 WARN_ON(hba->clk_gating.state != CLKS_ON);
1328
1329 lrbp = &hba->lrb[tag];
1330
1331 WARN_ON(lrbp->cmd);
1332 lrbp->cmd = cmd;
1333 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
1334 lrbp->sense_buffer = cmd->sense_buffer;
1335 lrbp->task_tag = tag;
1336 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
1337 lrbp->intr_cmd = false;
1338 lrbp->command_type = UTP_CMD_TYPE_SCSI;
1339
1340 /* form UPIU before issuing the command */
1341 ufshcd_compose_upiu(hba, lrbp);
1342 err = ufshcd_map_sg(lrbp);
1343 if (err) {
1344 lrbp->cmd = NULL;
1345 clear_bit_unlock(tag, &hba->lrb_in_use);
1346 goto out;
1347 }
1348
1349 /* issue command to the controller */
1350 spin_lock_irqsave(hba->host->host_lock, flags);
1351 ufshcd_send_command(hba, tag);
1352 out_unlock:
1353 spin_unlock_irqrestore(hba->host->host_lock, flags);
1354 out:
1355 return err;
1356 }
1357
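/**
 * ufshcd_compose_dev_cmd - set up an lrb for a device management command
 * @hba: per adapter instance
 * @lrbp: local reference block pointer
 * @cmd_type: device management command type (NOP, Query)
 * @tag: tag of the reserved slot
 */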
1358 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1359 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1360 {
1361 lrbp->cmd = NULL;
1362 lrbp->sense_bufflen = 0;
1363 lrbp->sense_buffer = NULL;
1364 lrbp->task_tag = tag;
1365 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
1366 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
1367 lrbp->intr_cmd = true; /* No interrupt aggregation */
1368 hba->dev_cmd.type = cmd_type;
1369
1370 return ufshcd_compose_upiu(hba, lrbp);
1371 }
1372
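/*
 * Clear an outstanding transfer request and wait for the host controller
 * to clear the corresponding door-bell bit.
 */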
1373 static int
1374 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1375 {
1376 int err = 0;
1377 unsigned long flags;
1378 u32 mask = 1 << tag;
1379
1380 /* clear outstanding transaction before retry */
1381 spin_lock_irqsave(hba->host->host_lock, flags);
1382 ufshcd_utrl_clear(hba, tag);
1383 spin_unlock_irqrestore(hba->host->host_lock, flags);
1384
1385 /*
1386 * wait for the h/w to clear the corresponding bit in the door-bell.
1387 * max. wait is 1 sec.
1388 */
1389 err = ufshcd_wait_for_register(hba,
1390 REG_UTP_TRANSFER_REQ_DOOR_BELL,
1391 mask, ~mask, 1000, 1000);
1392
1393 return err;
1394 }
1395
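/* Extract the query response code from the response UPIU */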
1396 static int
1397 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1398 {
1399 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1400
1401 /* Get the UPIU response */
1402 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1403 UPIU_RSP_CODE_OFFSET;
1404 return query_res->response;
1405 }
1406
1407 /**
1408 * ufshcd_dev_cmd_completion() - handles device management command responses
1409 * @hba: per adapter instance
1410 * @lrbp: pointer to local reference block
1411 */
1412 static int
1413 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1414 {
1415 int resp;
1416 int err = 0;
1417
1418 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1419
1420 switch (resp) {
1421 case UPIU_TRANSACTION_NOP_IN:
1422 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1423 err = -EINVAL;
1424 dev_err(hba->dev, "%s: unexpected response %x\n",
1425 __func__, resp);
1426 }
1427 break;
1428 case UPIU_TRANSACTION_QUERY_RSP:
1429 err = ufshcd_check_query_response(hba, lrbp);
1430 if (!err)
1431 err = ufshcd_copy_query_response(hba, lrbp);
1432 break;
1433 case UPIU_TRANSACTION_REJECT_UPIU:
1434 /* TODO: handle Reject UPIU Response */
1435 err = -EPERM;
1436 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1437 __func__);
1438 break;
1439 default:
1440 err = -EINVAL;
1441 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1442 __func__, resp);
1443 break;
1444 }
1445
1446 return err;
1447 }
1448
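/*
 * Wait up to @max_timeout milliseconds for a device management command to
 * complete and return its status; on timeout, try to clear the command so
 * that it can be retried.
 */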
1449 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1450 struct ufshcd_lrb *lrbp, int max_timeout)
1451 {
1452 int err = 0;
1453 unsigned long time_left;
1454 unsigned long flags;
1455
1456 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1457 msecs_to_jiffies(max_timeout));
1458
1459 spin_lock_irqsave(hba->host->host_lock, flags);
1460 hba->dev_cmd.complete = NULL;
1461 if (likely(time_left)) {
1462 err = ufshcd_get_tr_ocs(lrbp);
1463 if (!err)
1464 err = ufshcd_dev_cmd_completion(hba, lrbp);
1465 }
1466 spin_unlock_irqrestore(hba->host->host_lock, flags);
1467
1468 if (!time_left) {
1469 err = -ETIMEDOUT;
1470 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
1471 /* successfully cleared the command, retry if needed */
1472 err = -EAGAIN;
1473 }
1474
1475 return err;
1476 }
1477
1478 /**
1479 * ufshcd_get_dev_cmd_tag - Get device management command tag
1480 * @hba: per-adapter instance
1481 * @tag: pointer to variable with available slot value
1482 *
1483 * Get a free slot and lock it until device management command
1484 * completes.
1485 *
1486 * Returns false if free slot is unavailable for locking, else
1487 * return true with tag value in @tag.
1488 */
1489 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1490 {
1491 int tag;
1492 bool ret = false;
1493 unsigned long tmp;
1494
1495 if (!tag_out)
1496 goto out;
1497
1498 do {
1499 tmp = ~hba->lrb_in_use;
1500 tag = find_last_bit(&tmp, hba->nutrs);
1501 if (tag >= hba->nutrs)
1502 goto out;
1503 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1504
1505 *tag_out = tag;
1506 ret = true;
1507 out:
1508 return ret;
1509 }
1510
1511 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1512 {
1513 clear_bit_unlock(tag, &hba->lrb_in_use);
1514 }
1515
1516 /**
1517 * ufshcd_exec_dev_cmd - API for sending device management requests
1518 * @hba - UFS hba
1519 * @cmd_type - specifies the type (NOP, Query...)
1520 * @timeout - timeout in milliseconds
1521 *
1522 * NOTE: Since there is only one available tag for device management commands,
1523 * it is expected you hold the hba->dev_cmd.lock mutex.
1524 */
1525 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1526 enum dev_cmd_type cmd_type, int timeout)
1527 {
1528 struct ufshcd_lrb *lrbp;
1529 int err;
1530 int tag;
1531 struct completion wait;
1532 unsigned long flags;
1533
1534 /*
1535 * Get free slot, sleep if slots are unavailable.
1536 * Even though we use wait_event() which sleeps indefinitely,
1537 * the maximum wait time is bounded by SCSI request timeout.
1538 */
1539 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1540
1541 init_completion(&wait);
1542 lrbp = &hba->lrb[tag];
1543 WARN_ON(lrbp->cmd);
1544 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1545 if (unlikely(err))
1546 goto out_put_tag;
1547
1548 hba->dev_cmd.complete = &wait;
1549
1550 spin_lock_irqsave(hba->host->host_lock, flags);
1551 ufshcd_send_command(hba, tag);
1552 spin_unlock_irqrestore(hba->host->host_lock, flags);
1553
1554 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1555
1556 out_put_tag:
1557 ufshcd_put_dev_cmd_tag(hba, tag);
1558 wake_up(&hba->dev_cmd.tag_wq);
1559 return err;
1560 }
1561
1562 /**
1563 * ufshcd_init_query() - init the query response and request parameters
1564 * @hba: per-adapter instance
1565 * @request: address of the request pointer to be initialized
1566 * @response: address of the response pointer to be initialized
1567 * @opcode: operation to perform
1568 * @idn: flag idn to access
1569 * @index: LU number to access
1570 * @selector: query/flag/descriptor further identification
1571 */
1572 static inline void ufshcd_init_query(struct ufs_hba *hba,
1573 struct ufs_query_req **request, struct ufs_query_res **response,
1574 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1575 {
1576 *request = &hba->dev_cmd.query.request;
1577 *response = &hba->dev_cmd.query.response;
1578 memset(*request, 0, sizeof(struct ufs_query_req));
1579 memset(*response, 0, sizeof(struct ufs_query_res));
1580 (*request)->upiu_req.opcode = opcode;
1581 (*request)->upiu_req.idn = idn;
1582 (*request)->upiu_req.index = index;
1583 (*request)->upiu_req.selector = selector;
1584 }
1585
1586 /**
1587 * ufshcd_query_flag() - API function for sending flag query requests
1588 * @hba: per-adapter instance
1589 * @opcode: flag query to perform
1590 * @idn: flag idn to access
1591 * @flag_res: the flag value after the query request completes
1592 *
1593 * Returns 0 for success, non-zero in case of failure
1594 */
1595 static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1596 enum flag_idn idn, bool *flag_res)
1597 {
1598 struct ufs_query_req *request = NULL;
1599 struct ufs_query_res *response = NULL;
1600 int err, index = 0, selector = 0;
1601
1602 BUG_ON(!hba);
1603
1604 ufshcd_hold(hba, false);
1605 mutex_lock(&hba->dev_cmd.lock);
1606 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1607 selector);
1608
1609 switch (opcode) {
1610 case UPIU_QUERY_OPCODE_SET_FLAG:
1611 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1612 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1613 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1614 break;
1615 case UPIU_QUERY_OPCODE_READ_FLAG:
1616 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1617 if (!flag_res) {
1618 /* No dummy reads */
1619 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1620 __func__);
1621 err = -EINVAL;
1622 goto out_unlock;
1623 }
1624 break;
1625 default:
1626 dev_err(hba->dev,
1627 "%s: Expected query flag opcode but got = %d\n",
1628 __func__, opcode);
1629 err = -EINVAL;
1630 goto out_unlock;
1631 }
1632
1633 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1634
1635 if (err) {
1636 dev_err(hba->dev,
1637 "%s: Sending flag query for idn %d failed, err = %d\n",
1638 __func__, idn, err);
1639 goto out_unlock;
1640 }
1641
1642 if (flag_res)
1643 *flag_res = (be32_to_cpu(response->upiu_res.value) &
1644 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1645
1646 out_unlock:
1647 mutex_unlock(&hba->dev_cmd.lock);
1648 ufshcd_release(hba);
1649 return err;
1650 }
1651
1652 /**
1653 * ufshcd_query_attr - API function for sending attribute requests
1654 * @hba: per-adapter instance
1655 * @opcode: attribute opcode
1656 * @idn: attribute idn to access
1657 * @index: index field
1658 * @selector: selector field
1659 * @attr_val: the attribute value after the query request completes
1660 *
1661 * Returns 0 for success, non-zero in case of failure
1662 */
1663 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1664 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1665 {
1666 struct ufs_query_req *request = NULL;
1667 struct ufs_query_res *response = NULL;
1668 int err;
1669
1670 BUG_ON(!hba);
1671
1672 ufshcd_hold(hba, false);
1673 if (!attr_val) {
1674 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1675 __func__, opcode);
1676 err = -EINVAL;
1677 goto out;
1678 }
1679
1680 mutex_lock(&hba->dev_cmd.lock);
1681 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1682 selector);
1683
1684 switch (opcode) {
1685 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1686 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1687 request->upiu_req.value = cpu_to_be32(*attr_val);
1688 break;
1689 case UPIU_QUERY_OPCODE_READ_ATTR:
1690 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1691 break;
1692 default:
1693 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1694 __func__, opcode);
1695 err = -EINVAL;
1696 goto out_unlock;
1697 }
1698
1699 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1700
1701 if (err) {
1702 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1703 __func__, opcode, idn, err);
1704 goto out_unlock;
1705 }
1706
1707 *attr_val = be32_to_cpu(response->upiu_res.value);
1708
1709 out_unlock:
1710 mutex_unlock(&hba->dev_cmd.lock);
1711 out:
1712 ufshcd_release(hba);
1713 return err;
1714 }
1715
1716 /**
1717 * ufshcd_query_descriptor - API function for sending descriptor requests
1718 * @hba: per-adapter instance
1719 * @opcode: attribute opcode
1720 * @idn: attribute idn to access
1721 * @index: index field
1722 * @selector: selector field
1723 * @desc_buf: the buffer that contains the descriptor
1724 * @buf_len: length parameter passed to the device
1725 *
1726 * Returns 0 for success, non-zero in case of failure.
1727 * The buf_len parameter will contain, on return, the length parameter
1728 * received on the response.
1729 */
1730 static int ufshcd_query_descriptor(struct ufs_hba *hba,
1731 enum query_opcode opcode, enum desc_idn idn, u8 index,
1732 u8 selector, u8 *desc_buf, int *buf_len)
1733 {
1734 struct ufs_query_req *request = NULL;
1735 struct ufs_query_res *response = NULL;
1736 int err;
1737
1738 BUG_ON(!hba);
1739
1740 ufshcd_hold(hba, false);
1741 if (!desc_buf) {
1742 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1743 __func__, opcode);
1744 err = -EINVAL;
1745 goto out;
1746 }
1747
1748 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1749 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1750 __func__, *buf_len);
1751 err = -EINVAL;
1752 goto out;
1753 }
1754
1755 mutex_lock(&hba->dev_cmd.lock);
1756 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1757 selector);
1758 hba->dev_cmd.query.descriptor = desc_buf;
1759 request->upiu_req.length = cpu_to_be16(*buf_len);
1760
1761 switch (opcode) {
1762 case UPIU_QUERY_OPCODE_WRITE_DESC:
1763 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1764 break;
1765 case UPIU_QUERY_OPCODE_READ_DESC:
1766 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1767 break;
1768 default:
1769 dev_err(hba->dev,
1770 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1771 __func__, opcode);
1772 err = -EINVAL;
1773 goto out_unlock;
1774 }
1775
1776 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1777
1778 if (err) {
1779 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1780 __func__, opcode, idn, err);
1781 goto out_unlock;
1782 }
1783
1784 hba->dev_cmd.query.descriptor = NULL;
1785 *buf_len = be16_to_cpu(response->upiu_res.length);
1786
1787 out_unlock:
1788 mutex_unlock(&hba->dev_cmd.lock);
1789 out:
1790 ufshcd_release(hba);
1791 return err;
1792 }
1793
1794 /**
1795 * ufshcd_read_desc_param - read the specified descriptor parameter
1796 * @hba: Pointer to adapter instance
1797 * @desc_id: descriptor idn value
1798 * @desc_index: descriptor index
1799 * @param_offset: offset of the parameter to read
1800 * @param_read_buf: pointer to buffer where parameter would be read
1801 * @param_size: sizeof(param_read_buf)
1802 *
1803 * Return 0 in case of success, non-zero otherwise
1804 */
1805 static int ufshcd_read_desc_param(struct ufs_hba *hba,
1806 enum desc_idn desc_id,
1807 int desc_index,
1808 u32 param_offset,
1809 u8 *param_read_buf,
1810 u32 param_size)
1811 {
1812 int ret;
1813 u8 *desc_buf;
1814 u32 buff_len;
1815 bool is_kmalloc = true;
1816
1817 /* safety checks */
1818 if (desc_id >= QUERY_DESC_IDN_MAX)
1819 return -EINVAL;
1820
1821 buff_len = ufs_query_desc_max_size[desc_id];
1822 if ((param_offset + param_size) > buff_len)
1823 return -EINVAL;
1824
1825 if (!param_offset && (param_size == buff_len)) {
1826 /* memory space already available to hold full descriptor */
1827 desc_buf = param_read_buf;
1828 is_kmalloc = false;
1829 } else {
1830 /* allocate memory to hold full descriptor */
1831 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1832 if (!desc_buf)
1833 return -ENOMEM;
1834 }
1835
1836 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
1837 desc_id, desc_index, 0, desc_buf,
1838 &buff_len);
1839
1840 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
1841 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
1842 ufs_query_desc_max_size[desc_id])
1843 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
1844 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
1845 __func__, desc_id, param_offset, buff_len, ret);
1846 if (!ret)
1847 ret = -EINVAL;
1848
1849 goto out;
1850 }
1851
1852 if (is_kmalloc)
1853 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1854 out:
1855 if (is_kmalloc)
1856 kfree(desc_buf);
1857 return ret;
1858 }
1859
1860 static inline int ufshcd_read_desc(struct ufs_hba *hba,
1861 enum desc_idn desc_id,
1862 int desc_index,
1863 u8 *buf,
1864 u32 size)
1865 {
1866 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1867 }
1868
1869 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
1870 u8 *buf,
1871 u32 size)
1872 {
1873 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
1874 }
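
/*
 * Usage sketch (illustrative only): reading the complete power descriptor
 * into a caller-supplied buffer sized from ufs_query_desc_max_size[]:
 *
 *	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
 *	int err;
 *
 *	err = ufshcd_read_power_desc(hba, desc_buf, sizeof(desc_buf));
 *
 * Because the offset is 0 and the size equals the full descriptor size,
 * ufshcd_read_desc_param() uses the caller's buffer directly instead of
 * allocating a temporary one.
 */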
1875
1876 /**
1877 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
1878 * @hba: Pointer to adapter instance
1879 * @lun: lun id
1880 * @param_offset: offset of the parameter to read
1881 * @param_read_buf: pointer to buffer where parameter would be read
1882 * @param_size: sizeof(param_read_buf)
1883 *
1884 * Return 0 in case of success, non-zero otherwise
1885 */
1886 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
1887 int lun,
1888 enum unit_desc_param param_offset,
1889 u8 *param_read_buf,
1890 u32 param_size)
1891 {
1892 /*
1893 * Unit descriptors are only available for general purpose LUs (LUN id
1894 * from 0 to 7) and RPMB Well known LU.
1895 */
1896 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
1897 return -EOPNOTSUPP;
1898
1899 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
1900 param_offset, param_read_buf, param_size);
1901 }
1902
1903 /**
1904 * ufshcd_memory_alloc - allocate memory for host memory space data structures
1905 * @hba: per adapter instance
1906 *
1907 * 1. Allocate DMA memory for Command Descriptor array
1908 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
1909 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
1910 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
1911 * (UTMRDL)
1912 * 4. Allocate memory for local reference block(lrb).
1913 *
1914 * Returns 0 for success, non-zero in case of failure
1915 */
1916 static int ufshcd_memory_alloc(struct ufs_hba *hba)
1917 {
1918 size_t utmrdl_size, utrdl_size, ucdl_size;
1919
1920 /* Allocate memory for UTP command descriptors */
1921 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
1922 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
1923 ucdl_size,
1924 &hba->ucdl_dma_addr,
1925 GFP_KERNEL);
1926
1927 /*
1928 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
1929 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
1930 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
1931 * be aligned to 128 bytes as well
1932 */
1933 if (!hba->ucdl_base_addr ||
1934 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
1935 dev_err(hba->dev,
1936 "Command Descriptor Memory allocation failed\n");
1937 goto out;
1938 }
1939
1940 /*
1941 * Allocate memory for UTP Transfer descriptors
1942 * UFSHCI requires 1024 byte alignment of UTRD
1943 */
1944 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
1945 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
1946 utrdl_size,
1947 &hba->utrdl_dma_addr,
1948 GFP_KERNEL);
1949 if (!hba->utrdl_base_addr ||
1950 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
1951 dev_err(hba->dev,
1952 "Transfer Descriptor Memory allocation failed\n");
1953 goto out;
1954 }
1955
1956 /*
1957 * Allocate memory for UTP Task Management descriptors
1958 * UFSHCI requires 1024 byte alignment of UTMRD
1959 */
1960 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
1961 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
1962 utmrdl_size,
1963 &hba->utmrdl_dma_addr,
1964 GFP_KERNEL);
1965 if (!hba->utmrdl_base_addr ||
1966 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
1967 dev_err(hba->dev,
1968 "Task Management Descriptor Memory allocation failed\n");
1969 goto out;
1970 }
1971
1972 /* Allocate memory for local reference block */
1973 hba->lrb = devm_kzalloc(hba->dev,
1974 hba->nutrs * sizeof(struct ufshcd_lrb),
1975 GFP_KERNEL);
1976 if (!hba->lrb) {
1977 dev_err(hba->dev, "LRB Memory allocation failed\n");
1978 goto out;
1979 }
1980 return 0;
1981 out:
1982 return -ENOMEM;
1983 }
1984
1985 /**
1986 * ufshcd_host_memory_configure - configure local reference block with
1987 * memory offsets
1988 * @hba: per adapter instance
1989 *
1990 * Configure Host memory space
1991 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
1992 * address.
1993 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
1994 * and PRDT offset.
1995 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
1996 * into local reference block.
1997 */
1998 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
1999 {
2000 struct utp_transfer_cmd_desc *cmd_descp;
2001 struct utp_transfer_req_desc *utrdlp;
2002 dma_addr_t cmd_desc_dma_addr;
2003 dma_addr_t cmd_desc_element_addr;
2004 u16 response_offset;
2005 u16 prdt_offset;
2006 int cmd_desc_size;
2007 int i;
2008
2009 utrdlp = hba->utrdl_base_addr;
2010 cmd_descp = hba->ucdl_base_addr;
2011
2012 response_offset =
2013 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2014 prdt_offset =
2015 offsetof(struct utp_transfer_cmd_desc, prd_table);
2016
2017 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2018 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2019
2020 for (i = 0; i < hba->nutrs; i++) {
2021 /* Configure UTRD with command descriptor base address */
2022 cmd_desc_element_addr =
2023 (cmd_desc_dma_addr + (cmd_desc_size * i));
2024 utrdlp[i].command_desc_base_addr_lo =
2025 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2026 utrdlp[i].command_desc_base_addr_hi =
2027 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2028
2029 /* Response upiu and prdt offset should be in double words */
2030 utrdlp[i].response_upiu_offset =
2031 cpu_to_le16((response_offset >> 2));
2032 utrdlp[i].prd_table_offset =
2033 cpu_to_le16((prdt_offset >> 2));
2034 utrdlp[i].response_upiu_length =
2035 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2036
2037 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2038 hba->lrb[i].ucd_req_ptr =
2039 (struct utp_upiu_req *)(cmd_descp + i);
2040 hba->lrb[i].ucd_rsp_ptr =
2041 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2042 hba->lrb[i].ucd_prdt_ptr =
2043 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2044 }
2045 }
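
/*
 * A worked example of the double-word conversion above (the numbers are
 * illustrative; the real offsets come from struct utp_transfer_cmd_desc):
 * if response_upiu starts at byte offset 512 within the command descriptor,
 * response_upiu_offset is programmed as 512 >> 2 = 128 double words, and
 * ALIGNED_UPIU_SIZE >> 2 likewise expresses the response UPIU length in
 * double words.
 */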
2046
2047 /**
2048 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2049 * @hba: per adapter instance
2050 *
2051 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2052 * in order to initialize the Unipro link startup procedure.
2053 * Once the Unipro links are up, the device connected to the controller
2054 * is detected.
2055 *
2056 * Returns 0 on success, non-zero value on failure
2057 */
2058 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2059 {
2060 struct uic_command uic_cmd = {0};
2061 int ret;
2062
2063 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2064
2065 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2066 if (ret)
2067 dev_err(hba->dev,
2068 "dme-link-startup: error code %d\n", ret);
2069 return ret;
2070 }
2071
2072 /**
2073 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2074 * @hba: per adapter instance
2075 * @attr_sel: uic command argument1
2076 * @attr_set: attribute set type as uic command argument2
2077 * @mib_val: setting value as uic command argument3
2078 * @peer: indicate whether peer or local
2079 *
2080 * Returns 0 on success, non-zero value on failure
2081 */
2082 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2083 u8 attr_set, u32 mib_val, u8 peer)
2084 {
2085 struct uic_command uic_cmd = {0};
2086 static const char *const action[] = {
2087 "dme-set",
2088 "dme-peer-set"
2089 };
2090 const char *set = action[!!peer];
2091 int ret;
2092
2093 uic_cmd.command = peer ?
2094 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2095 uic_cmd.argument1 = attr_sel;
2096 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2097 uic_cmd.argument3 = mib_val;
2098
2099 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2100 if (ret)
2101 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2102 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2103
2104 return ret;
2105 }
2106 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
2107
2108 /**
2109 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2110 * @hba: per adapter instance
2111 * @attr_sel: uic command argument1
2112 * @mib_val: the value of the attribute as returned by the UIC command
2113 * @peer: indicate whether peer or local
2114 *
2115 * Returns 0 on success, non-zero value on failure
2116 */
2117 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2118 u32 *mib_val, u8 peer)
2119 {
2120 struct uic_command uic_cmd = {0};
2121 static const char *const action[] = {
2122 "dme-get",
2123 "dme-peer-get"
2124 };
2125 const char *get = action[!!peer];
2126 int ret;
2127
2128 uic_cmd.command = peer ?
2129 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2130 uic_cmd.argument1 = attr_sel;
2131
2132 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2133 if (ret) {
2134 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
2135 get, UIC_GET_ATTR_ID(attr_sel), ret);
2136 goto out;
2137 }
2138
2139 if (mib_val)
2140 *mib_val = uic_cmd.argument3;
2141 out:
2142 return ret;
2143 }
2144 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
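
/*
 * Usage sketch (illustrative only): these helpers are normally invoked
 * through the ufshcd_dme_set()/ufshcd_dme_get() and peer wrappers declared
 * in ufshcd.h, e.g. to read the connected RX lane count and program the
 * RX gear as the power mode code below does:
 *
 *	u32 lanes = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_PWM_G1);
 */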
2145
2146 /**
2147 * ufshcd_uic_pwr_ctrl - execute UIC commands which affect the link power
2148 * state and wait for them to take effect.
2149 *
2150 * @hba: per adapter instance
2151 * @cmd: UIC command to execute
2152 *
2153 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2154 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2155 * and device UniPro links, and hence their final completion is indicated by
2156 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2157 * addition to normal UIC command completion Status (UCCS). This function only
2158 * returns after the relevant status bits indicate the completion.
2159 *
2160 * Returns 0 on success, non-zero value on failure
2161 */
2162 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2163 {
2164 struct completion uic_async_done;
2165 unsigned long flags;
2166 u8 status;
2167 int ret;
2168
2169 mutex_lock(&hba->uic_cmd_mutex);
2170 init_completion(&uic_async_done);
2171
2172 spin_lock_irqsave(hba->host->host_lock, flags);
2173 hba->uic_async_done = &uic_async_done;
2174 ret = __ufshcd_send_uic_cmd(hba, cmd);
2175 spin_unlock_irqrestore(hba->host->host_lock, flags);
2176 if (ret) {
2177 dev_err(hba->dev,
2178 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2179 cmd->command, cmd->argument3, ret);
2180 goto out;
2181 }
2182 ret = ufshcd_wait_for_uic_cmd(hba, cmd);
2183 if (ret) {
2184 dev_err(hba->dev,
2185 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2186 cmd->command, cmd->argument3, ret);
2187 goto out;
2188 }
2189
2190 if (!wait_for_completion_timeout(hba->uic_async_done,
2191 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2192 dev_err(hba->dev,
2193 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2194 cmd->command, cmd->argument3);
2195 ret = -ETIMEDOUT;
2196 goto out;
2197 }
2198
2199 status = ufshcd_get_upmcrs(hba);
2200 if (status != PWR_LOCAL) {
2201 dev_err(hba->dev,
2202 "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
2203 cmd->command, status);
2204 ret = (status != PWR_OK) ? status : -1;
2205 }
2206 out:
2207 spin_lock_irqsave(hba->host->host_lock, flags);
2208 hba->uic_async_done = NULL;
2209 spin_unlock_irqrestore(hba->host->host_lock, flags);
2210 mutex_unlock(&hba->uic_cmd_mutex);
2211
2212 return ret;
2213 }
2214
2215 /**
2216 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2217 * using DME_SET primitives.
2218 * @hba: per adapter instance
2219 * @mode: power mode value
2220 *
2221 * Returns 0 on success, non-zero value on failure
2222 */
2223 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2224 {
2225 struct uic_command uic_cmd = {0};
2226 int ret;
2227
2228 uic_cmd.command = UIC_CMD_DME_SET;
2229 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2230 uic_cmd.argument3 = mode;
2231 ufshcd_hold(hba, false);
2232 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2233 ufshcd_release(hba);
2234
2235 return ret;
2236 }
2237
2238 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2239 {
2240 struct uic_command uic_cmd = {0};
2241
2242 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2243
2244 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2245 }
2246
2247 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2248 {
2249 struct uic_command uic_cmd = {0};
2250 int ret;
2251
2252 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2253 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2254 if (ret) {
2255 ufshcd_set_link_off(hba);
2256 ret = ufshcd_host_reset_and_restore(hba);
2257 }
2258
2259 return ret;
2260 }
2261
2262 /**
2263 * ufshcd_init_pwr_info - setting the POR (power on reset)
2264 * values in hba power info
2265 * @hba: per-adapter instance
2266 */
2267 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2268 {
2269 hba->pwr_info.gear_rx = UFS_PWM_G1;
2270 hba->pwr_info.gear_tx = UFS_PWM_G1;
2271 hba->pwr_info.lane_rx = 1;
2272 hba->pwr_info.lane_tx = 1;
2273 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2274 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2275 hba->pwr_info.hs_rate = 0;
2276 }
2277
2278 /**
2279 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2280 * @hba: per-adapter instance
2281 */
2282 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2283 {
2284 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2285
2286 if (hba->max_pwr_info.is_valid)
2287 return 0;
2288
2289 pwr_info->pwr_tx = FASTAUTO_MODE;
2290 pwr_info->pwr_rx = FASTAUTO_MODE;
2291 pwr_info->hs_rate = PA_HS_MODE_B;
2292
2293 /* Get the connected lane count */
2294 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2295 &pwr_info->lane_rx);
2296 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2297 &pwr_info->lane_tx);
2298
2299 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2300 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2301 __func__,
2302 pwr_info->lane_rx,
2303 pwr_info->lane_tx);
2304 return -EINVAL;
2305 }
2306
2307 /*
2308 * First, get the maximum gears of HS speed.
2309 * If a zero value, it means there is no HSGEAR capability.
2310 * Then, get the maximum gears of PWM speed.
2311 */
2312 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2313 if (!pwr_info->gear_rx) {
2314 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2315 &pwr_info->gear_rx);
2316 if (!pwr_info->gear_rx) {
2317 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2318 __func__, pwr_info->gear_rx);
2319 return -EINVAL;
2320 }
2321 pwr_info->pwr_rx = SLOWAUTO_MODE;
2322 }
2323
2324 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2325 &pwr_info->gear_tx);
2326 if (!pwr_info->gear_tx) {
2327 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2328 &pwr_info->gear_tx);
2329 if (!pwr_info->gear_tx) {
2330 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2331 __func__, pwr_info->gear_tx);
2332 return -EINVAL;
2333 }
2334 pwr_info->pwr_tx = SLOWAUTO_MODE;
2335 }
2336
2337 hba->max_pwr_info.is_valid = true;
2338 return 0;
2339 }
2340
2341 static int ufshcd_change_power_mode(struct ufs_hba *hba,
2342 struct ufs_pa_layer_attr *pwr_mode)
2343 {
2344 int ret;
2345
2346 /* if already configured to the requested pwr_mode */
2347 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2348 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2349 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2350 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2351 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2352 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2353 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2354 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2355 return 0;
2356 }
2357
2358 /*
2359 * Configure attributes for power mode change with below.
2360 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2361 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2362 * - PA_HSSERIES
2363 */
2364 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2365 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2366 pwr_mode->lane_rx);
2367 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2368 pwr_mode->pwr_rx == FAST_MODE)
2369 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2370 else
2371 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2372
2373 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2374 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2375 pwr_mode->lane_tx);
2376 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2377 pwr_mode->pwr_tx == FAST_MODE)
2378 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2379 else
2380 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2381
2382 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2383 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2384 pwr_mode->pwr_rx == FAST_MODE ||
2385 pwr_mode->pwr_tx == FAST_MODE)
2386 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2387 pwr_mode->hs_rate);
2388
2389 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2390 | pwr_mode->pwr_tx);
2391
2392 if (ret) {
2393 dev_err(hba->dev,
2394 "%s: power mode change failed %d\n", __func__, ret);
2395 } else {
2396 if (hba->vops && hba->vops->pwr_change_notify)
2397 hba->vops->pwr_change_notify(hba,
2398 POST_CHANGE, NULL, pwr_mode);
2399
2400 memcpy(&hba->pwr_info, pwr_mode,
2401 sizeof(struct ufs_pa_layer_attr));
2402 }
2403
2404 return ret;
2405 }
2406
2407 /**
2408 * ufshcd_config_pwr_mode - configure a new power mode
2409 * @hba: per-adapter instance
2410 * @desired_pwr_mode: desired power configuration
2411 */
2412 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2413 struct ufs_pa_layer_attr *desired_pwr_mode)
2414 {
2415 struct ufs_pa_layer_attr final_params = { 0 };
2416 int ret;
2417
2418 if (hba->vops && hba->vops->pwr_change_notify)
2419 hba->vops->pwr_change_notify(hba,
2420 PRE_CHANGE, desired_pwr_mode, &final_params);
2421 else
2422 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2423
2424 ret = ufshcd_change_power_mode(hba, &final_params);
2425
2426 return ret;
2427 }
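
/*
 * Usage sketch (illustrative only): a typical caller first queries the
 * maximum capability negotiated on the link and then requests it, roughly:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 *
 * i.e. scale to the fastest gear/mode both sides advertise; if that fails,
 * the link simply stays at the power-on reset settings filled in by
 * ufshcd_init_pwr_info().
 */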
2428
2429 /**
2430 * ufshcd_complete_dev_init() - checks device readiness
2431 * @hba: per-adapter instance
2432 *
2433 * Set fDeviceInit flag and poll until device toggles it.
2434 */
2435 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2436 {
2437 int i, retries, err = 0;
2438 bool flag_res = 1;
2439
2440 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2441 /* Set the fDeviceInit flag */
2442 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2443 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
2444 if (!err || err == -ETIMEDOUT)
2445 break;
2446 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2447 }
2448 if (err) {
2449 dev_err(hba->dev,
2450 "%s setting fDeviceInit flag failed with error %d\n",
2451 __func__, err);
2452 goto out;
2453 }
2454
2455 /* poll for max. 100 iterations for fDeviceInit flag to clear */
2456 for (i = 0; i < 100 && !err && flag_res; i++) {
2457 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2458 err = ufshcd_query_flag(hba,
2459 UPIU_QUERY_OPCODE_READ_FLAG,
2460 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2461 if (!err || err == -ETIMEDOUT)
2462 break;
2463 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
2464 err);
2465 }
2466 }
2467 if (err)
2468 dev_err(hba->dev,
2469 "%s reading fDeviceInit flag failed with error %d\n",
2470 __func__, err);
2471 else if (flag_res)
2472 dev_err(hba->dev,
2473 "%s fDeviceInit was not cleared by the device\n",
2474 __func__);
2475
2476 out:
2477 return err;
2478 }
2479
2480 /**
2481 * ufshcd_make_hba_operational - Make UFS controller operational
2482 * @hba: per adapter instance
2483 *
2484 * To bring UFS host controller to operational state,
2485 * 1. Enable required interrupts
2486 * 2. Configure interrupt aggregation
2487 * 3. Program UTRL and UTMRL base addresses
2488 * 4. Configure run-stop-registers
2489 *
2490 * Returns 0 on success, non-zero value on failure
2491 */
2492 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2493 {
2494 int err = 0;
2495 u32 reg;
2496
2497 /* Enable required interrupts */
2498 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2499
2500 /* Configure interrupt aggregation */
2501 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2502
2503 /* Configure UTRL and UTMRL base address registers */
2504 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2505 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2506 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2507 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2508 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2509 REG_UTP_TASK_REQ_LIST_BASE_L);
2510 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2511 REG_UTP_TASK_REQ_LIST_BASE_H);
2512
2513 /*
2514 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
2515 * DEI, HEI bits must be 0
2516 */
2517 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
2518 if (!(ufshcd_get_lists_status(reg))) {
2519 ufshcd_enable_run_stop_reg(hba);
2520 } else {
2521 dev_err(hba->dev,
2522 "Host controller not ready to process requests");
2523 err = -EIO;
2524 goto out;
2525 }
2526
2527 out:
2528 return err;
2529 }
2530
2531 /**
2532 * ufshcd_hba_enable - initialize the controller
2533 * @hba: per adapter instance
2534 *
2535 * The controller resets itself and controller firmware initialization
2536 * sequence kicks off. When controller is ready it will set
2537 * the Host Controller Enable bit to 1.
2538 *
2539 * Returns 0 on success, non-zero value on failure
2540 */
2541 static int ufshcd_hba_enable(struct ufs_hba *hba)
2542 {
2543 int retry;
2544
2545 /*
2546 * msleep of 1 and 5 used in this function might result in msleep(20),
2547 * but it was necessary to send the UFS FPGA to reset mode during
2548 * development and testing of this driver. msleep can be changed to
2549 * mdelay and retry count can be reduced based on the controller.
2550 */
2551 if (!ufshcd_is_hba_active(hba)) {
2552
2553 /* change controller state to "reset state" */
2554 ufshcd_hba_stop(hba);
2555
2556 /*
2557 * This delay is based on the testing done with UFS host
2558 * controller FPGA. The delay can be changed based on the
2559 * host controller used.
2560 */
2561 msleep(5);
2562 }
2563
2564 /* UniPro link is disabled at this point */
2565 ufshcd_set_link_off(hba);
2566
2567 if (hba->vops && hba->vops->hce_enable_notify)
2568 hba->vops->hce_enable_notify(hba, PRE_CHANGE);
2569
2570 /* start controller initialization sequence */
2571 ufshcd_hba_start(hba);
2572
2573 /*
2574 * To initialize a UFS host controller HCE bit must be set to 1.
2575 * During initialization the HCE bit value changes from 1->0->1.
2576 * When the host controller completes initialization sequence
2577 * it sets the value of HCE bit to 1. The same HCE bit is read back
2578 * to check if the controller has completed initialization sequence.
2579 * So without this delay the value HCE = 1, set in the previous
2580 * instruction might be read back.
2581 * This delay can be changed based on the controller.
2582 */
2583 msleep(1);
2584
2585 /* wait for the host controller to complete initialization */
2586 retry = 10;
2587 while (ufshcd_is_hba_active(hba)) {
2588 if (retry) {
2589 retry--;
2590 } else {
2591 dev_err(hba->dev,
2592 "Controller enable failed\n");
2593 return -EIO;
2594 }
2595 msleep(5);
2596 }
2597
2598 /* enable UIC related interrupts */
2599 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
2600
2601 if (hba->vops && hba->vops->hce_enable_notify)
2602 hba->vops->hce_enable_notify(hba, POST_CHANGE);
2603
2604 return 0;
2605 }
2606
2607 /**
2608 * ufshcd_link_startup - Initialize unipro link startup
2609 * @hba: per adapter instance
2610 *
2611 * Returns 0 for success, non-zero in case of failure
2612 */
2613 static int ufshcd_link_startup(struct ufs_hba *hba)
2614 {
2615 int ret;
2616 int retries = DME_LINKSTARTUP_RETRIES;
2617
2618 do {
2619 if (hba->vops && hba->vops->link_startup_notify)
2620 hba->vops->link_startup_notify(hba, PRE_CHANGE);
2621
2622 ret = ufshcd_dme_link_startup(hba);
2623
2624 /* check if device is detected by inter-connect layer */
2625 if (!ret && !ufshcd_is_device_present(hba)) {
2626 dev_err(hba->dev, "%s: Device not present\n", __func__);
2627 ret = -ENXIO;
2628 goto out;
2629 }
2630
2631 /*
2632 * DME link lost indication is only received when link is up,
2633 * but we can't be sure if the link is up until link startup
2634 * succeeds. So reset the local Uni-Pro and try again.
2635 */
2636 if (ret && ufshcd_hba_enable(hba))
2637 goto out;
2638 } while (ret && retries--);
2639
2640 if (ret)
2641 /* failed to get the link up... give up */
2642 goto out;
2643
2644 /* Include any host controller configuration via UIC commands */
2645 if (hba->vops && hba->vops->link_startup_notify) {
2646 ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
2647 if (ret)
2648 goto out;
2649 }
2650
2651 ret = ufshcd_make_hba_operational(hba);
2652 out:
2653 if (ret)
2654 dev_err(hba->dev, "link startup failed %d\n", ret);
2655 return ret;
2656 }
2657
2658 /**
2659 * ufshcd_verify_dev_init() - Verify device initialization
2660 * @hba: per-adapter instance
2661 *
2662 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
2663 * device Transport Protocol (UTP) layer is ready after a reset.
2664 * If the UTP layer at the device side is not initialized, it may
2665 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
2666 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
2667 */
2668 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
2669 {
2670 int err = 0;
2671 int retries;
2672
2673 ufshcd_hold(hba, false);
2674 mutex_lock(&hba->dev_cmd.lock);
2675 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
2676 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
2677 NOP_OUT_TIMEOUT);
2678
2679 if (!err || err == -ETIMEDOUT)
2680 break;
2681
2682 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2683 }
2684 mutex_unlock(&hba->dev_cmd.lock);
2685 ufshcd_release(hba);
2686
2687 if (err)
2688 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
2689 return err;
2690 }
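
/*
 * Taken together, the bring-up path is roughly the following sequence (a
 * sketch of the probe-time ordering, not a literal excerpt):
 *
 *	ufshcd_hba_enable(hba);         reset and enable the host controller
 *	ufshcd_link_startup(hba);       DME link startup, detect the device
 *	ufshcd_verify_dev_init(hba);    NOP OUT / NOP IN handshake
 *	ufshcd_complete_dev_init(hba);  set and poll fDeviceInit
 *	ufshcd_get_max_pwr_mode(hba);
 *	ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */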
2691
2692 /**
2693 * ufshcd_set_queue_depth - set lun queue depth
2694 * @sdev: pointer to SCSI device
2695 *
2696 * Read bLUQueueDepth value and activate scsi tagged command
2697 * queueing. For WLUN, queue depth is set to 1. For best-effort
2698 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
2699 * value that the host can queue.
2700 */
2701 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2702 {
2703 int ret = 0;
2704 u8 lun_qdepth;
2705 struct ufs_hba *hba;
2706
2707 hba = shost_priv(sdev->host);
2708
2709 lun_qdepth = hba->nutrs;
2710 ret = ufshcd_read_unit_desc_param(hba,
2711 ufshcd_scsi_to_upiu_lun(sdev->lun),
2712 UNIT_DESC_PARAM_LU_Q_DEPTH,
2713 &lun_qdepth,
2714 sizeof(lun_qdepth));
2715
2716 /* Some WLUN doesn't support unit descriptor */
2717 if (ret == -EOPNOTSUPP)
2718 lun_qdepth = 1;
2719 else if (!lun_qdepth)
2720 /* eventually, we can figure out the real queue depth */
2721 lun_qdepth = hba->nutrs;
2722 else
2723 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2724
2725 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2726 __func__, lun_qdepth);
2727 scsi_activate_tcq(sdev, lun_qdepth);
2728 }
2729
2730 /*
2731 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
2732 * @hba: per-adapter instance
2733 * @lun: UFS device lun id
2734 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
2735 *
2736 * Returns 0 in case of success; the write protect status is returned in the
2737 * @b_lu_write_protect parameter.
2738 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
2739 * Returns -EINVAL in case of invalid parameters passed to this function.
2740 */
2741 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
2742 u8 lun,
2743 u8 *b_lu_write_protect)
2744 {
2745 int ret;
2746
2747 if (!b_lu_write_protect)
2748 ret = -EINVAL;
2749 /*
2750 * According to UFS device spec, RPMB LU can't be write
2751 * protected so skip reading bLUWriteProtect parameter for
2752 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
2753 */
2754 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
2755 ret = -ENOTSUPP;
2756 else
2757 ret = ufshcd_read_unit_desc_param(hba,
2758 lun,
2759 UNIT_DESC_PARAM_LU_WR_PROTECT,
2760 b_lu_write_protect,
2761 sizeof(*b_lu_write_protect));
2762 return ret;
2763 }
2764
2765 /**
2766 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
2767 * status
2768 * @hba: per-adapter instance
2769 * @sdev: pointer to SCSI device
2770 *
2771 */
2772 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
2773 struct scsi_device *sdev)
2774 {
2775 if (hba->dev_info.f_power_on_wp_en &&
2776 !hba->dev_info.is_lu_power_on_wp) {
2777 u8 b_lu_write_protect;
2778
2779 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
2780 &b_lu_write_protect) &&
2781 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
2782 hba->dev_info.is_lu_power_on_wp = true;
2783 }
2784 }
2785
2786 /**
2787 * ufshcd_slave_alloc - handle initial SCSI device configurations
2788 * @sdev: pointer to SCSI device
2789 *
2790 * Returns success
2791 */
2792 static int ufshcd_slave_alloc(struct scsi_device *sdev)
2793 {
2794 struct ufs_hba *hba;
2795
2796 hba = shost_priv(sdev->host);
2797 sdev->tagged_supported = 1;
2798
2799 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
2800 sdev->use_10_for_ms = 1;
2801 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2802
2803 /* allow SCSI layer to restart the device in case of errors */
2804 sdev->allow_restart = 1;
2805
2806 /* REPORT SUPPORTED OPERATION CODES is not supported */
2807 sdev->no_report_opcodes = 1;
2808
2809
2810 ufshcd_set_queue_depth(sdev);
2811
2812 ufshcd_get_lu_power_on_wp_status(hba, sdev);
2813
2814 return 0;
2815 }
2816
2817 /**
2818 * ufshcd_change_queue_depth - change queue depth
2819 * @sdev: pointer to SCSI device
2820 * @depth: required depth to set
2821 * @reason: reason for changing the depth
2822 *
2823 * Change queue depth according to the reason and make sure
2824 * the max. limits are not crossed.
2825 */
2826 static int ufshcd_change_queue_depth(struct scsi_device *sdev,
2827 int depth, int reason)
2828 {
2829 struct ufs_hba *hba = shost_priv(sdev->host);
2830
2831 if (depth > hba->nutrs)
2832 depth = hba->nutrs;
2833
2834 switch (reason) {
2835 case SCSI_QDEPTH_DEFAULT:
2836 case SCSI_QDEPTH_RAMP_UP:
2837 if (!sdev->tagged_supported)
2838 depth = 1;
2839 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2840 break;
2841 case SCSI_QDEPTH_QFULL:
2842 scsi_track_queue_full(sdev, depth);
2843 break;
2844 default:
2845 return -EOPNOTSUPP;
2846 }
2847
2848 return depth;
2849 }
2850
2851 /**
2852 * ufshcd_slave_configure - adjust SCSI device configurations
2853 * @sdev: pointer to SCSI device
2854 */
2855 static int ufshcd_slave_configure(struct scsi_device *sdev)
2856 {
2857 struct request_queue *q = sdev->request_queue;
2858
2859 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2860 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
2861
2862 return 0;
2863 }
2864
2865 /**
2866 * ufshcd_slave_destroy - remove SCSI device configurations
2867 * @sdev: pointer to SCSI device
2868 */
2869 static void ufshcd_slave_destroy(struct scsi_device *sdev)
2870 {
2871 struct ufs_hba *hba;
2872
2873 hba = shost_priv(sdev->host);
2874 scsi_deactivate_tcq(sdev, hba->nutrs);
2875 /* Drop the reference as it won't be needed anymore */
2876 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
2877 unsigned long flags;
2878
2879 spin_lock_irqsave(hba->host->host_lock, flags);
2880 hba->sdev_ufs_device = NULL;
2881 spin_unlock_irqrestore(hba->host->host_lock, flags);
2882 }
2883 }
2884
2885 /**
2886 * ufshcd_task_req_compl - handle task management request completion
2887 * @hba: per adapter instance
2888 * @index: index of the completed request
2889 * @resp: task management service response
2890 *
2891 * Returns non-zero value on error, zero on success
2892 */
2893 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
2894 {
2895 struct utp_task_req_desc *task_req_descp;
2896 struct utp_upiu_task_rsp *task_rsp_upiup;
2897 unsigned long flags;
2898 int ocs_value;
2899 int task_result;
2900
2901 spin_lock_irqsave(hba->host->host_lock, flags);
2902
2903 /* Clear completed tasks from outstanding_tasks */
2904 __clear_bit(index, &hba->outstanding_tasks);
2905
2906 task_req_descp = hba->utmrdl_base_addr;
2907 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
2908
2909 if (ocs_value == OCS_SUCCESS) {
2910 task_rsp_upiup = (struct utp_upiu_task_rsp *)
2911 task_req_descp[index].task_rsp_upiu;
2912 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
2913 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
2914 if (resp)
2915 *resp = (u8)task_result;
2916 } else {
2917 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
2918 __func__, ocs_value);
2919 }
2920 spin_unlock_irqrestore(hba->host->host_lock, flags);
2921
2922 return ocs_value;
2923 }
2924
2925 /**
2926 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
2927 * @lrb: pointer to local reference block of completed command
2928 * @scsi_status: SCSI command status
2929 *
2930 * Returns value based on SCSI command status
2931 */
2932 static inline int
2933 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
2934 {
2935 int result = 0;
2936
2937 switch (scsi_status) {
2938 case SAM_STAT_CHECK_CONDITION:
2939 ufshcd_copy_sense_data(lrbp);
2940 case SAM_STAT_GOOD:
2941 result |= DID_OK << 16 |
2942 COMMAND_COMPLETE << 8 |
2943 scsi_status;
2944 break;
2945 case SAM_STAT_TASK_SET_FULL:
2946 case SAM_STAT_BUSY:
2947 case SAM_STAT_TASK_ABORTED:
2948 ufshcd_copy_sense_data(lrbp);
2949 result |= scsi_status;
2950 break;
2951 default:
2952 result |= DID_ERROR << 16;
2953 break;
2954 } /* end of switch */
2955
2956 return result;
2957 }
2958
2959 /**
2960 * ufshcd_transfer_rsp_status - Get overall status of the response
2961 * @hba: per adapter instance
2962 * @lrb: pointer to local reference block of completed command
2963 *
2964 * Returns result of the command to notify SCSI midlayer
2965 */
2966 static inline int
2967 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2968 {
2969 int result = 0;
2970 int scsi_status;
2971 int ocs;
2972
2973 /* overall command status of utrd */
2974 ocs = ufshcd_get_tr_ocs(lrbp);
2975
2976 switch (ocs) {
2977 case OCS_SUCCESS:
2978 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2979
2980 switch (result) {
2981 case UPIU_TRANSACTION_RESPONSE:
2982 /*
2983 * get the response UPIU result to extract
2984 * the SCSI command status
2985 */
2986 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
2987
2988 /*
2989 * get the result based on SCSI status response
2990 * to notify the SCSI midlayer of the command status
2991 */
2992 scsi_status = result & MASK_SCSI_STATUS;
2993 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
2994
2995 if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
2996 schedule_work(&hba->eeh_work);
2997 break;
2998 case UPIU_TRANSACTION_REJECT_UPIU:
2999 /* TODO: handle Reject UPIU Response */
3000 result = DID_ERROR << 16;
3001 dev_err(hba->dev,
3002 "Reject UPIU not fully implemented\n");
3003 break;
3004 default:
3005 result = DID_ERROR << 16;
3006 dev_err(hba->dev,
3007 "Unexpected request response code = %x\n",
3008 result);
3009 break;
3010 }
3011 break;
3012 case OCS_ABORTED:
3013 result |= DID_ABORT << 16;
3014 break;
3015 case OCS_INVALID_COMMAND_STATUS:
3016 result |= DID_REQUEUE << 16;
3017 break;
3018 case OCS_INVALID_CMD_TABLE_ATTR:
3019 case OCS_INVALID_PRDT_ATTR:
3020 case OCS_MISMATCH_DATA_BUF_SIZE:
3021 case OCS_MISMATCH_RESP_UPIU_SIZE:
3022 case OCS_PEER_COMM_FAILURE:
3023 case OCS_FATAL_ERROR:
3024 default:
3025 result |= DID_ERROR << 16;
3026 dev_err(hba->dev,
3027 "OCS error from controller = %x\n", ocs);
3028 break;
3029 } /* end of switch */
3030
3031 return result;
3032 }
3033
3034 /**
3035 * ufshcd_uic_cmd_compl - handle completion of uic command
3036 * @hba: per adapter instance
3037 * @intr_status: interrupt status generated by the controller
3038 */
3039 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3040 {
3041 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
3042 hba->active_uic_cmd->argument2 |=
3043 ufshcd_get_uic_cmd_result(hba);
3044 hba->active_uic_cmd->argument3 =
3045 ufshcd_get_dme_attr_val(hba);
3046 complete(&hba->active_uic_cmd->done);
3047 }
3048
3049 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3050 complete(hba->uic_async_done);
3051 }
3052
3053 /**
3054 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3055 * @hba: per adapter instance
3056 */
3057 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3058 {
3059 struct ufshcd_lrb *lrbp;
3060 struct scsi_cmnd *cmd;
3061 unsigned long completed_reqs;
3062 u32 tr_doorbell;
3063 int result;
3064 int index;
3065 struct request *req;
3066
3067 /* Resetting interrupt aggregation counters first and reading the
3068 * DOOR_BELL afterward allows us to handle all the completed requests.
3069 * In order to prevent other interrupts starvation the DB is read once
3070 * after reset. The down side of this solution is the possibility of
3071 * false interrupt if device completes another request after resetting
3072 * aggregation and before reading the DB.
3073 */
3074 ufshcd_reset_intr_aggr(hba);
3075
3076 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3077 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3078
3079 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3080 lrbp = &hba->lrb[index];
3081 cmd = lrbp->cmd;
3082 if (cmd) {
3083 result = ufshcd_transfer_rsp_status(hba, lrbp);
3084 scsi_dma_unmap(cmd);
3085 cmd->result = result;
3086 /* Mark completed command as NULL in LRB */
3087 lrbp->cmd = NULL;
3088 clear_bit_unlock(index, &hba->lrb_in_use);
3089 req = cmd->request;
3090 if (req) {
3091 /* Update IO svc time latency histogram */
3092 if (req->lat_hist_enabled) {
3093 ktime_t completion;
3094 u_int64_t delta_us;
3095
3096 completion = ktime_get();
3097 delta_us = ktime_us_delta(completion,
3098 req->lat_hist_io_start);
3099 blk_update_latency_hist(
3100 (rq_data_dir(req) == READ) ?
3101 &hba->io_lat_read :
3102 &hba->io_lat_write, delta_us);
3103 }
3104 }
3105 /* Do not touch lrbp after scsi done */
3106 cmd->scsi_done(cmd);
3107 __ufshcd_release(hba);
3108 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
3109 if (hba->dev_cmd.complete)
3110 complete(hba->dev_cmd.complete);
3111 }
3112 }
3113
3114 /* clear corresponding bits of completed commands */
3115 hba->outstanding_reqs ^= completed_reqs;
3116
3117 ufshcd_clk_scaling_update_busy(hba);
3118
3119 /* we might have free'd some tags above */
3120 wake_up(&hba->dev_cmd.tag_wq);
3121 }
3122
3123 /**
3124 * ufshcd_disable_ee - disable exception event
3125 * @hba: per-adapter instance
3126 * @mask: exception event to disable
3127 *
3128 * Disables exception event in the device so that the EVENT_ALERT
3129 * bit is not set.
3130 *
3131 * Returns zero on success, non-zero error value on failure.
3132 */
3133 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3134 {
3135 int err = 0;
3136 u32 val;
3137
3138 if (!(hba->ee_ctrl_mask & mask))
3139 goto out;
3140
3141 val = hba->ee_ctrl_mask & ~mask;
3142 val &= 0xFFFF; /* 2 bytes */
3143 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3144 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3145 if (!err)
3146 hba->ee_ctrl_mask &= ~mask;
3147 out:
3148 return err;
3149 }
3150
3151 /**
3152 * ufshcd_enable_ee - enable exception event
3153 * @hba: per-adapter instance
3154 * @mask: exception event to enable
3155 *
3156 * Enable corresponding exception event in the device to allow
3157 * device to alert host in critical scenarios.
3158 *
3159 * Returns zero on success, non-zero error value on failure.
3160 */
3161 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3162 {
3163 int err = 0;
3164 u32 val;
3165
3166 if (hba->ee_ctrl_mask & mask)
3167 goto out;
3168
3169 val = hba->ee_ctrl_mask | mask;
3170 val &= 0xFFFF; /* 2 bytes */
3171 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3172 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3173 if (!err)
3174 hba->ee_ctrl_mask |= mask;
3175 out:
3176 return err;
3177 }
3178
3179 /**
3180 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
3181 * @hba: per-adapter instance
3182 *
3183 * Allow device to manage background operations on its own. Enabling
3184 * this might lead to inconsistent latencies during normal data transfers
3185 * as the device is allowed to manage its own way of handling background
3186 * operations.
3187 *
3188 * Returns zero on success, non-zero on failure.
3189 */
3190 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3191 {
3192 int err = 0;
3193
3194 if (hba->auto_bkops_enabled)
3195 goto out;
3196
3197 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3198 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3199 if (err) {
3200 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3201 __func__, err);
3202 goto out;
3203 }
3204
3205 hba->auto_bkops_enabled = true;
3206
3207 /* No need of URGENT_BKOPS exception from the device */
3208 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3209 if (err)
3210 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3211 __func__, err);
3212 out:
3213 return err;
3214 }
3215
3216 /**
3217 * ufshcd_disable_auto_bkops - block device in doing background operations
3218 * @hba: per-adapter instance
3219 *
3220 * Disabling background operations improves command response latency but
3221 * has the drawback of the device moving into a critical state where it is
3222 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
3223 * host is idle so that BKOPS are managed effectively without any negative
3224 * impacts.
3225 *
3226 * Returns zero on success, non-zero on failure.
3227 */
3228 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3229 {
3230 int err = 0;
3231
3232 if (!hba->auto_bkops_enabled)
3233 goto out;
3234
3235 /*
3236 * If host assisted BKOPs is to be enabled, make sure
3237 * urgent bkops exception is allowed.
3238 */
3239 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3240 if (err) {
3241 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3242 __func__, err);
3243 goto out;
3244 }
3245
3246 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3247 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3248 if (err) {
3249 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3250 __func__, err);
3251 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3252 goto out;
3253 }
3254
3255 hba->auto_bkops_enabled = false;
3256 out:
3257 return err;
3258 }
3259
3260 /**
3261 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
3262 * @hba: per adapter instance
3263 *
3264 * After a device reset the device may toggle the BKOPS_EN flag
3265 * to default value. The s/w tracking variables should be updated
3266 * as well. Do this by forcing enable of auto bkops.
3267 */
3268 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3269 {
3270 hba->auto_bkops_enabled = false;
3271 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3272 ufshcd_enable_auto_bkops(hba);
3273 }
3274
3275 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3276 {
3277 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3278 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3279 }
3280
3281 /**
3282 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
3283 * @hba: per-adapter instance
3284 * @status: bkops_status value
3285 *
3286 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
3287 * flag in the device to permit background operations if the device
3288 * bkops_status is greater than or equal to "status" argument passed to
3289 * this function, disable otherwise.
3290 *
3291 * Returns 0 for success, non-zero in case of failure.
3292 *
3293 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3294 * to know whether auto bkops is enabled or disabled after this function
3295 * returns control to it.
3296 */
3297 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3298 enum bkops_status status)
3299 {
3300 int err;
3301 u32 curr_status = 0;
3302
3303 err = ufshcd_get_bkops_status(hba, &curr_status);
3304 if (err) {
3305 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3306 __func__, err);
3307 goto out;
3308 } else if (curr_status > BKOPS_STATUS_MAX) {
3309 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3310 __func__, curr_status);
3311 err = -EINVAL;
3312 goto out;
3313 }
3314
3315 if (curr_status >= status)
3316 err = ufshcd_enable_auto_bkops(hba);
3317 else
3318 err = ufshcd_disable_auto_bkops(hba);
3319 out:
3320 return err;
3321 }
3322
3323 /**
3324 * ufshcd_urgent_bkops - handle urgent bkops exception event
3325 * @hba: per-adapter instance
3326 *
3327 * Enable fBackgroundOpsEn flag in the device to permit background
3328 * operations.
3329 *
3330 * If BKOPS is enabled, this function returns 0; 1 if BKOPS is not enabled;
3331 * and a negative error value for any other failure.
3332 */
3333 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3334 {
3335 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
3336 }
3337
3338 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3339 {
3340 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3341 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3342 }
3343
3344 /**
3345 * ufshcd_exception_event_handler - handle exceptions raised by device
3346 * @work: pointer to work data
3347 *
3348 * Read bExceptionEventStatus attribute from the device and handle the
3349 * exception event accordingly.
3350 */
3351 static void ufshcd_exception_event_handler(struct work_struct *work)
3352 {
3353 struct ufs_hba *hba;
3354 int err;
3355 u32 status = 0;
3356 hba = container_of(work, struct ufs_hba, eeh_work);
3357
3358 pm_runtime_get_sync(hba->dev);
3359 err = ufshcd_get_ee_status(hba, &status);
3360 if (err) {
3361 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3362 __func__, err);
3363 goto out;
3364 }
3365
3366 status &= hba->ee_ctrl_mask;
3367 if (status & MASK_EE_URGENT_BKOPS) {
3368 err = ufshcd_urgent_bkops(hba);
3369 if (err < 0)
3370 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3371 __func__, err);
3372 }
3373 out:
3374 pm_runtime_put_sync(hba->dev);
3375 return;
3376 }
3377
3378 /**
3379 * ufshcd_err_handler - handle UFS errors that require s/w attention
3380 * @work: pointer to work structure
3381 */
3382 static void ufshcd_err_handler(struct work_struct *work)
3383 {
3384 struct ufs_hba *hba;
3385 unsigned long flags;
3386 u32 err_xfer = 0;
3387 u32 err_tm = 0;
3388 int err = 0;
3389 int tag;
3390
3391 hba = container_of(work, struct ufs_hba, eh_work);
3392
3393 pm_runtime_get_sync(hba->dev);
3394 ufshcd_hold(hba, false);
3395
3396 spin_lock_irqsave(hba->host->host_lock, flags);
3397 if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
3398 spin_unlock_irqrestore(hba->host->host_lock, flags);
3399 goto out;
3400 }
3401
3402 hba->ufshcd_state = UFSHCD_STATE_RESET;
3403 ufshcd_set_eh_in_progress(hba);
3404
3405 /* Complete requests that have door-bell cleared by h/w */
3406 ufshcd_transfer_req_compl(hba);
3407 ufshcd_tmc_handler(hba);
3408 spin_unlock_irqrestore(hba->host->host_lock, flags);
3409
3410 /* Clear pending transfer requests */
3411 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
3412 if (ufshcd_clear_cmd(hba, tag))
3413 err_xfer |= 1 << tag;
3414
3415 /* Clear pending task management requests */
3416 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
3417 if (ufshcd_clear_tm_cmd(hba, tag))
3418 err_tm |= 1 << tag;
3419
3420 /* Complete the requests that are cleared by s/w */
3421 spin_lock_irqsave(hba->host->host_lock, flags);
3422 ufshcd_transfer_req_compl(hba);
3423 ufshcd_tmc_handler(hba);
3424 spin_unlock_irqrestore(hba->host->host_lock, flags);
3425
3426 /* Fatal errors need reset */
3427 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
3428 ((hba->saved_err & UIC_ERROR) &&
3429 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
3430 err = ufshcd_reset_and_restore(hba);
3431 if (err) {
3432 dev_err(hba->dev, "%s: reset and restore failed\n",
3433 __func__);
3434 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3435 }
3436 /*
3437 * Inform scsi mid-layer that we did reset and allow to handle
3438 * Unit Attention properly.
3439 */
3440 scsi_report_bus_reset(hba->host, 0);
3441 hba->saved_err = 0;
3442 hba->saved_uic_err = 0;
3443 }
3444 ufshcd_clear_eh_in_progress(hba);
3445
3446 out:
3447 scsi_unblock_requests(hba->host);
3448 ufshcd_release(hba);
3449 pm_runtime_put_sync(hba->dev);
3450 }
3451
3452 /**
3453 * ufshcd_update_uic_error - check and set fatal UIC error flags.
3454 * @hba: per-adapter instance
3455 */
3456 static void ufshcd_update_uic_error(struct ufs_hba *hba)
3457 {
3458 u32 reg;
3459
3460 /* PA_INIT_ERROR is fatal and needs UIC reset */
3461 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
3462 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
3463 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
3464
3465 /* UIC NL/TL/DME errors needs software retry */
3466 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
3467 if (reg)
3468 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
3469
3470 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
3471 if (reg)
3472 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
3473
3474 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
3475 if (reg)
3476 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
3477
3478 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
3479 __func__, hba->uic_error);
3480 }
3481
3482 /**
3483 * ufshcd_check_errors - Check for errors that need s/w attention
3484 * @hba: per-adapter instance
3485 */
3486 static void ufshcd_check_errors(struct ufs_hba *hba)
3487 {
3488 bool queue_eh_work = false;
3489
3490 if (hba->errors & INT_FATAL_ERRORS)
3491 queue_eh_work = true;
3492
3493 if (hba->errors & UIC_ERROR) {
3494 hba->uic_error = 0;
3495 ufshcd_update_uic_error(hba);
3496 if (hba->uic_error)
3497 queue_eh_work = true;
3498 }
3499
3500 if (queue_eh_work) {
3501 /* handle fatal errors only when link is functional */
3502 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
3503 /* block commands from scsi mid-layer */
3504 scsi_block_requests(hba->host);
3505
3506 /* transfer error masks to sticky bits */
3507 hba->saved_err |= hba->errors;
3508 hba->saved_uic_err |= hba->uic_error;
3509
3510 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3511 schedule_work(&hba->eh_work);
3512 }
3513 }
3514 /*
3515 * if (!queue_eh_work) -
3516 * Other errors are either non-fatal where host recovers
3517 * itself without s/w intervention or errors that will be
3518 * handled by the SCSI core layer.
3519 */
3520 }
3521
3522 /**
3523 * ufshcd_tmc_handler - handle task management function completion
3524 * @hba: per adapter instance
3525 */
3526 static void ufshcd_tmc_handler(struct ufs_hba *hba)
3527 {
3528 u32 tm_doorbell;
3529
3530 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
3531 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
3532 wake_up(&hba->tm_wq);
3533 }
3534
3535 /**
3536 * ufshcd_sl_intr - Interrupt service routine
3537 * @hba: per adapter instance
3538 * @intr_status: contains interrupts generated by the controller
3539 */
3540 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3541 {
3542 hba->errors = UFSHCD_ERROR_MASK & intr_status;
3543 if (hba->errors)
3544 ufshcd_check_errors(hba);
3545
3546 if (intr_status & UFSHCD_UIC_MASK)
3547 ufshcd_uic_cmd_compl(hba, intr_status);
3548
3549 if (intr_status & UTP_TASK_REQ_COMPL)
3550 ufshcd_tmc_handler(hba);
3551
3552 if (intr_status & UTP_TRANSFER_REQ_COMPL)
3553 ufshcd_transfer_req_compl(hba);
3554 }
3555
3556 /**
3557 * ufshcd_intr - Main interrupt service routine
3558 * @irq: irq number
3559 * @__hba: pointer to adapter instance
3560 *
3561 * Returns IRQ_HANDLED - If interrupt is valid
3562 * IRQ_NONE - If invalid interrupt
3563 */
3564 static irqreturn_t ufshcd_intr(int irq, void *__hba)
3565 {
3566 u32 intr_status;
3567 irqreturn_t retval = IRQ_NONE;
3568 struct ufs_hba *hba = __hba;
3569
3570 spin_lock(hba->host->host_lock);
3571 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3572
3573 if (intr_status) {
3574 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
3575 ufshcd_sl_intr(hba, intr_status);
3576 retval = IRQ_HANDLED;
3577 }
3578 spin_unlock(hba->host->host_lock);
3579 return retval;
3580 }
3581
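/**
 * ufshcd_clear_tm_cmd - clear an outstanding task management request
 * @hba: per adapter instance
 * @tag: slot of the task management request to clear
 *
 * Clears the request in the UTP task management request list and polls the
 * task management door-bell register for up to 1 second until the host
 * controller drops the corresponding bit.
 *
 * Returns zero on success, non-zero if the door-bell did not clear in time.
 */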
3582 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
3583 {
3584 int err = 0;
3585 u32 mask = 1 << tag;
3586 unsigned long flags;
3587
3588 if (!test_bit(tag, &hba->outstanding_tasks))
3589 goto out;
3590
3591 spin_lock_irqsave(hba->host->host_lock, flags);
3592 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
3593 spin_unlock_irqrestore(hba->host->host_lock, flags);
3594
3595 /* poll for max. 1 sec to clear door bell register by h/w */
3596 err = ufshcd_wait_for_register(hba,
3597 REG_UTP_TASK_REQ_DOOR_BELL,
3598 mask, 0, 1000, 1000);
3599 out:
3600 return err;
3601 }
3602
3603 /**
3604 * ufshcd_issue_tm_cmd - issues task management commands to controller
3605 * @hba: per adapter instance
3606 * @lun_id: LUN ID to which TM command is sent
3607 * @task_id: task ID to which the TM command is applicable
3608 * @tm_function: task management function opcode
3609 * @tm_response: task management service response return value
3610 *
3611 * Returns non-zero value on error, zero on success.
3612 */
3613 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
3614 u8 tm_function, u8 *tm_response)
3615 {
3616 struct utp_task_req_desc *task_req_descp;
3617 struct utp_upiu_task_req *task_req_upiup;
3618 struct Scsi_Host *host;
3619 unsigned long flags;
3620 int free_slot;
3621 int err;
3622 int task_tag;
3623
3624 host = hba->host;
3625
3626 /*
3627 * Get free slot, sleep if slots are unavailable.
3628 * Even though we use wait_event() which sleeps indefinitely,
3629 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
3630 */
3631 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
3632 ufshcd_hold(hba, false);
3633
3634 spin_lock_irqsave(host->host_lock, flags);
3635 task_req_descp = hba->utmrdl_base_addr;
3636 task_req_descp += free_slot;
3637
3638 /* Configure task request descriptor */
3639 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
3640 task_req_descp->header.dword_2 =
3641 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
3642
3643 /* Configure task request UPIU */
3644 task_req_upiup =
3645 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
3646 task_tag = hba->nutrs + free_slot;
3647 task_req_upiup->header.dword_0 =
3648 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
3649 lun_id, task_tag);
3650 task_req_upiup->header.dword_1 =
3651 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
3652 /*
3653 * The host shall provide the same value for LUN field in the basic
3654 * header and for Input Parameter.
3655 */
3656 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
3657 task_req_upiup->input_param2 = cpu_to_be32(task_id);
3658
3659 /* send command to the controller */
3660 __set_bit(free_slot, &hba->outstanding_tasks);
3661 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
3662
3663 spin_unlock_irqrestore(host->host_lock, flags);
3664
3665 /* wait until the task management command is completed */
3666 err = wait_event_timeout(hba->tm_wq,
3667 test_bit(free_slot, &hba->tm_condition),
3668 msecs_to_jiffies(TM_CMD_TIMEOUT));
3669 if (!err) {
3670 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
3671 __func__, tm_function);
3672 if (ufshcd_clear_tm_cmd(hba, free_slot))
3673 			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
3674 __func__, free_slot);
3675 err = -ETIMEDOUT;
3676 } else {
3677 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
3678 }
3679
3680 clear_bit(free_slot, &hba->tm_condition);
3681 ufshcd_put_tm_slot(hba, free_slot);
3682 wake_up(&hba->tm_tag_wq);
3683
3684 ufshcd_release(hba);
3685 return err;
3686 }
3687
3688 /**
3689 * ufshcd_eh_device_reset_handler - device reset handler registered to
3690 * scsi layer.
3691 * @cmd: SCSI command pointer
3692 *
3693 * Returns SUCCESS/FAILED
3694 */
3695 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
3696 {
3697 struct Scsi_Host *host;
3698 struct ufs_hba *hba;
3699 unsigned int tag;
3700 u32 pos;
3701 int err;
3702 u8 resp = 0xF;
3703 struct ufshcd_lrb *lrbp;
3704 unsigned long flags;
3705
3706 host = cmd->device->host;
3707 hba = shost_priv(host);
3708 tag = cmd->request->tag;
3709
3710 lrbp = &hba->lrb[tag];
3711 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
3712 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3713 if (!err)
3714 err = resp;
3715 goto out;
3716 }
3717
3718 /* clear the commands that were pending for corresponding LUN */
3719 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
3720 if (hba->lrb[pos].lun == lrbp->lun) {
3721 err = ufshcd_clear_cmd(hba, pos);
3722 if (err)
3723 break;
3724 }
3725 }
3726 spin_lock_irqsave(host->host_lock, flags);
3727 ufshcd_transfer_req_compl(hba);
3728 spin_unlock_irqrestore(host->host_lock, flags);
3729 out:
3730 if (!err) {
3731 err = SUCCESS;
3732 } else {
3733 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3734 err = FAILED;
3735 }
3736 return err;
3737 }
3738
3739 /**
3740 * ufshcd_abort - abort a specific command
3741 * @cmd: SCSI command pointer
3742 *
3743 * Abort the pending command in device by sending UFS_ABORT_TASK task management
3744 * command, and in host controller by clearing the door-bell register. There can
3745  * be a race between the controller sending the command to the device while abort is
3746 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
3747 * really issued and then try to abort it.
3748 *
3749 * Returns SUCCESS/FAILED
3750 */
3751 static int ufshcd_abort(struct scsi_cmnd *cmd)
3752 {
3753 struct Scsi_Host *host;
3754 struct ufs_hba *hba;
3755 unsigned long flags;
3756 unsigned int tag;
3757 int err = 0;
3758 int poll_cnt;
3759 u8 resp = 0xF;
3760 struct ufshcd_lrb *lrbp;
3761 u32 reg;
3762
3763 host = cmd->device->host;
3764 hba = shost_priv(host);
3765 tag = cmd->request->tag;
3766
3767 ufshcd_hold(hba, false);
3768 /* If command is already aborted/completed, return SUCCESS */
3769 if (!(test_bit(tag, &hba->outstanding_reqs)))
3770 goto out;
3771
3772 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3773 if (!(reg & (1 << tag))) {
3774 dev_err(hba->dev,
3775 "%s: cmd was completed, but without a notifying intr, tag = %d",
3776 __func__, tag);
3777 }
3778
3779 lrbp = &hba->lrb[tag];
3780 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
3781 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3782 UFS_QUERY_TASK, &resp);
3783 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
3784 /* cmd pending in the device */
3785 break;
3786 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3787 /*
3788 * cmd not pending in the device, check if it is
3789 * in transition.
3790 */
3791 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3792 if (reg & (1 << tag)) {
3793 /* sleep for max. 200us to stabilize */
3794 usleep_range(100, 200);
3795 continue;
3796 }
3797 /* command completed already */
3798 goto out;
3799 } else {
3800 if (!err)
3801 err = resp; /* service response error */
3802 goto out;
3803 }
3804 }
3805
3806 if (!poll_cnt) {
3807 err = -EBUSY;
3808 goto out;
3809 }
3810
3811 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3812 UFS_ABORT_TASK, &resp);
3813 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3814 if (!err)
3815 err = resp; /* service response error */
3816 goto out;
3817 }
3818
3819 err = ufshcd_clear_cmd(hba, tag);
3820 if (err)
3821 goto out;
3822
3823 scsi_dma_unmap(cmd);
3824
3825 spin_lock_irqsave(host->host_lock, flags);
3826 __clear_bit(tag, &hba->outstanding_reqs);
3827 hba->lrb[tag].cmd = NULL;
3828 spin_unlock_irqrestore(host->host_lock, flags);
3829
3830 clear_bit_unlock(tag, &hba->lrb_in_use);
3831 wake_up(&hba->dev_cmd.tag_wq);
3832
3833 out:
3834 if (!err) {
3835 err = SUCCESS;
3836 } else {
3837 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3838 err = FAILED;
3839 }
3840
3841 /*
3842 * This ufshcd_release() corresponds to the original scsi cmd that got
3843 * aborted here (as we won't get any IRQ for it).
3844 */
3845 ufshcd_release(hba);
3846 return err;
3847 }
3848
3849 /**
3850 * ufshcd_host_reset_and_restore - reset and restore host controller
3851 * @hba: per-adapter instance
3852 *
3853 * Note that host controller reset may issue DME_RESET to
3854 * local and remote (device) Uni-Pro stack and the attributes
3855 * are reset to default state.
3856 *
3857 * Returns zero on success, non-zero on failure
3858 */
3859 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
3860 {
3861 int err;
3862 unsigned long flags;
3863
3864 /* Reset the host controller */
3865 spin_lock_irqsave(hba->host->host_lock, flags);
3866 ufshcd_hba_stop(hba);
3867 spin_unlock_irqrestore(hba->host->host_lock, flags);
3868
3869 err = ufshcd_hba_enable(hba);
3870 if (err)
3871 goto out;
3872
3873 /* Establish the link again and restore the device */
3874 err = ufshcd_probe_hba(hba);
3875
3876 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3877 err = -EIO;
3878 out:
3879 if (err)
3880 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
3881
3882 return err;
3883 }
3884
3885 /**
3886 * ufshcd_reset_and_restore - reset and re-initialize host/device
3887 * @hba: per-adapter instance
3888 *
3889 * Reset and recover device, host and re-establish link. This
3890 * is helpful to recover the communication in fatal error conditions.
3891 *
3892 * Returns zero on success, non-zero on failure
3893 */
3894 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
3895 {
3896 int err = 0;
3897 unsigned long flags;
3898 int retries = MAX_HOST_RESET_RETRIES;
3899
3900 do {
3901 err = ufshcd_host_reset_and_restore(hba);
3902 } while (err && --retries);
3903
3904 /*
3905 * After reset the door-bell might be cleared, complete
3906 * outstanding requests in s/w here.
3907 */
3908 spin_lock_irqsave(hba->host->host_lock, flags);
3909 ufshcd_transfer_req_compl(hba);
3910 ufshcd_tmc_handler(hba);
3911 spin_unlock_irqrestore(hba->host->host_lock, flags);
3912
3913 return err;
3914 }
3915
3916 /**
3917 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
3918  * @cmd: SCSI command pointer
3919 *
3920 * Returns SUCCESS/FAILED
3921 */
3922 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
3923 {
3924 int err;
3925 unsigned long flags;
3926 struct ufs_hba *hba;
3927
3928 hba = shost_priv(cmd->device->host);
3929
3930 ufshcd_hold(hba, false);
3931 /*
3932 * Check if there is any race with fatal error handling.
3933 * If so, wait for it to complete. Even though fatal error
3934 * handling does reset and restore in some cases, don't assume
3935 * anything out of it. We are just avoiding race here.
3936 */
3937 do {
3938 spin_lock_irqsave(hba->host->host_lock, flags);
3939 if (!(work_pending(&hba->eh_work) ||
3940 hba->ufshcd_state == UFSHCD_STATE_RESET))
3941 break;
3942 spin_unlock_irqrestore(hba->host->host_lock, flags);
3943 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
3944 flush_work(&hba->eh_work);
3945 } while (1);
3946
3947 hba->ufshcd_state = UFSHCD_STATE_RESET;
3948 ufshcd_set_eh_in_progress(hba);
3949 spin_unlock_irqrestore(hba->host->host_lock, flags);
3950
3951 err = ufshcd_reset_and_restore(hba);
3952
3953 spin_lock_irqsave(hba->host->host_lock, flags);
3954 if (!err) {
3955 err = SUCCESS;
3956 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3957 } else {
3958 err = FAILED;
3959 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3960 }
3961 ufshcd_clear_eh_in_progress(hba);
3962 spin_unlock_irqrestore(hba->host->host_lock, flags);
3963
3964 ufshcd_release(hba);
3965 return err;
3966 }
3967
3968 /**
3969 * ufshcd_get_max_icc_level - calculate the ICC level
3970 * @sup_curr_uA: max. current supported by the regulator
3971 * @start_scan: row at the desc table to start scan from
3972 * @buff: power descriptor buffer
3973 *
3974 * Returns calculated max ICC level for specific regulator
3975 */
3976 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
3977 {
3978 int i;
3979 int curr_uA;
3980 u16 data;
3981 u16 unit;
3982
3983 for (i = start_scan; i >= 0; i--) {
3984 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
3985 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
3986 ATTR_ICC_LVL_UNIT_OFFSET;
3987 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
3988 switch (unit) {
3989 case UFSHCD_NANO_AMP:
3990 curr_uA = curr_uA / 1000;
3991 break;
3992 case UFSHCD_MILI_AMP:
3993 curr_uA = curr_uA * 1000;
3994 break;
3995 case UFSHCD_AMP:
3996 curr_uA = curr_uA * 1000 * 1000;
3997 break;
3998 case UFSHCD_MICRO_AMP:
3999 default:
4000 break;
4001 }
4002 if (sup_curr_uA >= curr_uA)
4003 break;
4004 }
4005 if (i < 0) {
4006 i = 0;
4007 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
4008 }
4009
4010 return (u32)i;
4011 }
4012
4013 /**
4014  * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
4015 * In case regulators are not initialized we'll return 0
4016 * @hba: per-adapter instance
4017 * @desc_buf: power descriptor buffer to extract ICC levels from.
4018  * @len: length of desc_buf
4019 *
4020 * Returns calculated ICC level
4021 */
4022 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4023 u8 *desc_buf, int len)
4024 {
4025 u32 icc_level = 0;
4026
4027 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4028 !hba->vreg_info.vccq2) {
4029 dev_err(hba->dev,
4030 "%s: Regulator capability was not set, actvIccLevel=%d",
4031 __func__, icc_level);
4032 goto out;
4033 }
4034
4035 if (hba->vreg_info.vcc)
4036 icc_level = ufshcd_get_max_icc_level(
4037 hba->vreg_info.vcc->max_uA,
4038 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4039 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4040
4041 if (hba->vreg_info.vccq)
4042 icc_level = ufshcd_get_max_icc_level(
4043 hba->vreg_info.vccq->max_uA,
4044 icc_level,
4045 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4046
4047 if (hba->vreg_info.vccq2)
4048 icc_level = ufshcd_get_max_icc_level(
4049 hba->vreg_info.vccq2->max_uA,
4050 icc_level,
4051 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4052 out:
4053 return icc_level;
4054 }
4055
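/**
 * ufshcd_init_icc_levels - read the power descriptor and set bActiveICCLevel
 * @hba: per-adapter instance
 *
 * Reads the device power descriptor, derives the maximum active ICC level
 * that the host regulators can support and writes it to the device's
 * bActiveICCLevel attribute.
 */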
4056 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4057 {
4058 int ret;
4059 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4060 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4061
4062 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4063 if (ret) {
4064 dev_err(hba->dev,
4065 "%s: Failed reading power descriptor.len = %d ret = %d",
4066 __func__, buff_len, ret);
4067 return;
4068 }
4069
4070 hba->init_prefetch_data.icc_level =
4071 ufshcd_find_max_sup_active_icc_level(hba,
4072 desc_buf, buff_len);
4073 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4074 __func__, hba->init_prefetch_data.icc_level);
4075
4076 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4077 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4078 &hba->init_prefetch_data.icc_level);
4079
4080 if (ret)
4081 dev_err(hba->dev,
4082 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4083 			__func__, hba->init_prefetch_data.icc_level, ret);
4084
4085 }
4086
4087 /**
4088 * ufshcd_scsi_add_wlus - Adds required W-LUs
4089 * @hba: per-adapter instance
4090 *
4091 * UFS device specification requires the UFS devices to support 4 well known
4092 * logical units:
4093 * "REPORT_LUNS" (address: 01h)
4094 * "UFS Device" (address: 50h)
4095 * "RPMB" (address: 44h)
4096 * "BOOT" (address: 30h)
4097 * UFS device's power management needs to be controlled by "POWER CONDITION"
4098 * field of SSU (START STOP UNIT) command. But this "power condition" field
4099  * will take effect only when it's sent to "UFS device" well known logical unit
4100  * hence we require the scsi_device instance to represent this logical unit in
4101  * order for the UFS host driver to send the SSU command for power management.
4102  *
4103  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
4104  * Block) LU so that a user space process can control this LU. User space may
4105  * also want to have access to BOOT LU.
4106  *
4107  * This function adds scsi device instances for each of the well known LUs
4108  * (except "REPORT LUNS" LU).
4109 *
4110 * Returns zero on success (all required W-LUs are added successfully),
4111 * non-zero error value on failure (if failed to add any of the required W-LU).
4112 */
4113 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4114 {
4115 int ret = 0;
4116 struct scsi_device *sdev_rpmb;
4117 struct scsi_device *sdev_boot;
4118
4119 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4120 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4121 if (IS_ERR(hba->sdev_ufs_device)) {
4122 ret = PTR_ERR(hba->sdev_ufs_device);
4123 hba->sdev_ufs_device = NULL;
4124 goto out;
4125 }
4126 scsi_device_put(hba->sdev_ufs_device);
4127
4128 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4129 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4130 if (IS_ERR(sdev_boot)) {
4131 ret = PTR_ERR(sdev_boot);
4132 goto remove_sdev_ufs_device;
4133 }
4134 scsi_device_put(sdev_boot);
4135
4136 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4137 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4138 if (IS_ERR(sdev_rpmb)) {
4139 ret = PTR_ERR(sdev_rpmb);
4140 goto remove_sdev_boot;
4141 }
4142 scsi_device_put(sdev_rpmb);
4143 goto out;
4144
4145 remove_sdev_boot:
4146 scsi_remove_device(sdev_boot);
4147 remove_sdev_ufs_device:
4148 scsi_remove_device(hba->sdev_ufs_device);
4149 out:
4150 return ret;
4151 }
4152
4153 /**
4154 * ufshcd_probe_hba - probe hba to detect device and initialize
4155 * @hba: per-adapter instance
4156 *
4157 * Execute link-startup and verify device initialization
4158 */
4159 static int ufshcd_probe_hba(struct ufs_hba *hba)
4160 {
4161 int ret;
4162
4163 ret = ufshcd_link_startup(hba);
4164 if (ret)
4165 goto out;
4166
4167 ufshcd_init_pwr_info(hba);
4168
4169 /* UniPro link is active now */
4170 ufshcd_set_link_active(hba);
4171
4172 ret = ufshcd_verify_dev_init(hba);
4173 if (ret)
4174 goto out;
4175
4176 ret = ufshcd_complete_dev_init(hba);
4177 if (ret)
4178 goto out;
4179
4180 /* UFS device is also active now */
4181 ufshcd_set_ufs_dev_active(hba);
4182 ufshcd_force_reset_auto_bkops(hba);
4183 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4184 hba->wlun_dev_clr_ua = true;
4185
4186 if (ufshcd_get_max_pwr_mode(hba)) {
4187 dev_err(hba->dev,
4188 "%s: Failed getting max supported power mode\n",
4189 __func__);
4190 } else {
4191 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
4192 if (ret)
4193 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
4194 __func__, ret);
4195 }
4196
4197 /*
4198 * If we are in error handling context or in power management callbacks
4199 * context, no need to scan the host
4200 */
4201 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4202 bool flag;
4203
4204 /* clear any previous UFS device information */
4205 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
4206 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4207 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
4208 hba->dev_info.f_power_on_wp_en = flag;
4209
4210 if (!hba->is_init_prefetch)
4211 ufshcd_init_icc_levels(hba);
4212
4213 /* Add required well known logical units to scsi mid layer */
4214 if (ufshcd_scsi_add_wlus(hba))
4215 goto out;
4216
4217 scsi_scan_host(hba->host);
4218 pm_runtime_put_sync(hba->dev);
4219 }
4220
4221 if (!hba->is_init_prefetch)
4222 hba->is_init_prefetch = true;
4223
4224 /* Resume devfreq after UFS device is detected */
4225 if (ufshcd_is_clkscaling_enabled(hba))
4226 devfreq_resume_device(hba->devfreq);
4227
4228 out:
4229 /*
4230 * If we failed to initialize the device or the device is not
4231 * present, turn off the power/clocks etc.
4232 */
4233 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4234 pm_runtime_put_sync(hba->dev);
4235 ufshcd_hba_exit(hba);
4236 }
4237
4238 return ret;
4239 }
4240
4241 /**
4242 * ufshcd_async_scan - asynchronous execution for probing hba
4243 * @data: data pointer to pass to this function
4244 * @cookie: cookie data
4245 */
4246 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
4247 {
4248 struct ufs_hba *hba = (struct ufs_hba *)data;
4249
4250 ufshcd_probe_hba(hba);
4251 }
4252
4253 static struct scsi_host_template ufshcd_driver_template = {
4254 .module = THIS_MODULE,
4255 .name = UFSHCD,
4256 .proc_name = UFSHCD,
4257 .queuecommand = ufshcd_queuecommand,
4258 .slave_alloc = ufshcd_slave_alloc,
4259 .slave_configure = ufshcd_slave_configure,
4260 .slave_destroy = ufshcd_slave_destroy,
4261 .change_queue_depth = ufshcd_change_queue_depth,
4262 .eh_abort_handler = ufshcd_abort,
4263 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
4264 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
4265 .this_id = -1,
4266 .sg_tablesize = SG_ALL,
4267 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
4268 .can_queue = UFSHCD_CAN_QUEUE,
4269 .max_host_blocked = 1,
4270 };
4271
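/**
 * ufshcd_config_vreg_load - set the expected load on a UFS supply
 * @dev: pointer to device handle
 * @vreg: regulator to configure
 * @ua: expected load current in micro-amps
 *
 * Notifies the regulator framework of the expected load so that it can pick
 * an appropriate regulator operating mode.
 *
 * Returns zero on success, negative error value on failure.
 */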
4272 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4273 int ua)
4274 {
4275 int ret = 0;
4276 struct regulator *reg = vreg->reg;
4277 const char *name = vreg->name;
4278
4279 BUG_ON(!vreg);
4280
4281 ret = regulator_set_optimum_mode(reg, ua);
4282 if (ret >= 0) {
4283 /*
4284 * regulator_set_optimum_mode() returns new regulator
4285 * mode upon success.
4286 */
4287 ret = 0;
4288 } else {
4289 dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
4290 __func__, name, ua, ret);
4291 }
4292
4293 return ret;
4294 }
4295
4296 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4297 struct ufs_vreg *vreg)
4298 {
4299 if (!vreg)
4300 return 0;
4301
4302 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4303 }
4304
4305 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4306 struct ufs_vreg *vreg)
4307 {
4308 if (!vreg)
4309 return 0;
4310
4311 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4312 }
4313
4314 static int ufshcd_config_vreg(struct device *dev,
4315 struct ufs_vreg *vreg, bool on)
4316 {
4317 int ret = 0;
4318 struct regulator *reg;
4319 const char *name;
4320 int min_uV, uA_load;
4321
4322 BUG_ON(!vreg);
4323
4324 reg = vreg->reg;
4325 name = vreg->name;
4326
4327 if (regulator_count_voltages(reg) > 0) {
4328 min_uV = on ? vreg->min_uV : 0;
4329 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
4330 if (ret) {
4331 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
4332 __func__, name, ret);
4333 goto out;
4334 }
4335
4336 uA_load = on ? vreg->max_uA : 0;
4337 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
4338 if (ret)
4339 goto out;
4340 }
4341 out:
4342 return ret;
4343 }
4344
4345 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
4346 {
4347 int ret = 0;
4348
4349 if (!vreg || vreg->enabled)
4350 goto out;
4351
4352 ret = ufshcd_config_vreg(dev, vreg, true);
4353 if (!ret)
4354 ret = regulator_enable(vreg->reg);
4355
4356 if (!ret)
4357 vreg->enabled = true;
4358 else
4359 dev_err(dev, "%s: %s enable failed, err=%d\n",
4360 __func__, vreg->name, ret);
4361 out:
4362 return ret;
4363 }
4364
4365 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
4366 {
4367 int ret = 0;
4368
4369 if (!vreg || !vreg->enabled)
4370 goto out;
4371
4372 ret = regulator_disable(vreg->reg);
4373
4374 if (!ret) {
4375 /* ignore errors on applying disable config */
4376 ufshcd_config_vreg(dev, vreg, false);
4377 vreg->enabled = false;
4378 } else {
4379 dev_err(dev, "%s: %s disable failed, err=%d\n",
4380 __func__, vreg->name, ret);
4381 }
4382 out:
4383 return ret;
4384 }
4385
4386 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
4387 {
4388 int ret = 0;
4389 struct device *dev = hba->dev;
4390 struct ufs_vreg_info *info = &hba->vreg_info;
4391
4392 if (!info)
4393 goto out;
4394
4395 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
4396 if (ret)
4397 goto out;
4398
4399 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
4400 if (ret)
4401 goto out;
4402
4403 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
4404 if (ret)
4405 goto out;
4406
4407 out:
4408 if (ret) {
4409 ufshcd_toggle_vreg(dev, info->vccq2, false);
4410 ufshcd_toggle_vreg(dev, info->vccq, false);
4411 ufshcd_toggle_vreg(dev, info->vcc, false);
4412 }
4413 return ret;
4414 }
4415
4416 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
4417 {
4418 struct ufs_vreg_info *info = &hba->vreg_info;
4419
4420 if (info)
4421 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
4422
4423 return 0;
4424 }
4425
4426 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
4427 {
4428 int ret = 0;
4429
4430 if (!vreg)
4431 goto out;
4432
4433 vreg->reg = devm_regulator_get(dev, vreg->name);
4434 if (IS_ERR(vreg->reg)) {
4435 ret = PTR_ERR(vreg->reg);
4436 dev_err(dev, "%s: %s get failed, err=%d\n",
4437 __func__, vreg->name, ret);
4438 }
4439 out:
4440 return ret;
4441 }
4442
4443 static int ufshcd_init_vreg(struct ufs_hba *hba)
4444 {
4445 int ret = 0;
4446 struct device *dev = hba->dev;
4447 struct ufs_vreg_info *info = &hba->vreg_info;
4448
4449 if (!info)
4450 goto out;
4451
4452 ret = ufshcd_get_vreg(dev, info->vcc);
4453 if (ret)
4454 goto out;
4455
4456 ret = ufshcd_get_vreg(dev, info->vccq);
4457 if (ret)
4458 goto out;
4459
4460 ret = ufshcd_get_vreg(dev, info->vccq2);
4461 out:
4462 return ret;
4463 }
4464
4465 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4466 {
4467 struct ufs_vreg_info *info = &hba->vreg_info;
4468
4469 if (info)
4470 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
4471
4472 return 0;
4473 }
4474
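/**
 * __ufshcd_setup_clocks - enable or disable the clocks listed for this host
 * @hba: per adapter instance
 * @on: true to prepare and enable the clocks, false to disable them
 * @skip_ref_clk: if true, leave the device reference clock ("ref_clk") as is
 *
 * Also invokes the vendor specific setup_clocks callback. On failure, any
 * clocks that were already enabled are disabled again.
 *
 * Returns zero on success, non-zero on failure.
 */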
4475 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4476 bool skip_ref_clk)
4477 {
4478 int ret = 0;
4479 struct ufs_clk_info *clki;
4480 struct list_head *head = &hba->clk_list_head;
4481 unsigned long flags;
4482
4483 if (!head || list_empty(head))
4484 goto out;
4485
4486 list_for_each_entry(clki, head, list) {
4487 if (!IS_ERR_OR_NULL(clki->clk)) {
4488 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
4489 continue;
4490
4491 if (on && !clki->enabled) {
4492 ret = clk_prepare_enable(clki->clk);
4493 if (ret) {
4494 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
4495 __func__, clki->name, ret);
4496 goto out;
4497 }
4498 } else if (!on && clki->enabled) {
4499 clk_disable_unprepare(clki->clk);
4500 }
4501 clki->enabled = on;
4502 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
4503 clki->name, on ? "en" : "dis");
4504 }
4505 }
4506
4507 if (hba->vops && hba->vops->setup_clocks)
4508 ret = hba->vops->setup_clocks(hba, on);
4509 out:
4510 if (ret) {
4511 list_for_each_entry(clki, head, list) {
4512 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4513 clk_disable_unprepare(clki->clk);
4514 }
4515 } else if (on) {
4516 spin_lock_irqsave(hba->host->host_lock, flags);
4517 hba->clk_gating.state = CLKS_ON;
4518 spin_unlock_irqrestore(hba->host->host_lock, flags);
4519 }
4520 return ret;
4521 }
4522
4523 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
4524 {
4525 return __ufshcd_setup_clocks(hba, on, false);
4526 }
4527
4528 static int ufshcd_init_clocks(struct ufs_hba *hba)
4529 {
4530 int ret = 0;
4531 struct ufs_clk_info *clki;
4532 struct device *dev = hba->dev;
4533 struct list_head *head = &hba->clk_list_head;
4534
4535 if (!head || list_empty(head))
4536 goto out;
4537
4538 list_for_each_entry(clki, head, list) {
4539 if (!clki->name)
4540 continue;
4541
4542 clki->clk = devm_clk_get(dev, clki->name);
4543 if (IS_ERR(clki->clk)) {
4544 ret = PTR_ERR(clki->clk);
4545 dev_err(dev, "%s: %s clk get failed, %d\n",
4546 __func__, clki->name, ret);
4547 goto out;
4548 }
4549
4550 if (clki->max_freq) {
4551 ret = clk_set_rate(clki->clk, clki->max_freq);
4552 if (ret) {
4553 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
4554 __func__, clki->name,
4555 clki->max_freq, ret);
4556 goto out;
4557 }
4558 clki->curr_freq = clki->max_freq;
4559 }
4560 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
4561 clki->name, clk_get_rate(clki->clk));
4562 }
4563 out:
4564 return ret;
4565 }
4566
4567 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
4568 {
4569 int err = 0;
4570
4571 if (!hba->vops)
4572 goto out;
4573
4574 if (hba->vops->init) {
4575 err = hba->vops->init(hba);
4576 if (err)
4577 goto out;
4578 }
4579
4580 if (hba->vops->setup_regulators) {
4581 err = hba->vops->setup_regulators(hba, true);
4582 if (err)
4583 goto out_exit;
4584 }
4585
4586 goto out;
4587
4588 out_exit:
4589 if (hba->vops->exit)
4590 hba->vops->exit(hba);
4591 out:
4592 if (err)
4593 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
4594 __func__, hba->vops ? hba->vops->name : "", err);
4595 return err;
4596 }
4597
4598 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
4599 {
4600 if (!hba->vops)
4601 return;
4602
4603 if (hba->vops->setup_clocks)
4604 hba->vops->setup_clocks(hba, false);
4605
4606 if (hba->vops->setup_regulators)
4607 hba->vops->setup_regulators(hba, false);
4608
4609 if (hba->vops->exit)
4610 hba->vops->exit(hba);
4611 }
4612
4613 static int ufshcd_hba_init(struct ufs_hba *hba)
4614 {
4615 int err;
4616
4617 /*
4618 * Handle host controller power separately from the UFS device power
4619 * rails as it will help controlling the UFS host controller power
4620 	 * collapse easily, which is different from UFS device power collapse.
4621 * Also, enable the host controller power before we go ahead with rest
4622 * of the initialization here.
4623 */
4624 err = ufshcd_init_hba_vreg(hba);
4625 if (err)
4626 goto out;
4627
4628 err = ufshcd_setup_hba_vreg(hba, true);
4629 if (err)
4630 goto out;
4631
4632 err = ufshcd_init_clocks(hba);
4633 if (err)
4634 goto out_disable_hba_vreg;
4635
4636 err = ufshcd_setup_clocks(hba, true);
4637 if (err)
4638 goto out_disable_hba_vreg;
4639
4640 err = ufshcd_init_vreg(hba);
4641 if (err)
4642 goto out_disable_clks;
4643
4644 err = ufshcd_setup_vreg(hba, true);
4645 if (err)
4646 goto out_disable_clks;
4647
4648 err = ufshcd_variant_hba_init(hba);
4649 if (err)
4650 goto out_disable_vreg;
4651
4652 hba->is_powered = true;
4653 goto out;
4654
4655 out_disable_vreg:
4656 ufshcd_setup_vreg(hba, false);
4657 out_disable_clks:
4658 ufshcd_setup_clocks(hba, false);
4659 out_disable_hba_vreg:
4660 ufshcd_setup_hba_vreg(hba, false);
4661 out:
4662 return err;
4663 }
4664
4665 static void ufshcd_hba_exit(struct ufs_hba *hba)
4666 {
4667 if (hba->is_powered) {
4668 ufshcd_variant_hba_exit(hba);
4669 ufshcd_setup_vreg(hba, false);
4670 ufshcd_setup_clocks(hba, false);
4671 ufshcd_setup_hba_vreg(hba, false);
4672 hba->is_powered = false;
4673 }
4674 }
4675
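/*
 * ufshcd_send_request_sense - issue REQUEST SENSE to the UFS device W-LU.
 * Used to clear the UNIT ATTENTION condition that the device W-LU raises
 * after a reset/power cycle, so that a subsequent START STOP UNIT command
 * does not fail with a check condition.
 */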
4676 static int
4677 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
4678 {
4679 unsigned char cmd[6] = {REQUEST_SENSE,
4680 0,
4681 0,
4682 0,
4683 SCSI_SENSE_BUFFERSIZE,
4684 0};
4685 char *buffer;
4686 int ret;
4687
4688 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4689 if (!buffer) {
4690 ret = -ENOMEM;
4691 goto out;
4692 }
4693
4694 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
4695 SCSI_SENSE_BUFFERSIZE, NULL,
4696 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
4697 if (ret)
4698 pr_err("%s: failed with err %d\n", __func__, ret);
4699
4700 kfree(buffer);
4701 out:
4702 return ret;
4703 }
4704
4705 /**
4706 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
4707 * power mode
4708 * @hba: per adapter instance
4709 * @pwr_mode: device power mode to set
4710 *
4711 * Returns 0 if requested power mode is set successfully
4712 * Returns non-zero if failed to set the requested power mode
4713 */
4714 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4715 enum ufs_dev_pwr_mode pwr_mode)
4716 {
4717 unsigned char cmd[6] = { START_STOP };
4718 struct scsi_sense_hdr sshdr;
4719 struct scsi_device *sdp;
4720 unsigned long flags;
4721 int ret;
4722
4723 spin_lock_irqsave(hba->host->host_lock, flags);
4724 sdp = hba->sdev_ufs_device;
4725 if (sdp) {
4726 ret = scsi_device_get(sdp);
4727 if (!ret && !scsi_device_online(sdp)) {
4728 ret = -ENODEV;
4729 scsi_device_put(sdp);
4730 }
4731 } else {
4732 ret = -ENODEV;
4733 }
4734 spin_unlock_irqrestore(hba->host->host_lock, flags);
4735
4736 if (ret)
4737 return ret;
4738
4739 /*
4740 * If scsi commands fail, the scsi mid-layer schedules scsi error-
4741 * handling, which would wait for host to be resumed. Since we know
4742 * we are functional while we are here, skip host resume in error
4743 * handling context.
4744 */
4745 hba->host->eh_noresume = 1;
4746 if (hba->wlun_dev_clr_ua) {
4747 ret = ufshcd_send_request_sense(hba, sdp);
4748 if (ret)
4749 goto out;
4750 /* Unit attention condition is cleared now */
4751 hba->wlun_dev_clr_ua = false;
4752 }
4753
4754 cmd[4] = pwr_mode << 4;
4755
4756 /*
4757 * Current function would be generally called from the power management
4758 * callbacks hence set the REQ_PM flag so that it doesn't resume the
4759 	 * already suspended children.
4760 */
4761 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
4762 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
4763 if (ret) {
4764 sdev_printk(KERN_WARNING, sdp,
4765 "START_STOP failed for power mode: %d\n", pwr_mode);
4766 scsi_show_result(ret);
4767 if (driver_byte(ret) & DRIVER_SENSE) {
4768 scsi_show_sense_hdr(&sshdr);
4769 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
4770 }
4771 }
4772
4773 if (!ret)
4774 hba->curr_dev_pwr_mode = pwr_mode;
4775 out:
4776 scsi_device_put(sdp);
4777 hba->host->eh_noresume = 0;
4778 return ret;
4779 }
4780
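/**
 * ufshcd_link_state_transition - put the UniPro link in the requested state
 * @hba: per adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: if set, do not turn the link off while auto-bkops is
 *                   enabled, since that would also power off the device
 *
 * Returns zero on success, non-zero if entering Hibern8 fails.
 */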
4781 static int ufshcd_link_state_transition(struct ufs_hba *hba,
4782 enum uic_link_state req_link_state,
4783 int check_for_bkops)
4784 {
4785 int ret = 0;
4786
4787 if (req_link_state == hba->uic_link_state)
4788 return 0;
4789
4790 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
4791 ret = ufshcd_uic_hibern8_enter(hba);
4792 if (!ret)
4793 ufshcd_set_link_hibern8(hba);
4794 else
4795 goto out;
4796 }
4797 /*
4798 * If autobkops is enabled, link can't be turned off because
4799 * turning off the link would also turn off the device.
4800 */
4801 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
4802 (!check_for_bkops || (check_for_bkops &&
4803 !hba->auto_bkops_enabled))) {
4804 /*
4805 * Change controller state to "reset state" which
4806 * should also put the link in off/reset state
4807 */
4808 ufshcd_hba_stop(hba);
4809 /*
4810 * TODO: Check if we need any delay to make sure that
4811 * controller is reset
4812 */
4813 ufshcd_set_link_off(hba);
4814 }
4815
4816 out:
4817 return ret;
4818 }
4819
4820 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
4821 {
4822 /*
4823 	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to save
4824 	 * some power.
4825 *
4826 * If UFS device and link is in OFF state, all power supplies (VCC,
4827 * VCCQ, VCCQ2) can be turned off if power on write protect is not
4828 * required. If UFS link is inactive (Hibern8 or OFF state) and device
4829 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
4830 *
4831 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
4832 * in low power state which would save some power.
4833 */
4834 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4835 !hba->dev_info.is_lu_power_on_wp) {
4836 ufshcd_setup_vreg(hba, false);
4837 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4838 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4839 if (!ufshcd_is_link_active(hba)) {
4840 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4841 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
4842 }
4843 }
4844 }
4845
4846 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
4847 {
4848 int ret = 0;
4849
4850 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4851 !hba->dev_info.is_lu_power_on_wp) {
4852 ret = ufshcd_setup_vreg(hba, true);
4853 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4854 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
4855 if (!ret && !ufshcd_is_link_active(hba)) {
4856 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
4857 if (ret)
4858 goto vcc_disable;
4859 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
4860 if (ret)
4861 goto vccq_lpm;
4862 }
4863 }
4864 goto out;
4865
4866 vccq_lpm:
4867 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4868 vcc_disable:
4869 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4870 out:
4871 return ret;
4872 }
4873
4874 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
4875 {
4876 if (ufshcd_is_link_off(hba))
4877 ufshcd_setup_hba_vreg(hba, false);
4878 }
4879
4880 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
4881 {
4882 if (ufshcd_is_link_off(hba))
4883 ufshcd_setup_hba_vreg(hba, true);
4884 }
4885
4886 /**
4887 * ufshcd_suspend - helper function for suspend operations
4888 * @hba: per adapter instance
4889 * @pm_op: desired low power operation type
4890 *
4891 * This function will try to put the UFS device and link into low power
4892 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
4893 * (System PM level).
4894 *
4895 * If this function is called during shutdown, it will make sure that
4896  * both UFS device and UFS link are powered off.
4897 *
4898 * NOTE: UFS device & link must be active before we enter in this function.
4899 *
4900 * Returns 0 for success and non-zero for failure
4901 */
4902 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
4903 {
4904 int ret = 0;
4905 enum ufs_pm_level pm_lvl;
4906 enum ufs_dev_pwr_mode req_dev_pwr_mode;
4907 enum uic_link_state req_link_state;
4908
4909 hba->pm_op_in_progress = 1;
4910 if (!ufshcd_is_shutdown_pm(pm_op)) {
4911 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
4912 hba->rpm_lvl : hba->spm_lvl;
4913 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
4914 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
4915 } else {
4916 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
4917 req_link_state = UIC_LINK_OFF_STATE;
4918 }
4919
4920 /*
4921 * If we can't transition into any of the low power modes
4922 * just gate the clocks.
4923 */
4924 ufshcd_hold(hba, false);
4925 hba->clk_gating.is_suspended = true;
4926
4927 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
4928 req_link_state == UIC_LINK_ACTIVE_STATE) {
4929 goto disable_clks;
4930 }
4931
4932 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
4933 (req_link_state == hba->uic_link_state))
4934 goto out;
4935
4936 /* UFS device & link must be active before we enter in this function */
4937 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
4938 ret = -EINVAL;
4939 goto out;
4940 }
4941
4942 if (ufshcd_is_runtime_pm(pm_op)) {
4943 if (ufshcd_can_autobkops_during_suspend(hba)) {
4944 /*
4945 * The device is idle with no requests in the queue,
4946 * allow background operations if bkops status shows
4947 * that performance might be impacted.
4948 */
4949 ret = ufshcd_urgent_bkops(hba);
4950 if (ret)
4951 goto enable_gating;
4952 } else {
4953 /* make sure that auto bkops is disabled */
4954 ufshcd_disable_auto_bkops(hba);
4955 }
4956 }
4957
4958 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
4959 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
4960 !ufshcd_is_runtime_pm(pm_op))) {
4961 /* ensure that bkops is disabled */
4962 ufshcd_disable_auto_bkops(hba);
4963 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
4964 if (ret)
4965 goto enable_gating;
4966 }
4967
4968 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
4969 if (ret)
4970 goto set_dev_active;
4971
4972 ufshcd_vreg_set_lpm(hba);
4973
4974 disable_clks:
4975 /*
4976 	 * The clock scaling needs access to controller registers. Hence, wait
4977 * for pending clock scaling work to be done before clocks are
4978 * turned off.
4979 */
4980 if (ufshcd_is_clkscaling_enabled(hba)) {
4981 devfreq_suspend_device(hba->devfreq);
4982 hba->clk_scaling.window_start_t = 0;
4983 }
4984 /*
4985 * Call vendor specific suspend callback. As these callbacks may access
4986 * vendor specific host controller register space call them before the
4987 * host clocks are ON.
4988 */
4989 if (hba->vops && hba->vops->suspend) {
4990 ret = hba->vops->suspend(hba, pm_op);
4991 if (ret)
4992 goto set_link_active;
4993 }
4994
4995 if (hba->vops && hba->vops->setup_clocks) {
4996 ret = hba->vops->setup_clocks(hba, false);
4997 if (ret)
4998 goto vops_resume;
4999 }
5000
5001 if (!ufshcd_is_link_active(hba))
5002 ufshcd_setup_clocks(hba, false);
5003 else
5004 /* If link is active, device ref_clk can't be switched off */
5005 __ufshcd_setup_clocks(hba, false, true);
5006
5007 hba->clk_gating.state = CLKS_OFF;
5008 /*
5009 	 * Disable the host irq as there won't be any host controller
5010 	 * transaction expected till resume.
5011 */
5012 ufshcd_disable_irq(hba);
5013 /* Put the host controller in low power mode if possible */
5014 ufshcd_hba_vreg_set_lpm(hba);
5015 goto out;
5016
5017 vops_resume:
5018 if (hba->vops && hba->vops->resume)
5019 hba->vops->resume(hba, pm_op);
5020 set_link_active:
5021 ufshcd_vreg_set_hpm(hba);
5022 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
5023 ufshcd_set_link_active(hba);
5024 else if (ufshcd_is_link_off(hba))
5025 ufshcd_host_reset_and_restore(hba);
5026 set_dev_active:
5027 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
5028 ufshcd_disable_auto_bkops(hba);
5029 enable_gating:
5030 hba->clk_gating.is_suspended = false;
5031 ufshcd_release(hba);
5032 out:
5033 hba->pm_op_in_progress = 0;
5034 return ret;
5035 }
5036
5037 /**
5038 * ufshcd_resume - helper function for resume operations
5039 * @hba: per adapter instance
5040 * @pm_op: runtime PM or system PM
5041 *
5042 * This function basically brings the UFS device, UniPro link and controller
5043 * to active state.
5044 *
5045 * Returns 0 for success and non-zero for failure
5046 */
5047 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5048 {
5049 int ret;
5050 enum uic_link_state old_link_state;
5051
5052 hba->pm_op_in_progress = 1;
5053 old_link_state = hba->uic_link_state;
5054
5055 ufshcd_hba_vreg_set_hpm(hba);
5056 /* Make sure clocks are enabled before accessing controller */
5057 ret = ufshcd_setup_clocks(hba, true);
5058 if (ret)
5059 goto out;
5060
5061 /* enable the host irq as host controller would be active soon */
5062 ret = ufshcd_enable_irq(hba);
5063 if (ret)
5064 goto disable_irq_and_vops_clks;
5065
5066 ret = ufshcd_vreg_set_hpm(hba);
5067 if (ret)
5068 goto disable_irq_and_vops_clks;
5069
5070 /*
5071 * Call vendor specific resume callback. As these callbacks may access
5072 * vendor specific host controller register space call them when the
5073 * host clocks are ON.
5074 */
5075 if (hba->vops && hba->vops->resume) {
5076 ret = hba->vops->resume(hba, pm_op);
5077 if (ret)
5078 goto disable_vreg;
5079 }
5080
5081 if (ufshcd_is_link_hibern8(hba)) {
5082 ret = ufshcd_uic_hibern8_exit(hba);
5083 if (!ret)
5084 ufshcd_set_link_active(hba);
5085 else
5086 goto vendor_suspend;
5087 } else if (ufshcd_is_link_off(hba)) {
5088 ret = ufshcd_host_reset_and_restore(hba);
5089 /*
5090 * ufshcd_host_reset_and_restore() should have already
5091 * set the link state as active
5092 */
5093 if (ret || !ufshcd_is_link_active(hba))
5094 goto vendor_suspend;
5095 }
5096
5097 if (!ufshcd_is_ufs_dev_active(hba)) {
5098 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
5099 if (ret)
5100 goto set_old_link_state;
5101 }
5102
5103 /*
5104 * If BKOPs operations are urgently needed at this moment then
5105 * keep auto-bkops enabled or else disable it.
5106 */
5107 ufshcd_urgent_bkops(hba);
5108 hba->clk_gating.is_suspended = false;
5109
5110 if (ufshcd_is_clkscaling_enabled(hba))
5111 devfreq_resume_device(hba->devfreq);
5112
5113 /* Schedule clock gating in case of no access to UFS device yet */
5114 ufshcd_release(hba);
5115 goto out;
5116
5117 set_old_link_state:
5118 ufshcd_link_state_transition(hba, old_link_state, 0);
5119 vendor_suspend:
5120 if (hba->vops && hba->vops->suspend)
5121 hba->vops->suspend(hba, pm_op);
5122 disable_vreg:
5123 ufshcd_vreg_set_lpm(hba);
5124 disable_irq_and_vops_clks:
5125 ufshcd_disable_irq(hba);
5126 ufshcd_setup_clocks(hba, false);
5127 out:
5128 hba->pm_op_in_progress = 0;
5129 return ret;
5130 }
5131
5132 /**
5133 * ufshcd_system_suspend - system suspend routine
5134 * @hba: per adapter instance
5136 *
5137 * Check the description of ufshcd_suspend() function for more details.
5138 *
5139 * Returns 0 for success and non-zero for failure
5140 */
5141 int ufshcd_system_suspend(struct ufs_hba *hba)
5142 {
5143 int ret = 0;
5144
5145 if (!hba || !hba->is_powered)
5146 return 0;
5147
5148 if (pm_runtime_suspended(hba->dev)) {
5149 if (hba->rpm_lvl == hba->spm_lvl)
5150 /*
5151 			 * There is a possibility that the device may still be
5152 			 * in active state during the runtime suspend.
5153 */
5154 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
5155 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
5156 goto out;
5157
5158 /*
5159 		 * UFS device and/or UFS link low power states during runtime
5160 		 * suspend seem to be different from what is expected during
5161 		 * system suspend. Hence runtime resume the device & link and
5162 		 * let the system suspend low power states take effect.
5163 		 * TODO: If resume takes longer time, we might optimize it in
5164 		 * the future by not resuming everything if possible.
5165 */
5166 ret = ufshcd_runtime_resume(hba);
5167 if (ret)
5168 goto out;
5169 }
5170
5171 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
5172 out:
5173 if (!ret)
5174 hba->is_sys_suspended = true;
5175 return ret;
5176 }
5177 EXPORT_SYMBOL(ufshcd_system_suspend);
5178
5179 /**
5180 * ufshcd_system_resume - system resume routine
5181 * @hba: per adapter instance
5182 *
5183 * Returns 0 for success and non-zero for failure
5184 */
5185
5186 int ufshcd_system_resume(struct ufs_hba *hba)
5187 {
5188 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
5189 /*
5190 * Let the runtime resume take care of resuming
5191 * if runtime suspended.
5192 */
5193 return 0;
5194
5195 return ufshcd_resume(hba, UFS_SYSTEM_PM);
5196 }
5197 EXPORT_SYMBOL(ufshcd_system_resume);
5198
5199 /**
5200 * ufshcd_runtime_suspend - runtime suspend routine
5201 * @hba: per adapter instance
5202 *
5203 * Check the description of ufshcd_suspend() function for more details.
5204 *
5205 * Returns 0 for success and non-zero for failure
5206 */
5207 int ufshcd_runtime_suspend(struct ufs_hba *hba)
5208 {
5209 if (!hba || !hba->is_powered)
5210 return 0;
5211
5212 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
5213 }
5214 EXPORT_SYMBOL(ufshcd_runtime_suspend);
5215
5216 /**
5217 * ufshcd_runtime_resume - runtime resume routine
5218 * @hba: per adapter instance
5219 *
5220 * This function basically brings the UFS device, UniPro link and controller
5221 * to active state. Following operations are done in this function:
5222 *
5223 * 1. Turn on all the controller related clocks
5224 * 2. Bring the UniPro link out of Hibernate state
5225 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
5226 * to active state.
5227 * 4. If auto-bkops is enabled on the device, disable it.
5228 *
5229  * So the following would be the possible power state after this function returns
5230 * successfully:
5231 * S1: UFS device in Active state with VCC rail ON
5232 * UniPro link in Active state
5233 * All the UFS/UniPro controller clocks are ON
5234 *
5235 * Returns 0 for success and non-zero for failure
5236 */
5237 int ufshcd_runtime_resume(struct ufs_hba *hba)
5238 {
5239 if (!hba || !hba->is_powered)
5240 return 0;
5241 else
5242 return ufshcd_resume(hba, UFS_RUNTIME_PM);
5243 }
5244 EXPORT_SYMBOL(ufshcd_runtime_resume);
5245
5246 int ufshcd_runtime_idle(struct ufs_hba *hba)
5247 {
5248 return 0;
5249 }
5250 EXPORT_SYMBOL(ufshcd_runtime_idle);
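 
/*
 * The suspend/resume helpers above are the entry points that a bus glue
 * driver (platform or PCI glue) is expected to wire into its dev_pm_ops.
 * A minimal sketch, assuming a glue driver whose drvdata is the ufs_hba
 * pointer; the wrapper and structure names below are illustrative only and
 * not part of this file:
 *
 *	static int ufshcd_glue_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufshcd_glue_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufshcd_glue_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufshcd_glue_runtime_resume(struct device *dev)
 *	{
 *		return ufshcd_runtime_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops ufshcd_glue_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_glue_suspend, ufshcd_glue_resume)
 *		SET_RUNTIME_PM_OPS(ufshcd_glue_runtime_suspend,
 *				   ufshcd_glue_runtime_resume, NULL)
 *	};
 */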
5251
5252 /**
5253 * ufshcd_shutdown - shutdown routine
5254 * @hba: per adapter instance
5255 *
5256 * This function would power off both UFS device and UFS link.
5257 *
5258 * Returns 0 always to allow force shutdown even in case of errors.
5259 */
5260 int ufshcd_shutdown(struct ufs_hba *hba)
5261 {
5262 int ret = 0;
5263
5264 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5265 goto out;
5266
5267 if (pm_runtime_suspended(hba->dev)) {
5268 ret = ufshcd_runtime_resume(hba);
5269 if (ret)
5270 goto out;
5271 }
5272
5273 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
5274 out:
5275 if (ret)
5276 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
5277 /* allow force shutdown even in case of errors */
5278 return 0;
5279 }
5280 EXPORT_SYMBOL(ufshcd_shutdown);
5281
5282 /*
5283 * Values permitted 0, 1, 2.
5284 * 0 -> Disable IO latency histograms (default)
5285 * 1 -> Enable IO latency histograms
5286 * 2 -> Zero out IO latency histograms
5287 */
5288 static ssize_t
5289 latency_hist_store(struct device *dev, struct device_attribute *attr,
5290 const char *buf, size_t count)
5291 {
5292 struct ufs_hba *hba = dev_get_drvdata(dev);
5293 long value;
5294
5295 if (kstrtol(buf, 0, &value))
5296 return -EINVAL;
5297 if (value == BLK_IO_LAT_HIST_ZERO) {
5298 memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
5299 memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
5300 } else if (value == BLK_IO_LAT_HIST_ENABLE ||
5301 value == BLK_IO_LAT_HIST_DISABLE)
5302 hba->latency_hist_enabled = value;
5303 return count;
5304 }
5305
5306 ssize_t
5307 latency_hist_show(struct device *dev, struct device_attribute *attr,
5308 char *buf)
5309 {
5310 struct ufs_hba *hba = dev_get_drvdata(dev);
5311 size_t written_bytes;
5312
5313 written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
5314 buf, PAGE_SIZE);
5315 written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
5316 buf + written_bytes, PAGE_SIZE - written_bytes);
5317
5318 return written_bytes;
5319 }
5320
5321 static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
5322 latency_hist_show, latency_hist_store);
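 
/*
 * Example usage from user space (the exact sysfs path depends on how the
 * host controller device is named on a given platform):
 *
 *	echo 1 > /sys/devices/.../latency_hist	# enable histograms
 *	cat /sys/devices/.../latency_hist	# dump read/write histograms
 *	echo 2 > /sys/devices/.../latency_hist	# zero out the histograms
 */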
5323
5324 static void
5325 ufshcd_init_latency_hist(struct ufs_hba *hba)
5326 {
5327 if (device_create_file(hba->dev, &dev_attr_latency_hist))
5328 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
5329 }
5330
5331 static void
5332 ufshcd_exit_latency_hist(struct ufs_hba *hba)
5333 {
5334 	device_remove_file(hba->dev, &dev_attr_latency_hist);
5335 }
5336
5337 /**
5338 * ufshcd_remove - de-allocate SCSI host and host memory space
5339 * data structure memory
5340  * @hba: per adapter instance
5341 */
5342 void ufshcd_remove(struct ufs_hba *hba)
5343 {
5344 scsi_remove_host(hba->host);
5345 /* disable interrupts */
5346 ufshcd_disable_intr(hba, hba->intr_mask);
5347 ufshcd_hba_stop(hba);
5348
5349 scsi_host_put(hba->host);
5350
5351 ufshcd_exit_clk_gating(hba);
5352 ufshcd_exit_latency_hist(hba);
5353 if (ufshcd_is_clkscaling_enabled(hba))
5354 devfreq_remove_device(hba->devfreq);
5355 ufshcd_hba_exit(hba);
5356 }
5357 EXPORT_SYMBOL_GPL(ufshcd_remove);
5358
5359 /**
5360 * ufshcd_set_dma_mask - Set dma mask based on the controller
5361 * addressing capability
5362 * @hba: per adapter instance
5363 *
5364 * Returns 0 for success, non-zero for failure
5365 */
5366 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
5367 {
5368 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
5369 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
5370 return 0;
5371 }
5372 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
5373 }
5374
5375 /**
5376 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
5377 * @dev: pointer to device handle
5378 * @hba_handle: driver private handle
5379 * Returns 0 on success, non-zero value on failure
5380 */
5381 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
5382 {
5383 struct Scsi_Host *host;
5384 struct ufs_hba *hba;
5385 int err = 0;
5386
5387 if (!dev) {
5388 dev_err(dev,
5389 "Invalid memory reference: dev is NULL\n");
5390 err = -ENODEV;
5391 goto out_error;
5392 }
5393
5394 host = scsi_host_alloc(&ufshcd_driver_template,
5395 sizeof(struct ufs_hba));
5396 if (!host) {
5397 dev_err(dev, "scsi_host_alloc failed\n");
5398 err = -ENOMEM;
5399 goto out_error;
5400 }
5401 hba = shost_priv(host);
5402 hba->host = host;
5403 hba->dev = dev;
5404 *hba_handle = hba;
5405
5406 out_error:
5407 return err;
5408 }
5409 EXPORT_SYMBOL(ufshcd_alloc_host);
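
/*
 * ufshcd_alloc_host() and ufshcd_init() are the entry points used by the
 * bus glue drivers (e.g. ufshcd-pltfrm, ufshcd-pci). A minimal probe
 * sequence, sketched here only for illustration, looks roughly like:
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	return ufshcd_init(hba, mmio_base, irq);
 *
 * ufshcd_init() cleans up after itself on failure, so the glue driver does
 * not need to release the SCSI host in that case.
 */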
5410
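/*
 * ufshcd_scale_clks - scale all controller clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to move every clock to its max_freq, false to its min_freq
 *
 * Walks hba->clk_list_head, skipping clocks that are already at the
 * requested frequency, then invokes the vendor clk_scale_notify hook if one
 * is provided. Returns 0 on success or the first clk_set_rate() error.
 */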
5411 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
5412 {
5413 int ret = 0;
5414 struct ufs_clk_info *clki;
5415 struct list_head *head = &hba->clk_list_head;
5416
5417 if (!head || list_empty(head))
5418 goto out;
5419
5420 list_for_each_entry(clki, head, list) {
5421 if (!IS_ERR_OR_NULL(clki->clk)) {
5422 if (scale_up && clki->max_freq) {
5423 if (clki->curr_freq == clki->max_freq)
5424 continue;
5425 ret = clk_set_rate(clki->clk, clki->max_freq);
5426 if (ret) {
5427 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5428 __func__, clki->name,
5429 clki->max_freq, ret);
5430 break;
5431 }
5432 clki->curr_freq = clki->max_freq;
5433
5434 } else if (!scale_up && clki->min_freq) {
5435 if (clki->curr_freq == clki->min_freq)
5436 continue;
5437 ret = clk_set_rate(clki->clk, clki->min_freq);
5438 if (ret) {
5439 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5440 __func__, clki->name,
5441 clki->min_freq, ret);
5442 break;
5443 }
5444 clki->curr_freq = clki->min_freq;
5445 }
5446 }
5447 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
5448 clki->name, clk_get_rate(clki->clk));
5449 }
5450 if (hba->vops && hba->vops->clk_scale_notify)
5451 hba->vops->clk_scale_notify(hba);
5452 out:
5453 return ret;
5454 }
5455
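/*
 * ufshcd_devfreq_target - devfreq frequency-change callback
 *
 * The governor requests either the highest (*freq == UINT_MAX) or the
 * lowest (*freq == 0) operating point, which is mapped here to scaling all
 * controller clocks up or down respectively.
 */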
5456 static int ufshcd_devfreq_target(struct device *dev,
5457 unsigned long *freq, u32 flags)
5458 {
5459 int err = 0;
5460 struct ufs_hba *hba = dev_get_drvdata(dev);
5461
5462 if (!ufshcd_is_clkscaling_enabled(hba))
5463 return -EINVAL;
5464
5465 if (*freq == UINT_MAX)
5466 err = ufshcd_scale_clks(hba, true);
5467 else if (*freq == 0)
5468 err = ufshcd_scale_clks(hba, false);
5469
5470 return err;
5471 }
5472
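/*
 * ufshcd_devfreq_get_dev_status - devfreq load-estimation callback
 *
 * Reports, under the SCSI host lock, the busy time accumulated since the
 * current polling window started along with the total window length, then
 * opens a new window. The window counts as busy while transfer requests
 * are outstanding.
 */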
5473 static int ufshcd_devfreq_get_dev_status(struct device *dev,
5474 struct devfreq_dev_status *stat)
5475 {
5476 struct ufs_hba *hba = dev_get_drvdata(dev);
5477 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
5478 unsigned long flags;
5479
5480 if (!ufshcd_is_clkscaling_enabled(hba))
5481 return -EINVAL;
5482
5483 memset(stat, 0, sizeof(*stat));
5484
5485 spin_lock_irqsave(hba->host->host_lock, flags);
5486 if (!scaling->window_start_t)
5487 goto start_window;
5488
5489 if (scaling->is_busy_started)
5490 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
5491 scaling->busy_start_t));
5492
5493 stat->total_time = jiffies_to_usecs((long)jiffies -
5494 (long)scaling->window_start_t);
5495 stat->busy_time = scaling->tot_busy_t;
5496 start_window:
5497 scaling->window_start_t = jiffies;
5498 scaling->tot_busy_t = 0;
5499
5500 if (hba->outstanding_reqs) {
5501 scaling->busy_start_t = ktime_get();
5502 scaling->is_busy_started = true;
5503 } else {
5504 scaling->busy_start_t = ktime_set(0, 0);
5505 scaling->is_busy_started = false;
5506 }
5507 spin_unlock_irqrestore(hba->host->host_lock, flags);
5508 return 0;
5509 }
5510
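/* devfreq polls the controller load every 100 ms when clock scaling is enabled */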
5511 static struct devfreq_dev_profile ufs_devfreq_profile = {
5512 .polling_ms = 100,
5513 .target = ufshcd_devfreq_target,
5514 .get_dev_status = ufshcd_devfreq_get_dev_status,
5515 };
5516
5517 /**
5518 * ufshcd_init - Driver initialization routine
5519 * @hba: per-adapter instance
5520 * @mmio_base: base register address
5521 * @irq: Interrupt line of device
5522 * Returns 0 on success, non-zero value on failure
5523 */
5524 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5525 {
5526 int err;
5527 struct Scsi_Host *host = hba->host;
5528 struct device *dev = hba->dev;
5529
5530 if (!mmio_base) {
5531 dev_err(hba->dev,
5532 "Invalid memory reference: mmio_base is NULL\n");
5533 err = -ENODEV;
5534 goto out_error;
5535 }
5536
5537 hba->mmio_base = mmio_base;
5538 hba->irq = irq;
5539
5540 err = ufshcd_hba_init(hba);
5541 if (err)
5542 goto out_error;
5543
5544 /* Read capabilities registers */
5545 ufshcd_hba_capabilities(hba);
5546
5547 /* Get UFS version supported by the controller */
5548 hba->ufs_version = ufshcd_get_ufs_version(hba);
5549
5550 /* Get Interrupt bit mask per version */
5551 hba->intr_mask = ufshcd_get_intr_mask(hba);
5552
5553 err = ufshcd_set_dma_mask(hba);
5554 if (err) {
5555 dev_err(hba->dev, "set dma mask failed\n");
5556 goto out_disable;
5557 }
5558
5559 /* Allocate memory for host memory space */
5560 err = ufshcd_memory_alloc(hba);
5561 if (err) {
5562 dev_err(hba->dev, "Memory allocation failed\n");
5563 goto out_disable;
5564 }
5565
5566 /* Configure LRB */
5567 ufshcd_host_memory_configure(hba);
5568
5569 host->can_queue = hba->nutrs;
5570 host->cmd_per_lun = hba->nutrs;
5571 host->max_id = UFSHCD_MAX_ID;
5572 host->max_lun = UFS_MAX_LUNS;
5573 host->max_channel = UFSHCD_MAX_CHANNEL;
5574 host->unique_id = host->host_no;
5575 host->max_cmd_len = MAX_CDB_SIZE;
5576
5577 hba->max_pwr_info.is_valid = false;
5578
5579 /* Initialize wait queue for task management */
5580 init_waitqueue_head(&hba->tm_wq);
5581 init_waitqueue_head(&hba->tm_tag_wq);
5582
5583 /* Initialize work queues */
5584 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
5585 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
5586
5587 /* Initialize UIC command mutex */
5588 mutex_init(&hba->uic_cmd_mutex);
5589
5590 /* Initialize mutex for device management commands */
5591 mutex_init(&hba->dev_cmd.lock);
5592
5593 /* Initialize device management tag acquire wait queue */
5594 init_waitqueue_head(&hba->dev_cmd.tag_wq);
5595
5596 ufshcd_init_clk_gating(hba);
5597 /* IRQ registration */
5598 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
5599 if (err) {
5600 dev_err(hba->dev, "request irq failed\n");
5601 goto exit_gating;
5602 } else {
5603 hba->is_irq_enabled = true;
5604 }
5605
5606 /* Enable SCSI tag mapping */
5607 err = scsi_init_shared_tag_map(host, host->can_queue);
5608 if (err) {
5609 dev_err(hba->dev, "init shared queue failed\n");
5610 goto exit_gating;
5611 }
5612
5613 err = scsi_add_host(host, hba->dev);
5614 if (err) {
5615 dev_err(hba->dev, "scsi_add_host failed\n");
5616 goto exit_gating;
5617 }
5618
5619 /* Host controller enable */
5620 err = ufshcd_hba_enable(hba);
5621 if (err) {
5622 dev_err(hba->dev, "Host controller enable failed\n");
5623 goto out_remove_scsi_host;
5624 }
5625
5626 if (ufshcd_is_clkscaling_enabled(hba)) {
5627 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
5628 "simple_ondemand", NULL);
5629 if (IS_ERR(hba->devfreq)) {
5630 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
5631 PTR_ERR(hba->devfreq));
5632 goto out_remove_scsi_host;
5633 }
5634 /* Suspend devfreq until the UFS device is detected */
5635 devfreq_suspend_device(hba->devfreq);
5636 hba->clk_scaling.window_start_t = 0;
5637 }
5638
5639 /* Hold auto suspend until async scan completes */
5640 pm_runtime_get_sync(dev);
5641
5642 ufshcd_init_latency_hist(hba);
5643
5644 /*
5645 * The device-initialize-sequence hasn't been invoked yet.
5646 * Set the device to power-off state
5647 */
5648 ufshcd_set_ufs_dev_poweroff(hba);
5649
5650 async_schedule(ufshcd_async_scan, hba);
5651
5652 return 0;
5653
5654 out_remove_scsi_host:
5655 scsi_remove_host(hba->host);
5656 exit_gating:
5657 ufshcd_exit_clk_gating(hba);
5658 ufshcd_exit_latency_hist(hba);
5659 out_disable:
5660 hba->is_irq_enabled = false;
5661 scsi_host_put(host);
5662 ufshcd_hba_exit(hba);
5663 out_error:
5664 return err;
5665 }
5666 EXPORT_SYMBOL_GPL(ufshcd_init);
5667
5668 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
5669 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
5670 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
5671 MODULE_LICENSE("GPL");
5672 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
5673