/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

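/* Return the printable name of a remote device state; the C() macro below
 * stringifies each entry of REMOTE_DEV_STATES.
 */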
#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  enum sci_remote_node_suspension_reasons reason)
{
	return sci_remote_node_context_suspend(&idev->rnc, reason,
					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 * remote device is ready. We mark the isci device as ready and signal the
 * waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

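/* Terminate a single request on the device, unless it is inactive, targets a
 * different device, or (when check_abort is set) is not marked for abort.
 */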
static enum sci_status sci_remote_device_terminate_req(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	int check_abort,
	struct isci_request *ireq)
{
	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
	    (ireq->target_device != idev) ||
	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
		return SCI_SUCCESS;

	dev_dbg(&ihost->pdev->dev,
		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
		__func__, idev, idev->flags, ireq, ireq->target_device);

	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);

	return sci_controller_terminate_request(ihost, idev, ireq);
}

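/* Walk the controller's request table and terminate every active request that
 * targets this device; the last non-success status, if any, is returned.
 */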
static enum sci_status sci_remote_device_terminate_reqs_checkabort(
	struct isci_remote_device *idev,
	int chk)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}
	return status;
}

static bool isci_compare_suspendcount(
	struct isci_remote_device *idev,
	u32 localcount)
{
	smp_rmb();

	/* Check for a change in the suspend count, or the RNC
	 * being destroyed.
	 */
	return (localcount != idev->rnc.suspend_count)
	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
}

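/* Wait condition helper: true once the RNC suspend count has changed (or the
 * RNC is being destroyed) and the request has left the abort path.
 */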
static bool isci_check_reqterm(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

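/* Wait condition helper: true once the RNC suspend count has changed (or the
 * RNC is being destroyed) and the device has no started requests remaining.
 */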
static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

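/**
 * isci_remote_device_terminate_requests() - terminate outstanding requests
 * @ihost: the host owning the device
 * @idev: the remote device whose request(s) are to be terminated
 * @ireq: a specific request to terminate, or NULL to terminate all of the
 *	device's started requests
 *
 * Holds a device reference across the termination and waits, up to
 * MAX_SUSPEND_MSECS, for either the specific request or all of the device's
 * started requests to leave the abort path.
 */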
enum sci_status isci_remote_device_terminate_requests(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;
	unsigned long flags;
	u32 rnc_suspend_count;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (isci_get_device(idev) == NULL) {
		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
			__func__, idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		status = SCI_FAILURE;
	} else {
		/* If already suspended, don't wait for another suspension. */
		smp_rmb();
		rnc_suspend_count
			= sci_remote_node_context_is_suspended(&idev->rnc)
				? 0 : idev->rnc.suspend_count;

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p, ireq=%p; started_request_count=%d, "
211 "rnc_suspend_count=%d, rnc.suspend_count=%d"
212 "about to wait\n",
213 __func__, idev, ireq, idev->started_request_count,
214 rnc_suspend_count, idev->rnc.suspend_count);
215
216 #define MAX_SUSPEND_MSECS 10000
217 if (ireq) {
218 /* Terminate a specific TC. */
219 set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
220 sci_remote_device_terminate_req(ihost, idev, 0, ireq);
221 spin_unlock_irqrestore(&ihost->scic_lock, flags);
222 if (!wait_event_timeout(ihost->eventq,
223 isci_check_reqterm(ihost, idev, ireq,
224 rnc_suspend_count),
225 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
226
227 dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
228 __func__, ihost->id);
229 dev_dbg(&ihost->pdev->dev,
230 "%s: ******* Timeout waiting for "
231 "suspend; idev=%p, current state %s; "
232 "started_request_count=%d, flags=%lx\n\t"
233 "rnc_suspend_count=%d, rnc.suspend_count=%d "
234 "RNC: current state %s, current "
235 "suspend_type %x dest state %d;\n"
236 "ireq=%p, ireq->flags = %lx\n",
237 __func__, idev,
238 dev_state_name(idev->sm.current_state_id),
239 idev->started_request_count, idev->flags,
240 rnc_suspend_count, idev->rnc.suspend_count,
241 rnc_state_name(idev->rnc.sm.current_state_id),
242 idev->rnc.suspend_type,
243 idev->rnc.destination_state,
244 ireq, ireq->flags);
245 }
246 spin_lock_irqsave(&ihost->scic_lock, flags);
247 clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
248 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
249 isci_free_tag(ihost, ireq->io_tag);
250 spin_unlock_irqrestore(&ihost->scic_lock, flags);
251 } else {
252 /* Terminate all TCs. */
253 sci_remote_device_terminate_requests(idev);
254 spin_unlock_irqrestore(&ihost->scic_lock, flags);
255 if (!wait_event_timeout(ihost->eventq,
256 isci_check_devempty(ihost, idev,
257 rnc_suspend_count),
258 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
259
260 dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
261 __func__, ihost->id);
262 dev_dbg(&ihost->pdev->dev,
263 "%s: ******* Timeout waiting for "
264 "suspend; idev=%p, current state %s; "
265 "started_request_count=%d, flags=%lx\n\t"
266 "rnc_suspend_count=%d, "
267 "RNC: current state %s, "
268 "rnc.suspend_count=%d, current "
269 "suspend_type %x dest state %d\n",
270 __func__, idev,
271 dev_state_name(idev->sm.current_state_id),
272 idev->started_request_count, idev->flags,
273 rnc_suspend_count,
274 rnc_state_name(idev->rnc.sm.current_state_id),
275 idev->rnc.suspend_count,
276 idev->rnc.suspend_type,
277 idev->rnc.destination_state);
278 }
279 }
280 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
281 __func__, idev);
282 isci_put_device(idev);
283 }
284 return status;
285 }
286
/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 * the remote device is not ready. We mark the isci device as not ready for
 * I/O (the "ready_for_io" flag is cleared).
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies why the device is not ready.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Suspend the remote device so the I/O can be terminated. */
		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

		/* Kill all outstanding requests for the device. */
		sci_remote_device_terminate_requests(idev);

		fallthrough; /* into the default case */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

enum sci_status sci_remote_device_terminate_requests(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
}

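/**
 * sci_remote_device_stop() - stop a remote device based on its current state
 * @idev: the remote device to stop
 * @timeout: This parameter specifies the number of milliseconds in which the
 *	stop operation should complete.
 *
 * Moves the device toward SCI_DEV_STOPPED, destructing the remote node
 * context once all started requests have been terminated.
 */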
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		else {
			sci_remote_device_suspend(
				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
			sci_remote_device_terminate_requests(idev);
		}
		return SCI_SUCCESS;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

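/**
 * sci_remote_device_frame_handler() - dispatch an unsolicited frame
 * @idev: the remote device the frame was received for
 * @frame_index: index of the frame in the controller's unsolicited frame
 *	control
 *
 * Routes the frame to the owning I/O request (matching on the SSP tag, or the
 * device's working request for STP/SMP command states), handles SATA SDB/D2H
 * error FISes by entering the NCQ error substate, and releases frames that
 * cannot be delivered.
 */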
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO
			 * request. Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is
			 * detected. Treat this like an SDB error FIS as the
			 * not-ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state. All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{

	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to early D2H fis)
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

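/**
 * sci_remote_device_event_handler() - handle an SCU event for this device
 * @idev: the remote device the event was received for
 * @event_code: the SCU event code
 *
 * RNC suspension/operation events are forwarded to the remote node context;
 * an I_T nexus timeout suspends the RNC. In the STP idle and ATAPI error
 * states a TX or TX/RX suspension event triggers an immediate RNC resume.
 */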
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	enum sci_status status;
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		fallthrough; /* and treat as unhandled */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	/* Decode device-specific states that may require an RNC resume during
	 * normal operation. When the abort path is active, these resumes are
	 * managed when the abort path exits.
	 */
	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For ATAPI error state resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* We pick up suspension events that are handled specifically
		 * in this state: resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

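/**
 * sci_remote_device_start_io() - start an I/O request on this device
 * @ihost: the host owning the device
 * @idev: the remote device that will handle the I/O
 * @ireq: the I/O request to start
 *
 * Starts the request on the port, the remote node context, and the request
 * itself, then moves STP/SMP devices into the appropriate command or NCQ
 * substate. On success a device reference is taken and the started request
 * count is incremented.
 */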
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object. The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* Handle the start io operation for a sata device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started.
		 * - If it is an NCQ request, change to the NCQ substate.
		 * - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device is already handling a command; it cannot accept
		 * new commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

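/**
 * sci_remote_device_complete_io() - complete an I/O request on this device
 * @ihost: the host owning the device
 * @idev: the remote device the request was started on
 * @ireq: the request being completed
 *
 * Completes the request against the port and decrements the device's started
 * request count, transitioning STP/SMP substates back to idle (or to
 * await-reset when the request failed with a reset-required status). On
 * success the device reference taken at start time is dropped.
 */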
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error and the device
			 * needs a LUN reset. Force the state machine to the
			 * AWAIT_RESET state so that outstanding I/Os reach the
			 * RNC state handler and are completed with a status of
			 * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

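/**
 * sci_remote_device_start_task() - start a task management request
 * @ihost: the host owning the device
 * @idev: the remote device that is the target of the task management request
 * @ireq: the task management request to start
 *
 * For STP devices the RNC is suspended and the task is posted when the RNC
 * resumes, so SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS is returned to tell
 * the caller not to post the task context yet.
 */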
enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table. The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC. In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_device_suspend(idev,
					  SCI_SW_SUSPEND_LINKHANG_DETECT);

		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
				sci_remote_device_continue_request, idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post TC yet. We will provide a callback function to
		 * post TC when RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		/* Resume the RNC as needed: */
		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
							    NULL, NULL);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

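/* Build a post context from the request, the protocol engine group, the
 * logical port, and the remote node index, then post it to the controller.
 */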
void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* called once the remote node context has transitioned to a
 * ready state. This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource. As such, they must be
 * protected. Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant. The return value indicates whether the device was
 * successfully destructed: SCI_SUCCESS is returned if the device is
 * successfully destructed; SCI_FAILURE_INVALID_STATE is returned if the
 * supplied device isn't valid (e.g. it has already been destroyed or it is
 * not in the stopped state).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's. All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	BUG_ON(idev->started_request_count > 0);

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev->dev_type)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEVICE) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready). */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
					struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/**
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object. Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	enum sci_status status;
	struct sci_port_properties properties;

	sci_remote_device_construct(iport, idev);

	sci_port_get_properties(iport, &properties);
	/* Get accurate port width from port's phy mask for a DA device. */
	idev->device_port_width = hweight32(properties.phy_mask);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	/* For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing. The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations.
	 */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
				      dev->linkrate);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_resume(
	struct isci_remote_device *idev,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status;

	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
			__func__, status);
	return status;
}

static void isci_remote_device_resume_from_abort_complete(void *cbparam)
{
	struct isci_remote_device *idev = cbparam;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	scics_sds_remote_node_context_callback abort_resume_cb =
		idev->abort_resume_cb;

	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
		__func__, abort_resume_cb);

	if (abort_resume_cb != NULL) {
		idev->abort_resume_cb = NULL;
		abort_resume_cb(idev->abort_resume_cbparam);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	wake_up(&ihost->eventq);
}

static bool isci_remote_device_test_resume_done(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
	    || test_bit(IDEV_STOP_PENDING, &idev->flags)
	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return done;
}

void isci_remote_device_wait_for_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
		__func__, idev);

	#define MAX_RESUME_MSECS 10000
	if (!wait_event_timeout(ihost->eventq,
				isci_remote_device_test_resume_done(ihost, idev),
				msecs_to_jiffies(MAX_RESUME_MSECS))) {

		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
			 "resume: %p\n", __func__, idev);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
		__func__, idev);
}

enum sci_status isci_remote_device_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	enum sci_status status = SCI_SUCCESS;
	int destroyed;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	/* Preserve any current resume callbacks, for instance from other
	 * resumptions.
	 */
	idev->abort_resume_cb = idev->rnc.user_callback;
	idev->abort_resume_cbparam = idev->rnc.user_cookie;
	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
	if (!destroyed)
		status = sci_remote_device_resume(
			idev, isci_remote_device_resume_from_abort_complete,
			idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
	if (!destroyed && (status == SCI_SUCCESS))
		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
	else
		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	return status;
}

/**
 * sci_remote_device_start() - This method will start the supplied remote
 * device. This method enables normal IO requests to flow through to the
 * remote device.
 * @idev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 * start operation should complete.
 *
 * An indication of whether the device was successfully started. SCI_SUCCESS
 * is returned if the device was successfully started.
 * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start the
 * device when there have been no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_resume(idev, remote_device_resume_done,
					  idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent->dev_type))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

/**
 * isci_remote_device_alloc() - This function allocates an isci_remote_device
 * when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Returns a pointer to the new isci_remote_device, or NULL on failure.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}
	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	smp_mb__before_atomic();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 * remote device.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter specifies the remote device.
 *
 * The status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);

	set_bit(IDEV_STOP_PENDING, &idev->flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p, waiting done.\n", __func__, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 * device is removed.
 * @domain_device: This parameter specifies the libsas domain device.
 *
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}


/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 * device is discovered. A remote device object is created and started. The
 * function then sleeps until the sci core device started message is
 * received.
 * @dev: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *dev)
{
	struct isci_host *isci_host = dev_to_ihost(dev);
	struct isci_port *isci_port = dev->port->lldd_port;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, dev);

	if (!isci_port)
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}

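/**
 * isci_remote_device_suspend_terminate() - suspend the device and terminate
 * requests
 * @ihost: the host owning the device
 * @idev: the remote device to suspend
 * @ireq: a specific request to terminate, or NULL to terminate all of the
 *	device's started requests
 *
 * Suspends the RNC, marks the abort path active, and waits for the
 * termination(s) to complete. Resuming the RNC is left to the caller.
 */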
enum sci_status isci_remote_device_suspend_terminate(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	unsigned long flags;
	enum sci_status status;

	/* Put the device into suspension. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Terminate and wait for the completions. */
	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		dev_dbg(&ihost->pdev->dev,
			"%s: isci_remote_device_terminate_requests(%p) "
			"returned %d!\n",
			__func__, idev, status);

	/* NOTE: RNC resumption is left to the caller! */
	return status;
}

int isci_remote_device_is_safe_to_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
}

enum sci_status sci_remote_device_abort_requests_pending_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}

enum sci_status isci_remote_device_reset_complete(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset_complete(idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return status;
}

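/* Enable or disable SATA low-level hang detection on the device's port: a
 * non-zero timeout enables detection (if not already enabled), zero disables
 * it. Non-SATA devices are ignored.
 */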
void isci_dev_set_hang_detection_timeout(
	struct isci_remote_device *idev,
	u32 timeout)
{
	if (dev_is_sata(idev->domain_dev)) {
		if (timeout) {
			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
					     &idev->flags))
				return; /* Already enabled. */
		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
					       &idev->flags))
			return; /* Not enabled. */

		sci_port_set_hang_detection_timeout(idev->owning_port,
						    timeout);
	}
}