1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corp. 2002, 2015
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20
21 struct kmem_cache *zfcp_fsf_qtcb_cache;
22
23 static bool ber_stop = true;
24 module_param(ber_stop, bool, 0600);
25 MODULE_PARM_DESC(ber_stop,
26 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
27
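/*
 * Timeout handler armed by zfcp_fsf_start_timer(): when an FSF request
 * does not complete in time, signal the hardware via SIOSL and reopen
 * the adapter through error recovery (ERP).
 */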
28 static void zfcp_fsf_request_timeout_handler(unsigned long data)
29 {
30 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
31 zfcp_qdio_siosl(adapter);
32 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
33 "fsrth_1");
34 }
35
36 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
37 unsigned long timeout)
38 {
39 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
40 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
41 fsf_req->timer.expires = jiffies + timeout;
42 add_timer(&fsf_req->timer);
43 }
44
45 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
46 {
47 BUG_ON(!fsf_req->erp_action);
48 fsf_req->timer.function = zfcp_erp_timeout_handler;
49 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
50 fsf_req->timer.expires = jiffies + 30 * HZ;
51 add_timer(&fsf_req->timer);
52 }
53
54 /* association between FSF command and FSF QTCB type */
55 static u32 fsf_qtcb_type[] = {
56 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
57 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
59 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
60 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
61 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
62 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
63 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
64 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
65 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
66 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
67 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
68 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
69 };
70
71 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
72 {
73 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
74 "operational because of an unsupported FC class\n");
75 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
76 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
77 }
78
79 /**
80 * zfcp_fsf_req_free - free memory used by fsf request
81 * @req: pointer to struct zfcp_fsf_req
82 */
83 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
84 {
85 if (likely(req->pool)) {
86 if (likely(req->qtcb))
87 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
88 mempool_free(req, req->pool);
89 return;
90 }
91
92 if (likely(req->qtcb))
93 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
94 kfree(req);
95 }
96
97 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
98 {
99 unsigned long flags;
100 struct fsf_status_read_buffer *sr_buf = req->data;
101 struct zfcp_adapter *adapter = req->adapter;
102 struct zfcp_port *port;
103 int d_id = ntoh24(sr_buf->d_id);
104
105 read_lock_irqsave(&adapter->port_list_lock, flags);
106 list_for_each_entry(port, &adapter->port_list, list)
107 if (port->d_id == d_id) {
108 zfcp_erp_port_reopen(port, 0, "fssrpc1");
109 break;
110 }
111 read_unlock_irqrestore(&adapter->port_list_lock, flags);
112 }
113
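/*
 * Evaluate a link-down condition: mark the adapter link as unplugged,
 * block the SCSI rports, log the reason reported by the FCP channel
 * (if any) and flag the adapter as failed for error recovery.
 */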
114 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
115 struct fsf_link_down_info *link_down)
116 {
117 struct zfcp_adapter *adapter = req->adapter;
118
119 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
120 return;
121
122 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
123
124 zfcp_scsi_schedule_rports_block(adapter);
125
126 if (!link_down)
127 goto out;
128
129 switch (link_down->error_code) {
130 case FSF_PSQ_LINK_NO_LIGHT:
131 dev_warn(&req->adapter->ccw_device->dev,
132 "There is no light signal from the local "
133 "fibre channel cable\n");
134 break;
135 case FSF_PSQ_LINK_WRAP_PLUG:
136 dev_warn(&req->adapter->ccw_device->dev,
137 "There is a wrap plug instead of a fibre "
138 "channel cable\n");
139 break;
140 case FSF_PSQ_LINK_NO_FCP:
141 dev_warn(&req->adapter->ccw_device->dev,
142 "The adjacent fibre channel node does not "
143 "support FCP\n");
144 break;
145 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
146 dev_warn(&req->adapter->ccw_device->dev,
147 "The FCP device is suspended because of a "
148 "firmware update\n");
149 break;
150 case FSF_PSQ_LINK_INVALID_WWPN:
151 dev_warn(&req->adapter->ccw_device->dev,
152 "The FCP device detected a WWPN that is "
153 "duplicate or not valid\n");
154 break;
155 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
156 dev_warn(&req->adapter->ccw_device->dev,
157 "The fibre channel fabric does not support NPIV\n");
158 break;
159 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
160 dev_warn(&req->adapter->ccw_device->dev,
161 "The FCP adapter cannot support more NPIV ports\n");
162 break;
163 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
164 dev_warn(&req->adapter->ccw_device->dev,
165 "The adjacent switch cannot support "
166 "more NPIV ports\n");
167 break;
168 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
169 dev_warn(&req->adapter->ccw_device->dev,
170 "The FCP adapter could not log in to the "
171 "fibre channel fabric\n");
172 break;
173 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
174 dev_warn(&req->adapter->ccw_device->dev,
175 "The WWPN assignment file on the FCP adapter "
176 "has been damaged\n");
177 break;
178 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
179 dev_warn(&req->adapter->ccw_device->dev,
180 "The mode table on the FCP adapter "
181 "has been damaged\n");
182 break;
183 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
184 dev_warn(&req->adapter->ccw_device->dev,
185 "All NPIV ports on the FCP adapter have "
186 "been assigned\n");
187 break;
188 default:
189 dev_warn(&req->adapter->ccw_device->dev,
190 "The link between the FCP adapter and "
191 "the FC fabric is down\n");
192 }
193 out:
194 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
195 }
196
197 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
198 {
199 struct fsf_status_read_buffer *sr_buf = req->data;
200 struct fsf_link_down_info *ldi =
201 (struct fsf_link_down_info *) &sr_buf->payload;
202
203 switch (sr_buf->status_subtype) {
204 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
205 zfcp_fsf_link_down_info_eval(req, ldi);
206 break;
207 case FSF_STATUS_READ_SUB_FDISC_FAILED:
208 zfcp_fsf_link_down_info_eval(req, ldi);
209 break;
210 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
211 zfcp_fsf_link_down_info_eval(req, NULL);
212 }
213 }
214
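/*
 * Completion handler for unsolicited status (status read) buffers:
 * dispatch on the status type (port closed, incoming ELS, bit errors,
 * link down/up, lost notifications, feature update alerts), then free
 * the buffer and schedule work to refill the status read queue.
 */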
215 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
216 {
217 struct zfcp_adapter *adapter = req->adapter;
218 struct fsf_status_read_buffer *sr_buf = req->data;
219
220 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
221 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
222 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
223 zfcp_fsf_req_free(req);
224 return;
225 }
226
227 zfcp_dbf_hba_fsf_uss("fssrh_4", req);
228
229 switch (sr_buf->status_type) {
230 case FSF_STATUS_READ_PORT_CLOSED:
231 zfcp_fsf_status_read_port_closed(req);
232 break;
233 case FSF_STATUS_READ_INCOMING_ELS:
234 zfcp_fc_incoming_els(req);
235 break;
236 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
237 break;
238 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
239 zfcp_dbf_hba_bit_err("fssrh_3", req);
240 if (ber_stop) {
241 dev_warn(&adapter->ccw_device->dev,
242 "All paths over this FCP device are disused because of excessive bit errors\n");
243 zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
244 } else {
245 dev_warn(&adapter->ccw_device->dev,
246 "The error threshold for checksum statistics has been exceeded\n");
247 }
248 break;
249 case FSF_STATUS_READ_LINK_DOWN:
250 zfcp_fsf_status_read_link_down(req);
251 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
252 break;
253 case FSF_STATUS_READ_LINK_UP:
254 dev_info(&adapter->ccw_device->dev,
255 "The local link has been restored\n");
256 /* All ports should be marked as ready to run again */
257 zfcp_erp_set_adapter_status(adapter,
258 ZFCP_STATUS_COMMON_RUNNING);
259 zfcp_erp_adapter_reopen(adapter,
260 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
261 ZFCP_STATUS_COMMON_ERP_FAILED,
262 "fssrh_2");
263 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
264
265 break;
266 case FSF_STATUS_READ_NOTIFICATION_LOST:
267 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
268 zfcp_fc_conditional_port_scan(adapter);
269 break;
270 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
271 adapter->adapter_features = sr_buf->payload.word[0];
272 break;
273 }
274
275 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
276 zfcp_fsf_req_free(req);
277
278 atomic_inc(&adapter->stat_miss);
279 queue_work(adapter->work_queue, &adapter->stat_work);
280 }
281
282 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
283 {
284 switch (req->qtcb->header.fsf_status_qual.word[0]) {
285 case FSF_SQ_FCP_RSP_AVAILABLE:
286 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
287 case FSF_SQ_NO_RETRY_POSSIBLE:
288 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
289 return;
290 case FSF_SQ_COMMAND_ABORTED:
291 break;
292 case FSF_SQ_NO_RECOM:
293 dev_err(&req->adapter->ccw_device->dev,
294 "The FCP adapter reported a problem "
295 "that cannot be recovered\n");
296 zfcp_qdio_siosl(req->adapter);
297 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
298 break;
299 }
300 /* all non-return statuses set FSFREQ_ERROR */
301 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
302 }
303
304 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
305 {
306 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
307 return;
308
309 switch (req->qtcb->header.fsf_status) {
310 case FSF_UNKNOWN_COMMAND:
311 dev_err(&req->adapter->ccw_device->dev,
312 "The FCP adapter does not recognize the command 0x%x\n",
313 req->qtcb->header.fsf_command);
314 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
315 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
316 break;
317 case FSF_ADAPTER_STATUS_AVAILABLE:
318 zfcp_fsf_fsfstatus_qual_eval(req);
319 break;
320 }
321 }
322
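/*
 * Evaluate the QTCB protocol status of a completed request; anything
 * other than a good/presented status marks the request as failed and,
 * depending on the error, triggers adapter reopen or shutdown.
 */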
323 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
324 {
325 struct zfcp_adapter *adapter = req->adapter;
326 struct fsf_qtcb *qtcb = req->qtcb;
327 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
328
329 zfcp_dbf_hba_fsf_response(req);
330
331 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
332 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
333 return;
334 }
335
336 switch (qtcb->prefix.prot_status) {
337 case FSF_PROT_GOOD:
338 case FSF_PROT_FSF_STATUS_PRESENTED:
339 return;
340 case FSF_PROT_QTCB_VERSION_ERROR:
341 dev_err(&adapter->ccw_device->dev,
342 "QTCB version 0x%x not supported by FCP adapter "
343 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
344 psq->word[0], psq->word[1]);
345 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
346 break;
347 case FSF_PROT_ERROR_STATE:
348 case FSF_PROT_SEQ_NUMB_ERROR:
349 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
350 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
351 break;
352 case FSF_PROT_UNSUPP_QTCB_TYPE:
353 dev_err(&adapter->ccw_device->dev,
354 "The QTCB type is not supported by the FCP adapter\n");
355 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
356 break;
357 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
358 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
359 &adapter->status);
360 break;
361 case FSF_PROT_DUPLICATE_REQUEST_ID:
362 dev_err(&adapter->ccw_device->dev,
363 "0x%Lx is an ambiguous request identifier\n",
364 (unsigned long long)qtcb->bottom.support.req_handle);
365 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
366 break;
367 case FSF_PROT_LINK_DOWN:
368 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
369 /* go through reopen to flush pending requests */
370 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
371 break;
372 case FSF_PROT_REEST_QUEUE:
373 /* All ports should be marked as ready to run again */
374 zfcp_erp_set_adapter_status(adapter,
375 ZFCP_STATUS_COMMON_RUNNING);
376 zfcp_erp_adapter_reopen(adapter,
377 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
378 ZFCP_STATUS_COMMON_ERP_FAILED,
379 "fspse_8");
380 break;
381 default:
382 dev_err(&adapter->ccw_device->dev,
383 "0x%x is not a valid transfer protocol status\n",
384 qtcb->prefix.prot_status);
385 zfcp_qdio_siosl(adapter);
386 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
387 }
388 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
389 }
390
391 /**
392 * zfcp_fsf_req_complete - process completion of a FSF request
393 * @req: The FSF request that has been completed.
394 *
395 * When a request has been completed, either by the FCP adapter or
396 * because it was dismissed due to a queue shutdown, this function
397 * processes the completion status and triggers further events
398 * related to the FSF request.
399 */
400 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
401 {
402 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
403 zfcp_fsf_status_read_handler(req);
404 return;
405 }
406
407 del_timer(&req->timer);
408 zfcp_fsf_protstatus_eval(req);
409 zfcp_fsf_fsfstatus_eval(req);
410 req->handler(req);
411
412 if (req->erp_action)
413 zfcp_erp_notify(req->erp_action, 0);
414
415 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
416 zfcp_fsf_req_free(req);
417 else
418 complete(&req->completion);
419 }
420
421 /**
422 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
423 * @adapter: pointer to struct zfcp_adapter
424 *
425 * Never ever call this without shutting down the adapter first.
426 * Otherwise the adapter would continue using and corrupting s390 storage.
427 * A BUG_ON() call is included to ensure this is done.
428 * ERP is supposed to be the only user of this function.
429 */
430 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
431 {
432 struct zfcp_fsf_req *req, *tmp;
433 LIST_HEAD(remove_queue);
434
435 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
436 zfcp_reqlist_move(adapter->req_list, &remove_queue);
437
438 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
439 list_del(&req->list);
440 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
441 zfcp_fsf_req_complete(req);
442 }
443 }
444
445 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
446 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
447 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
448 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
449 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
450 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
451 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
452
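/* translate FSF port speed bits into FC transport class FC_PORTSPEED bits */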
453 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
454 {
455 u32 fdmi_speed = 0;
456 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
457 fdmi_speed |= FC_PORTSPEED_1GBIT;
458 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
459 fdmi_speed |= FC_PORTSPEED_2GBIT;
460 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
461 fdmi_speed |= FC_PORTSPEED_4GBIT;
462 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
463 fdmi_speed |= FC_PORTSPEED_10GBIT;
464 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
465 fdmi_speed |= FC_PORTSPEED_8GBIT;
466 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
467 fdmi_speed |= FC_PORTSPEED_16GBIT;
468 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
469 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
470 return fdmi_speed;
471 }
472
473 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
474 {
475 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
476 struct zfcp_adapter *adapter = req->adapter;
477 struct Scsi_Host *shost = adapter->scsi_host;
478 struct fc_els_flogi *nsp, *plogi;
479
480 /* adjust pointers for missing command code */
481 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
482 - sizeof(u32));
483 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
484 - sizeof(u32));
485
486 if (req->data)
487 memcpy(req->data, bottom, sizeof(*bottom));
488
489 fc_host_port_name(shost) = nsp->fl_wwpn;
490 fc_host_node_name(shost) = nsp->fl_wwnn;
491 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
492
493 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
494 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
495 (u16)FSF_STATUS_READS_RECOM);
496
497 if (fc_host_permanent_port_name(shost) == -1)
498 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
499
500 zfcp_scsi_set_prot(adapter);
501
502 /* no error return above here, otherwise must fix call chains */
503 /* do not evaluate invalid fields */
504 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
505 return 0;
506
507 fc_host_port_id(shost) = ntoh24(bottom->s_id);
508 fc_host_speed(shost) =
509 zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
510
511 adapter->hydra_version = bottom->adapter_type;
512
513 switch (bottom->fc_topology) {
514 case FSF_TOPO_P2P:
515 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
516 adapter->peer_wwpn = plogi->fl_wwpn;
517 adapter->peer_wwnn = plogi->fl_wwnn;
518 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
519 break;
520 case FSF_TOPO_FABRIC:
521 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
522 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
523 else
524 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
525 break;
526 case FSF_TOPO_AL:
527 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
528 /* fall through */
529 default:
530 dev_err(&adapter->ccw_device->dev,
531 "Unknown or unsupported arbitrated loop "
532 "fibre channel topology detected\n");
533 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
534 return -EIO;
535 }
536
537 return 0;
538 }
539
540 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
541 {
542 struct zfcp_adapter *adapter = req->adapter;
543 struct fsf_qtcb *qtcb = req->qtcb;
544 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
545 struct Scsi_Host *shost = adapter->scsi_host;
546
547 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
548 return;
549
550 adapter->fsf_lic_version = bottom->lic_version;
551 adapter->adapter_features = bottom->adapter_features;
552 adapter->connection_features = bottom->connection_features;
553 adapter->peer_wwpn = 0;
554 adapter->peer_wwnn = 0;
555 adapter->peer_d_id = 0;
556
557 switch (qtcb->header.fsf_status) {
558 case FSF_GOOD:
559 if (zfcp_fsf_exchange_config_evaluate(req))
560 return;
561
562 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
563 dev_err(&adapter->ccw_device->dev,
564 "FCP adapter maximum QTCB size (%d bytes) "
565 "is too small\n",
566 bottom->max_qtcb_size);
567 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
568 return;
569 }
570 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
571 &adapter->status);
572 break;
573 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
574 fc_host_node_name(shost) = 0;
575 fc_host_port_name(shost) = 0;
576 fc_host_port_id(shost) = 0;
577 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
578 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
579 adapter->hydra_version = 0;
580
581 /* avoids adapter shutdown to be able to recognize
582 * events such as LINK UP */
583 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
584 &adapter->status);
585 zfcp_fsf_link_down_info_eval(req,
586 &qtcb->header.fsf_status_qual.link_down_info);
587 if (zfcp_fsf_exchange_config_evaluate(req))
588 return;
589 break;
590 default:
591 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
592 return;
593 }
594
595 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
596 adapter->hardware_version = bottom->hardware_version;
597 memcpy(fc_host_serial_number(shost), bottom->serial_number,
598 min(FC_SERIAL_NUMBER_SIZE, 17));
599 EBCASC(fc_host_serial_number(shost),
600 min(FC_SERIAL_NUMBER_SIZE, 17));
601 }
602
603 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
604 dev_err(&adapter->ccw_device->dev,
605 "The FCP adapter only supports newer "
606 "control block versions\n");
607 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
608 return;
609 }
610 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
611 dev_err(&adapter->ccw_device->dev,
612 "The FCP adapter only supports older "
613 "control block versions\n");
614 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
615 }
616 }
617
618 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
619 {
620 struct zfcp_adapter *adapter = req->adapter;
621 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
622 struct Scsi_Host *shost = adapter->scsi_host;
623
624 if (req->data)
625 memcpy(req->data, bottom, sizeof(*bottom));
626
627 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
628 fc_host_permanent_port_name(shost) = bottom->wwpn;
629 } else
630 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
631 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
632 fc_host_supported_speeds(shost) =
633 zfcp_fsf_convert_portspeed(bottom->supported_speed);
634 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
635 FC_FC4_LIST_SIZE);
636 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
637 FC_FC4_LIST_SIZE);
638 }
639
640 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
641 {
642 struct fsf_qtcb *qtcb = req->qtcb;
643
644 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
645 return;
646
647 switch (qtcb->header.fsf_status) {
648 case FSF_GOOD:
649 zfcp_fsf_exchange_port_evaluate(req);
650 break;
651 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
652 zfcp_fsf_exchange_port_evaluate(req);
653 zfcp_fsf_link_down_info_eval(req,
654 &qtcb->header.fsf_status_qual.link_down_info);
655 break;
656 }
657 }
658
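/*
 * Request and QTCB allocation helpers: take memory from the given
 * mempool when one is supplied (so allocation can still make progress
 * under memory pressure), otherwise fall back to the slab allocators.
 */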
659 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
660 {
661 struct zfcp_fsf_req *req;
662
663 if (likely(pool))
664 req = mempool_alloc(pool, GFP_ATOMIC);
665 else
666 req = kmalloc(sizeof(*req), GFP_ATOMIC);
667
668 if (unlikely(!req))
669 return NULL;
670
671 memset(req, 0, sizeof(*req));
672 req->pool = pool;
673 return req;
674 }
675
676 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
677 {
678 struct fsf_qtcb *qtcb;
679
680 if (likely(pool))
681 qtcb = mempool_alloc(pool, GFP_ATOMIC);
682 else
683 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
684
685 if (unlikely(!qtcb))
686 return NULL;
687
688 memset(qtcb, 0, sizeof(*qtcb));
689 return qtcb;
690 }
691
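/*
 * Allocate and initialize an FSF request: set up its timer, completion
 * and request ID, allocate a QTCB for all commands except unsolicited
 * status, fill in the QTCB prefix/header, and prepare the QDIO request.
 */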
692 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
693 u32 fsf_cmd, u8 sbtype,
694 mempool_t *pool)
695 {
696 struct zfcp_adapter *adapter = qdio->adapter;
697 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
698
699 if (unlikely(!req))
700 return ERR_PTR(-ENOMEM);
701
702 if (adapter->req_no == 0)
703 adapter->req_no++;
704
705 INIT_LIST_HEAD(&req->list);
706 init_timer(&req->timer);
707 init_completion(&req->completion);
708
709 req->adapter = adapter;
710 req->fsf_command = fsf_cmd;
711 req->req_id = adapter->req_no;
712
713 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
714 if (likely(pool))
715 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
716 else
717 req->qtcb = zfcp_qtcb_alloc(NULL);
718
719 if (unlikely(!req->qtcb)) {
720 zfcp_fsf_req_free(req);
721 return ERR_PTR(-ENOMEM);
722 }
723
724 req->seq_no = adapter->fsf_req_seq_no;
725 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
726 req->qtcb->prefix.req_id = req->req_id;
727 req->qtcb->prefix.ulp_info = 26;
728 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
729 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
730 req->qtcb->header.req_handle = req->req_id;
731 req->qtcb->header.fsf_command = req->fsf_command;
732 }
733
734 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
735 req->qtcb, sizeof(struct fsf_qtcb));
736
737 return req;
738 }
739
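/*
 * Hand a prepared FSF request over to the FCP channel: add it to the
 * adapter's request list and issue it via QDIO; on failure remove it
 * from the list again and trigger adapter recovery.
 */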
740 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
741 {
742 struct zfcp_adapter *adapter = req->adapter;
743 struct zfcp_qdio *qdio = adapter->qdio;
744 int with_qtcb = (req->qtcb != NULL);
745 int req_id = req->req_id;
746
747 zfcp_reqlist_add(adapter->req_list, req);
748
749 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
750 req->issued = get_tod_clock();
751 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
752 del_timer(&req->timer);
753 /* lookup request again, list might have changed */
754 zfcp_reqlist_find_rm(adapter->req_list, req_id);
755 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
756 return -EIO;
757 }
758
759 /* Don't increase for unsolicited status */
760 if (with_qtcb)
761 adapter->fsf_req_seq_no++;
762 adapter->req_no++;
763
764 return 0;
765 }
766
767 /**
768 * zfcp_fsf_status_read - send status read request
769 * @qdio: pointer to struct zfcp_qdio
770 *
771 * Returns: 0 on success, error otherwise
772 */
773 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
774 {
775 struct zfcp_adapter *adapter = qdio->adapter;
776 struct zfcp_fsf_req *req;
777 struct fsf_status_read_buffer *sr_buf;
778 struct page *page;
779 int retval = -EIO;
780
781 spin_lock_irq(&qdio->req_q_lock);
782 if (zfcp_qdio_sbal_get(qdio))
783 goto out;
784
785 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
786 SBAL_SFLAGS0_TYPE_STATUS,
787 adapter->pool.status_read_req);
788 if (IS_ERR(req)) {
789 retval = PTR_ERR(req);
790 goto out;
791 }
792
793 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
794 if (!page) {
795 retval = -ENOMEM;
796 goto failed_buf;
797 }
798 sr_buf = page_address(page);
799 memset(sr_buf, 0, sizeof(*sr_buf));
800 req->data = sr_buf;
801
802 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
803 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
804
805 retval = zfcp_fsf_req_send(req);
806 if (retval)
807 goto failed_req_send;
808
809 goto out;
810
811 failed_req_send:
812 req->data = NULL;
813 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
814 failed_buf:
815 zfcp_dbf_hba_fsf_uss("fssr__1", req);
816 zfcp_fsf_req_free(req);
817 out:
818 spin_unlock_irq(&qdio->req_q_lock);
819 return retval;
820 }
821
822 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
823 {
824 struct scsi_device *sdev = req->data;
825 struct zfcp_scsi_dev *zfcp_sdev;
826 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
827
828 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
829 return;
830
831 zfcp_sdev = sdev_to_zfcp(sdev);
832
833 switch (req->qtcb->header.fsf_status) {
834 case FSF_PORT_HANDLE_NOT_VALID:
835 if (fsq->word[0] == fsq->word[1]) {
836 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
837 "fsafch1");
838 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
839 }
840 break;
841 case FSF_LUN_HANDLE_NOT_VALID:
842 if (fsq->word[0] == fsq->word[1]) {
843 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
844 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
845 }
846 break;
847 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
848 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
849 break;
850 case FSF_PORT_BOXED:
851 zfcp_erp_set_port_status(zfcp_sdev->port,
852 ZFCP_STATUS_COMMON_ACCESS_BOXED);
853 zfcp_erp_port_reopen(zfcp_sdev->port,
854 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
855 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
856 break;
857 case FSF_LUN_BOXED:
858 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
859 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
860 "fsafch4");
861 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
862 break;
863 case FSF_ADAPTER_STATUS_AVAILABLE:
864 switch (fsq->word[0]) {
865 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
866 zfcp_fc_test_link(zfcp_sdev->port);
867 /* fall through */
868 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
870 break;
871 }
872 break;
873 case FSF_GOOD:
874 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
875 break;
876 }
877 }
878
879 /**
880 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
881 * @scmnd: The SCSI command to abort
882 * Returns: pointer to struct zfcp_fsf_req, or NULL on failure
883 */
884
885 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
886 {
887 struct zfcp_fsf_req *req = NULL;
888 struct scsi_device *sdev = scmnd->device;
889 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
890 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
891 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
892
893 spin_lock_irq(&qdio->req_q_lock);
894 if (zfcp_qdio_sbal_get(qdio))
895 goto out;
896 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
897 SBAL_SFLAGS0_TYPE_READ,
898 qdio->adapter->pool.scsi_abort);
899 if (IS_ERR(req)) {
900 req = NULL;
901 goto out;
902 }
903
904 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
905 ZFCP_STATUS_COMMON_UNBLOCKED)))
906 goto out_error_free;
907
908 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
909
910 req->data = sdev;
911 req->handler = zfcp_fsf_abort_fcp_command_handler;
912 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
913 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
914 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
915
916 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
917 if (!zfcp_fsf_req_send(req))
918 goto out;
919
920 out_error_free:
921 zfcp_fsf_req_free(req);
922 req = NULL;
923 out:
924 spin_unlock_irq(&qdio->req_q_lock);
925 return req;
926 }
927
928 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
929 {
930 struct zfcp_adapter *adapter = req->adapter;
931 struct zfcp_fsf_ct_els *ct = req->data;
932 struct fsf_qtcb_header *header = &req->qtcb->header;
933
934 ct->status = -EINVAL;
935
936 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
937 goto skip_fsfstatus;
938
939 switch (header->fsf_status) {
940 case FSF_GOOD:
941 ct->status = 0;
942 zfcp_dbf_san_res("fsscth2", req);
943 break;
944 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
945 zfcp_fsf_class_not_supp(req);
946 break;
947 case FSF_ADAPTER_STATUS_AVAILABLE:
948 switch (header->fsf_status_qual.word[0]){
949 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
950 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
951 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
952 break;
953 }
954 break;
955 case FSF_PORT_BOXED:
956 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
957 break;
958 case FSF_PORT_HANDLE_NOT_VALID:
959 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
960 /* fall through */
961 case FSF_GENERIC_COMMAND_REJECTED:
962 case FSF_PAYLOAD_SIZE_MISMATCH:
963 case FSF_REQUEST_SIZE_TOO_LARGE:
964 case FSF_RESPONSE_SIZE_TOO_LARGE:
965 case FSF_SBAL_MISMATCH:
966 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
967 break;
968 }
969
970 skip_fsfstatus:
971 if (ct->handler)
972 ct->handler(ct->handler_data);
973 }
974
975 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
976 struct zfcp_qdio_req *q_req,
977 struct scatterlist *sg_req,
978 struct scatterlist *sg_resp)
979 {
980 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
981 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
982 zfcp_qdio_set_sbale_last(qdio, q_req);
983 }
984
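/*
 * Map the CT/ELS request and response scatterlists onto SBALs: use the
 * data-division format in multi-buffer mode, a single unchained SBAL
 * when both buffers fit into one, and chained SBALs only if the adapter
 * advertises FSF_FEATURE_ELS_CT_CHAINED_SBALS.
 */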
985 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
986 struct scatterlist *sg_req,
987 struct scatterlist *sg_resp)
988 {
989 struct zfcp_adapter *adapter = req->adapter;
990 struct zfcp_qdio *qdio = adapter->qdio;
991 struct fsf_qtcb *qtcb = req->qtcb;
992 u32 feat = adapter->adapter_features;
993
994 if (zfcp_adapter_multi_buffer_active(adapter)) {
995 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
996 return -EIO;
997 qtcb->bottom.support.req_buf_length =
998 zfcp_qdio_real_bytes(sg_req);
999 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1000 return -EIO;
1001 qtcb->bottom.support.resp_buf_length =
1002 zfcp_qdio_real_bytes(sg_resp);
1003
1004 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
1005 zfcp_qdio_sbale_count(sg_req));
1006 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1007 zfcp_qdio_set_scount(qdio, &req->qdio_req);
1008 return 0;
1009 }
1010
1011 /* use single, unchained SBAL if it can hold the request */
1012 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1013 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1014 sg_req, sg_resp);
1015 return 0;
1016 }
1017
1018 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1019 return -EOPNOTSUPP;
1020
1021 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1022 return -EIO;
1023
1024 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1025
1026 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1027 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1028
1029 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1030 return -EIO;
1031
1032 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1033
1034 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1035
1036 return 0;
1037 }
1038
1039 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1040 struct scatterlist *sg_req,
1041 struct scatterlist *sg_resp,
1042 unsigned int timeout)
1043 {
1044 int ret;
1045
1046 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1047 if (ret)
1048 return ret;
1049
1050 /* common settings for ct/gs and els requests */
1051 if (timeout > 255)
1052 timeout = 255; /* max value accepted by hardware */
1053 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1054 req->qtcb->bottom.support.timeout = timeout;
1055 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1056
1057 return 0;
1058 }
1059
1060 /**
1061 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1062 * @ct: pointer to struct zfcp_fsf_ct_els with data for the request
1063 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1064 */
1065 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1066 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1067 unsigned int timeout)
1068 {
1069 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1070 struct zfcp_fsf_req *req;
1071 int ret = -EIO;
1072
1073 spin_lock_irq(&qdio->req_q_lock);
1074 if (zfcp_qdio_sbal_get(qdio))
1075 goto out;
1076
1077 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1078 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1079
1080 if (IS_ERR(req)) {
1081 ret = PTR_ERR(req);
1082 goto out;
1083 }
1084
1085 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1086 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1087 if (ret)
1088 goto failed_send;
1089
1090 req->handler = zfcp_fsf_send_ct_handler;
1091 req->qtcb->header.port_handle = wka_port->handle;
1092 ct->d_id = wka_port->d_id;
1093 req->data = ct;
1094
1095 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1096
1097 ret = zfcp_fsf_req_send(req);
1098 if (ret)
1099 goto failed_send;
1100
1101 goto out;
1102
1103 failed_send:
1104 zfcp_fsf_req_free(req);
1105 out:
1106 spin_unlock_irq(&qdio->req_q_lock);
1107 return ret;
1108 }
1109
1110 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1111 {
1112 struct zfcp_fsf_ct_els *send_els = req->data;
1113 struct fsf_qtcb_header *header = &req->qtcb->header;
1114
1115 send_els->status = -EINVAL;
1116
1117 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1118 goto skip_fsfstatus;
1119
1120 switch (header->fsf_status) {
1121 case FSF_GOOD:
1122 send_els->status = 0;
1123 zfcp_dbf_san_res("fsselh1", req);
1124 break;
1125 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1126 zfcp_fsf_class_not_supp(req);
1127 break;
1128 case FSF_ADAPTER_STATUS_AVAILABLE:
1129 switch (header->fsf_status_qual.word[0]){
1130 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1131 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1132 case FSF_SQ_RETRY_IF_POSSIBLE:
1133 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1134 break;
1135 }
1136 break;
1137 case FSF_ELS_COMMAND_REJECTED:
1138 case FSF_PAYLOAD_SIZE_MISMATCH:
1139 case FSF_REQUEST_SIZE_TOO_LARGE:
1140 case FSF_RESPONSE_SIZE_TOO_LARGE:
1141 break;
1142 case FSF_SBAL_MISMATCH:
1143 /* should never occur, avoided in zfcp_fsf_send_els */
1144 /* fall through */
1145 default:
1146 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1147 break;
1148 }
1149 skip_fsfstatus:
1150 if (send_els->handler)
1151 send_els->handler(send_els->handler_data);
1152 }
1153
1154 /**
1155 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1156 * @els: pointer to struct zfcp_fsf_ct_els with data for the command
1157 */
1158 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1159 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1160 {
1161 struct zfcp_fsf_req *req;
1162 struct zfcp_qdio *qdio = adapter->qdio;
1163 int ret = -EIO;
1164
1165 spin_lock_irq(&qdio->req_q_lock);
1166 if (zfcp_qdio_sbal_get(qdio))
1167 goto out;
1168
1169 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1170 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1171
1172 if (IS_ERR(req)) {
1173 ret = PTR_ERR(req);
1174 goto out;
1175 }
1176
1177 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1178
1179 if (!zfcp_adapter_multi_buffer_active(adapter))
1180 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1181
1182 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1183
1184 if (ret)
1185 goto failed_send;
1186
1187 hton24(req->qtcb->bottom.support.d_id, d_id);
1188 req->handler = zfcp_fsf_send_els_handler;
1189 els->d_id = d_id;
1190 req->data = els;
1191
1192 zfcp_dbf_san_req("fssels1", req, d_id);
1193
1194 ret = zfcp_fsf_req_send(req);
1195 if (ret)
1196 goto failed_send;
1197
1198 goto out;
1199
1200 failed_send:
1201 zfcp_fsf_req_free(req);
1202 out:
1203 spin_unlock_irq(&qdio->req_q_lock);
1204 return ret;
1205 }
1206
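/**
 * zfcp_fsf_exchange_config_data - request configuration data from the adapter
 * @erp_action: ERP action for the adapter whose configuration is requested
 * Returns: 0 on success, error otherwise
 */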
1207 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1208 {
1209 struct zfcp_fsf_req *req;
1210 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1211 int retval = -EIO;
1212
1213 spin_lock_irq(&qdio->req_q_lock);
1214 if (zfcp_qdio_sbal_get(qdio))
1215 goto out;
1216
1217 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1218 SBAL_SFLAGS0_TYPE_READ,
1219 qdio->adapter->pool.erp_req);
1220
1221 if (IS_ERR(req)) {
1222 retval = PTR_ERR(req);
1223 goto out;
1224 }
1225
1226 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1227 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1228
1229 req->qtcb->bottom.config.feature_selection =
1230 FSF_FEATURE_NOTIFICATION_LOST |
1231 FSF_FEATURE_UPDATE_ALERT;
1232 req->erp_action = erp_action;
1233 req->handler = zfcp_fsf_exchange_config_data_handler;
1234 erp_action->fsf_req_id = req->req_id;
1235
1236 zfcp_fsf_start_erp_timer(req);
1237 retval = zfcp_fsf_req_send(req);
1238 if (retval) {
1239 zfcp_fsf_req_free(req);
1240 erp_action->fsf_req_id = 0;
1241 }
1242 out:
1243 spin_unlock_irq(&qdio->req_q_lock);
1244 return retval;
1245 }
1246
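/**
 * zfcp_fsf_exchange_config_data_sync - request configuration data and wait
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_config, may be NULL
 * Returns: 0 on success, error otherwise
 */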
1247 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1248 struct fsf_qtcb_bottom_config *data)
1249 {
1250 struct zfcp_fsf_req *req = NULL;
1251 int retval = -EIO;
1252
1253 spin_lock_irq(&qdio->req_q_lock);
1254 if (zfcp_qdio_sbal_get(qdio))
1255 goto out_unlock;
1256
1257 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1258 SBAL_SFLAGS0_TYPE_READ, NULL);
1259
1260 if (IS_ERR(req)) {
1261 retval = PTR_ERR(req);
1262 goto out_unlock;
1263 }
1264
1265 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1266 req->handler = zfcp_fsf_exchange_config_data_handler;
1267
1268 req->qtcb->bottom.config.feature_selection =
1269 FSF_FEATURE_NOTIFICATION_LOST |
1270 FSF_FEATURE_UPDATE_ALERT;
1271
1272 if (data)
1273 req->data = data;
1274
1275 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1276 retval = zfcp_fsf_req_send(req);
1277 spin_unlock_irq(&qdio->req_q_lock);
1278 if (!retval)
1279 wait_for_completion(&req->completion);
1280
1281 zfcp_fsf_req_free(req);
1282 return retval;
1283
1284 out_unlock:
1285 spin_unlock_irq(&qdio->req_q_lock);
1286 return retval;
1287 }
1288
1289 /**
1290 * zfcp_fsf_exchange_port_data - request information about local port
1291 * @erp_action: ERP action for the adapter for which port data is requested
1292 * Returns: 0 on success, error otherwise
1293 */
1294 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1295 {
1296 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1297 struct zfcp_fsf_req *req;
1298 int retval = -EIO;
1299
1300 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1301 return -EOPNOTSUPP;
1302
1303 spin_lock_irq(&qdio->req_q_lock);
1304 if (zfcp_qdio_sbal_get(qdio))
1305 goto out;
1306
1307 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1308 SBAL_SFLAGS0_TYPE_READ,
1309 qdio->adapter->pool.erp_req);
1310
1311 if (IS_ERR(req)) {
1312 retval = PTR_ERR(req);
1313 goto out;
1314 }
1315
1316 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1317 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1318
1319 req->handler = zfcp_fsf_exchange_port_data_handler;
1320 req->erp_action = erp_action;
1321 erp_action->fsf_req_id = req->req_id;
1322
1323 zfcp_fsf_start_erp_timer(req);
1324 retval = zfcp_fsf_req_send(req);
1325 if (retval) {
1326 zfcp_fsf_req_free(req);
1327 erp_action->fsf_req_id = 0;
1328 }
1329 out:
1330 spin_unlock_irq(&qdio->req_q_lock);
1331 return retval;
1332 }
1333
1334 /**
1335 * zfcp_fsf_exchange_port_data_sync - request information about local port
1336 * @qdio: pointer to struct zfcp_qdio
1337 * @data: pointer to struct fsf_qtcb_bottom_port
1338 * Returns: 0 on success, error otherwise
1339 */
1340 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1341 struct fsf_qtcb_bottom_port *data)
1342 {
1343 struct zfcp_fsf_req *req = NULL;
1344 int retval = -EIO;
1345
1346 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1347 return -EOPNOTSUPP;
1348
1349 spin_lock_irq(&qdio->req_q_lock);
1350 if (zfcp_qdio_sbal_get(qdio))
1351 goto out_unlock;
1352
1353 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1354 SBAL_SFLAGS0_TYPE_READ, NULL);
1355
1356 if (IS_ERR(req)) {
1357 retval = PTR_ERR(req);
1358 goto out_unlock;
1359 }
1360
1361 if (data)
1362 req->data = data;
1363
1364 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1365
1366 req->handler = zfcp_fsf_exchange_port_data_handler;
1367 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1368 retval = zfcp_fsf_req_send(req);
1369 spin_unlock_irq(&qdio->req_q_lock);
1370
1371 if (!retval)
1372 wait_for_completion(&req->completion);
1373
1374 zfcp_fsf_req_free(req);
1375
1376 return retval;
1377
1378 out_unlock:
1379 spin_unlock_irq(&qdio->req_q_lock);
1380 return retval;
1381 }
1382
1383 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1384 {
1385 struct zfcp_port *port = req->data;
1386 struct fsf_qtcb_header *header = &req->qtcb->header;
1387 struct fc_els_flogi *plogi;
1388
1389 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1390 goto out;
1391
1392 switch (header->fsf_status) {
1393 case FSF_PORT_ALREADY_OPEN:
1394 break;
1395 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1396 dev_warn(&req->adapter->ccw_device->dev,
1397 "Not enough FCP adapter resources to open "
1398 "remote port 0x%016Lx\n",
1399 (unsigned long long)port->wwpn);
1400 zfcp_erp_set_port_status(port,
1401 ZFCP_STATUS_COMMON_ERP_FAILED);
1402 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1403 break;
1404 case FSF_ADAPTER_STATUS_AVAILABLE:
1405 switch (header->fsf_status_qual.word[0]) {
1406 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1407 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1408 case FSF_SQ_NO_RETRY_POSSIBLE:
1409 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1410 break;
1411 }
1412 break;
1413 case FSF_GOOD:
1414 port->handle = header->port_handle;
1415 atomic_or(ZFCP_STATUS_COMMON_OPEN |
1416 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1417 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1418 &port->status);
1419 /* check whether D_ID has changed during open */
1420 /*
1421 * FIXME: This check is not airtight, as the FCP channel does
1422 * not monitor closures of target port connections caused on
1423 * the remote side. Thus, they might miss out on invalidating
1424 * locally cached WWPNs (and other N_Port parameters) of gone
1425 * target ports. So, our heroic attempt to make things safe
1426 * could be undermined by 'open port' response data tagged with
1427 * obsolete WWPNs. Another reason to monitor potential
1428 * connection closures ourself at least (by interpreting
1429 * incoming ELS' and unsolicited status). It just crosses my
1430 * mind that one should be able to cross-check by means of
1431 * another GID_PN straight after a port has been opened.
1432 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1433 */
1434 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1435 if (req->qtcb->bottom.support.els1_length >=
1436 FSF_PLOGI_MIN_LEN)
1437 zfcp_fc_plogi_evaluate(port, plogi);
1438 break;
1439 case FSF_UNKNOWN_OP_SUBTYPE:
1440 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1441 break;
1442 }
1443
1444 out:
1445 put_device(&port->dev);
1446 }
1447
1448 /**
1449 * zfcp_fsf_open_port - create and send open port request
1450 * @erp_action: pointer to struct zfcp_erp_action
1451 * Returns: 0 on success, error otherwise
1452 */
1453 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1454 {
1455 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1456 struct zfcp_port *port = erp_action->port;
1457 struct zfcp_fsf_req *req;
1458 int retval = -EIO;
1459
1460 spin_lock_irq(&qdio->req_q_lock);
1461 if (zfcp_qdio_sbal_get(qdio))
1462 goto out;
1463
1464 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1465 SBAL_SFLAGS0_TYPE_READ,
1466 qdio->adapter->pool.erp_req);
1467
1468 if (IS_ERR(req)) {
1469 retval = PTR_ERR(req);
1470 goto out;
1471 }
1472
1473 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1474 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1475
1476 req->handler = zfcp_fsf_open_port_handler;
1477 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1478 req->data = port;
1479 req->erp_action = erp_action;
1480 erp_action->fsf_req_id = req->req_id;
1481 get_device(&port->dev);
1482
1483 zfcp_fsf_start_erp_timer(req);
1484 retval = zfcp_fsf_req_send(req);
1485 if (retval) {
1486 zfcp_fsf_req_free(req);
1487 erp_action->fsf_req_id = 0;
1488 put_device(&port->dev);
1489 }
1490 out:
1491 spin_unlock_irq(&qdio->req_q_lock);
1492 return retval;
1493 }
1494
1495 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1496 {
1497 struct zfcp_port *port = req->data;
1498
1499 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1500 return;
1501
1502 switch (req->qtcb->header.fsf_status) {
1503 case FSF_PORT_HANDLE_NOT_VALID:
1504 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1505 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1506 break;
1507 case FSF_ADAPTER_STATUS_AVAILABLE:
1508 break;
1509 case FSF_GOOD:
1510 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1511 break;
1512 }
1513 }
1514
1515 /**
1516 * zfcp_fsf_close_port - create and send close port request
1517 * @erp_action: pointer to struct zfcp_erp_action
1518 * Returns: 0 on success, error otherwise
1519 */
1520 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1521 {
1522 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1523 struct zfcp_fsf_req *req;
1524 int retval = -EIO;
1525
1526 spin_lock_irq(&qdio->req_q_lock);
1527 if (zfcp_qdio_sbal_get(qdio))
1528 goto out;
1529
1530 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1531 SBAL_SFLAGS0_TYPE_READ,
1532 qdio->adapter->pool.erp_req);
1533
1534 if (IS_ERR(req)) {
1535 retval = PTR_ERR(req);
1536 goto out;
1537 }
1538
1539 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1540 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1541
1542 req->handler = zfcp_fsf_close_port_handler;
1543 req->data = erp_action->port;
1544 req->erp_action = erp_action;
1545 req->qtcb->header.port_handle = erp_action->port->handle;
1546 erp_action->fsf_req_id = req->req_id;
1547
1548 zfcp_fsf_start_erp_timer(req);
1549 retval = zfcp_fsf_req_send(req);
1550 if (retval) {
1551 zfcp_fsf_req_free(req);
1552 erp_action->fsf_req_id = 0;
1553 }
1554 out:
1555 spin_unlock_irq(&qdio->req_q_lock);
1556 return retval;
1557 }
1558
1559 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1560 {
1561 struct zfcp_fc_wka_port *wka_port = req->data;
1562 struct fsf_qtcb_header *header = &req->qtcb->header;
1563
1564 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1565 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1566 goto out;
1567 }
1568
1569 switch (header->fsf_status) {
1570 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1571 dev_warn(&req->adapter->ccw_device->dev,
1572 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1573 /* fall through */
1574 case FSF_ADAPTER_STATUS_AVAILABLE:
1575 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1576 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1577 break;
1578 case FSF_GOOD:
1579 wka_port->handle = header->port_handle;
1580 /* fall through */
1581 case FSF_PORT_ALREADY_OPEN:
1582 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1583 }
1584 out:
1585 wake_up(&wka_port->completion_wq);
1586 }
1587
1588 /**
1589 * zfcp_fsf_open_wka_port - create and send open wka-port request
1590 * @wka_port: pointer to struct zfcp_fc_wka_port
1591 * Returns: 0 on success, error otherwise
1592 */
1593 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1594 {
1595 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1596 struct zfcp_fsf_req *req;
1597 unsigned long req_id = 0;
1598 int retval = -EIO;
1599
1600 spin_lock_irq(&qdio->req_q_lock);
1601 if (zfcp_qdio_sbal_get(qdio))
1602 goto out;
1603
1604 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1605 SBAL_SFLAGS0_TYPE_READ,
1606 qdio->adapter->pool.erp_req);
1607
1608 if (IS_ERR(req)) {
1609 retval = PTR_ERR(req);
1610 goto out;
1611 }
1612
1613 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1614 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1615
1616 req->handler = zfcp_fsf_open_wka_port_handler;
1617 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1618 req->data = wka_port;
1619
1620 req_id = req->req_id;
1621
1622 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1623 retval = zfcp_fsf_req_send(req);
1624 if (retval)
1625 zfcp_fsf_req_free(req);
1626 out:
1627 spin_unlock_irq(&qdio->req_q_lock);
1628 if (!retval)
1629 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1630 return retval;
1631 }
1632
1633 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1634 {
1635 struct zfcp_fc_wka_port *wka_port = req->data;
1636
1637 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1638 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1639 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1640 }
1641
1642 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1643 wake_up(&wka_port->completion_wq);
1644 }
1645
1646 /**
1647 * zfcp_fsf_close_wka_port - create and send close wka port request
1648 * @wka_port: WKA port to close
1649 * Returns: 0 on success, error otherwise
1650 */
1651 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1652 {
1653 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1654 struct zfcp_fsf_req *req;
1655 unsigned long req_id = 0;
1656 int retval = -EIO;
1657
1658 spin_lock_irq(&qdio->req_q_lock);
1659 if (zfcp_qdio_sbal_get(qdio))
1660 goto out;
1661
1662 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1663 SBAL_SFLAGS0_TYPE_READ,
1664 qdio->adapter->pool.erp_req);
1665
1666 if (IS_ERR(req)) {
1667 retval = PTR_ERR(req);
1668 goto out;
1669 }
1670
1671 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1672 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1673
1674 req->handler = zfcp_fsf_close_wka_port_handler;
1675 req->data = wka_port;
1676 req->qtcb->header.port_handle = wka_port->handle;
1677
1678 req_id = req->req_id;
1679
1680 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1681 retval = zfcp_fsf_req_send(req);
1682 if (retval)
1683 zfcp_fsf_req_free(req);
1684 out:
1685 spin_unlock_irq(&qdio->req_q_lock);
1686 if (!retval)
1687 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
1688 return retval;
1689 }
1690
1691 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1692 {
1693 struct zfcp_port *port = req->data;
1694 struct fsf_qtcb_header *header = &req->qtcb->header;
1695 struct scsi_device *sdev;
1696
1697 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1698 return;
1699
1700 switch (header->fsf_status) {
1701 case FSF_PORT_HANDLE_NOT_VALID:
1702 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1703 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1704 break;
1705 case FSF_PORT_BOXED:
1706 /* can't use generic zfcp_erp_modify_port_status because
1707 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1708 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1709 shost_for_each_device(sdev, port->adapter->scsi_host)
1710 if (sdev_to_zfcp(sdev)->port == port)
1711 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1712 &sdev_to_zfcp(sdev)->status);
1713 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1714 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1715 "fscpph2");
1716 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1717 break;
1718 case FSF_ADAPTER_STATUS_AVAILABLE:
1719 switch (header->fsf_status_qual.word[0]) {
1720 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1721 /* fall through */
1722 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1723 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1724 break;
1725 }
1726 break;
1727 case FSF_GOOD:
1728 /* can't use generic zfcp_erp_modify_port_status because
1729 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1730 */
1731 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1732 shost_for_each_device(sdev, port->adapter->scsi_host)
1733 if (sdev_to_zfcp(sdev)->port == port)
1734 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1735 &sdev_to_zfcp(sdev)->status);
1736 break;
1737 }
1738 }
1739
1740 /**
1741 * zfcp_fsf_close_physical_port - close physical port
1742 * @erp_action: pointer to struct zfcp_erp_action
1743 * Returns: 0 on success
1744 */
1745 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1746 {
1747 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1748 struct zfcp_fsf_req *req;
1749 int retval = -EIO;
1750
1751 spin_lock_irq(&qdio->req_q_lock);
1752 if (zfcp_qdio_sbal_get(qdio))
1753 goto out;
1754
1755 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1756 SBAL_SFLAGS0_TYPE_READ,
1757 qdio->adapter->pool.erp_req);
1758
1759 if (IS_ERR(req)) {
1760 retval = PTR_ERR(req);
1761 goto out;
1762 }
1763
1764 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1765 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1766
1767 req->data = erp_action->port;
1768 req->qtcb->header.port_handle = erp_action->port->handle;
1769 req->erp_action = erp_action;
1770 req->handler = zfcp_fsf_close_physical_port_handler;
1771 erp_action->fsf_req_id = req->req_id;
1772
1773 zfcp_fsf_start_erp_timer(req);
1774 retval = zfcp_fsf_req_send(req);
1775 if (retval) {
1776 zfcp_fsf_req_free(req);
1777 erp_action->fsf_req_id = 0;
1778 }
1779 out:
1780 spin_unlock_irq(&qdio->req_q_lock);
1781 return retval;
1782 }
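
/*
 * Editorial note: zfcp_fsf_close_physical_port() above and the
 * zfcp_fsf_open_lun()/zfcp_fsf_close_lun() senders below share the same
 * ERP-driven request pattern: allocate the SBAL and request from the
 * erp_req mempool under qdio->req_q_lock, take the handles from the
 * erp_action, arm the 30 second ERP timer, record the request ID in
 * erp_action->fsf_req_id, and undo that bookkeeping if the send fails.
 * A condensed sketch of that shape (illustrative only, details trimmed):
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (!zfcp_qdio_sbal_get(qdio)) {
 *		req = zfcp_fsf_req_create(qdio, fsf_cmd, SBAL_SFLAGS0_TYPE_READ,
 *					  qdio->adapter->pool.erp_req);
 *		if (!IS_ERR(req)) {
 *			... fill QTCB header, handler, erp_action ...
 *			zfcp_fsf_start_erp_timer(req);
 *			retval = zfcp_fsf_req_send(req);
 *		}
 *	}
 *	spin_unlock_irq(&qdio->req_q_lock);
 */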
1783
1784 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1785 {
1786 struct zfcp_adapter *adapter = req->adapter;
1787 struct scsi_device *sdev = req->data;
1788 struct zfcp_scsi_dev *zfcp_sdev;
1789 struct fsf_qtcb_header *header = &req->qtcb->header;
1790 union fsf_status_qual *qual = &header->fsf_status_qual;
1791
1792 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1793 return;
1794
1795 zfcp_sdev = sdev_to_zfcp(sdev);
1796
1797 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1798 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1799 &zfcp_sdev->status);
1800
1801 switch (header->fsf_status) {
1802
1803 case FSF_PORT_HANDLE_NOT_VALID:
1804 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1805 /* fall through */
1806 case FSF_LUN_ALREADY_OPEN:
1807 break;
1808 case FSF_PORT_BOXED:
1809 zfcp_erp_set_port_status(zfcp_sdev->port,
1810 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1811 zfcp_erp_port_reopen(zfcp_sdev->port,
1812 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1813 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1814 break;
1815 case FSF_LUN_SHARING_VIOLATION:
1816 if (qual->word[0])
1817 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1818 "LUN 0x%Lx on port 0x%Lx is already in "
1819 "use by CSS%d, MIF Image ID %x\n",
1820 zfcp_scsi_dev_lun(sdev),
1821 (unsigned long long)zfcp_sdev->port->wwpn,
1822 qual->fsf_queue_designator.cssid,
1823 qual->fsf_queue_designator.hla);
1824 zfcp_erp_set_lun_status(sdev,
1825 ZFCP_STATUS_COMMON_ERP_FAILED |
1826 ZFCP_STATUS_COMMON_ACCESS_DENIED);
1827 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1828 break;
1829 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1830 dev_warn(&adapter->ccw_device->dev,
1831 "No handle is available for LUN "
1832 "0x%016Lx on port 0x%016Lx\n",
1833 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1834 (unsigned long long)zfcp_sdev->port->wwpn);
1835 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1836 /* fall through */
1837 case FSF_INVALID_COMMAND_OPTION:
1838 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1839 break;
1840 case FSF_ADAPTER_STATUS_AVAILABLE:
1841 switch (header->fsf_status_qual.word[0]) {
1842 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1843 zfcp_fc_test_link(zfcp_sdev->port);
1844 /* fall through */
1845 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1846 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 break;
1848 }
1849 break;
1850
1851 case FSF_GOOD:
1852 zfcp_sdev->lun_handle = header->lun_handle;
1853 atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1854 break;
1855 }
1856 }
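
/*
 * Editorial note: the FSF_LUN_SHARING_VIOLATION warning above decodes the
 * queue designator from the status qualifier to name the channel subsystem
 * (CSS) and MIF image that already holds the LUN; this typically points at
 * another operating system instance accessing the same LUN through a
 * non-NPIV FCP channel.
 */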
1857
1858 /**
1859 * zfcp_fsf_open_lun - open LUN
1860 * @erp_action: pointer to struct zfcp_erp_action
1861 * Returns: 0 on success, error otherwise
1862 */
1863 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1864 {
1865 struct zfcp_adapter *adapter = erp_action->adapter;
1866 struct zfcp_qdio *qdio = adapter->qdio;
1867 struct zfcp_fsf_req *req;
1868 int retval = -EIO;
1869
1870 spin_lock_irq(&qdio->req_q_lock);
1871 if (zfcp_qdio_sbal_get(qdio))
1872 goto out;
1873
1874 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1875 SBAL_SFLAGS0_TYPE_READ,
1876 adapter->pool.erp_req);
1877
1878 if (IS_ERR(req)) {
1879 retval = PTR_ERR(req);
1880 goto out;
1881 }
1882
1883 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1884 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1885
1886 req->qtcb->header.port_handle = erp_action->port->handle;
1887 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1888 req->handler = zfcp_fsf_open_lun_handler;
1889 req->data = erp_action->sdev;
1890 req->erp_action = erp_action;
1891 erp_action->fsf_req_id = req->req_id;
1892
1893 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1894 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1895
1896 zfcp_fsf_start_erp_timer(req);
1897 retval = zfcp_fsf_req_send(req);
1898 if (retval) {
1899 zfcp_fsf_req_free(req);
1900 erp_action->fsf_req_id = 0;
1901 }
1902 out:
1903 spin_unlock_irq(&qdio->req_q_lock);
1904 return retval;
1905 }
1906
1907 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1908 {
1909 struct scsi_device *sdev = req->data;
1910 struct zfcp_scsi_dev *zfcp_sdev;
1911
1912 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1913 return;
1914
1915 zfcp_sdev = sdev_to_zfcp(sdev);
1916
1917 switch (req->qtcb->header.fsf_status) {
1918 case FSF_PORT_HANDLE_NOT_VALID:
1919 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1920 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1921 break;
1922 case FSF_LUN_HANDLE_NOT_VALID:
1923 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1924 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1925 break;
1926 case FSF_PORT_BOXED:
1927 zfcp_erp_set_port_status(zfcp_sdev->port,
1928 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1929 zfcp_erp_port_reopen(zfcp_sdev->port,
1930 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1931 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1932 break;
1933 case FSF_ADAPTER_STATUS_AVAILABLE:
1934 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1935 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1936 zfcp_fc_test_link(zfcp_sdev->port);
1937 /* fall through */
1938 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1939 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1940 break;
1941 }
1942 break;
1943 case FSF_GOOD:
1944 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1945 break;
1946 }
1947 }
1948
1949 /**
1950  * zfcp_fsf_close_lun - close LUN
1951 * @erp_action: pointer to erp_action triggering the "close LUN"
1952 * Returns: 0 on success, error otherwise
1953 */
1954 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1955 {
1956 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1957 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1958 struct zfcp_fsf_req *req;
1959 int retval = -EIO;
1960
1961 spin_lock_irq(&qdio->req_q_lock);
1962 if (zfcp_qdio_sbal_get(qdio))
1963 goto out;
1964
1965 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1966 SBAL_SFLAGS0_TYPE_READ,
1967 qdio->adapter->pool.erp_req);
1968
1969 if (IS_ERR(req)) {
1970 retval = PTR_ERR(req);
1971 goto out;
1972 }
1973
1974 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1975 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1976
1977 req->qtcb->header.port_handle = erp_action->port->handle;
1978 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1979 req->handler = zfcp_fsf_close_lun_handler;
1980 req->data = erp_action->sdev;
1981 req->erp_action = erp_action;
1982 erp_action->fsf_req_id = req->req_id;
1983
1984 zfcp_fsf_start_erp_timer(req);
1985 retval = zfcp_fsf_req_send(req);
1986 if (retval) {
1987 zfcp_fsf_req_free(req);
1988 erp_action->fsf_req_id = 0;
1989 }
1990 out:
1991 spin_unlock_irq(&qdio->req_q_lock);
1992 return retval;
1993 }
1994
1995 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1996 {
1997 lat_rec->sum += lat;
1998 lat_rec->min = min(lat_rec->min, lat);
1999 lat_rec->max = max(lat_rec->max, lat);
2000 }
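
/*
 * Editorial note: a struct fsf_latency_record only accumulates sum, min and
 * max; the matching sample count is the "counter" member of the surrounding
 * struct latency_cont, incremented in zfcp_fsf_req_trace() below.  Assuming
 * those 64-bit sum/counter fields, a mean channel latency in adapter timer
 * ticks could be derived as, for example:
 *
 *	u64 avg_channel = 0;
 *
 *	if (lat->counter)
 *		avg_channel = lat->channel.sum / lat->counter;
 *
 * (multiply by adapter->timer_ticks to get the time base used for the block
 * trace data below).
 */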
2001
2002 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2003 {
2004 struct fsf_qual_latency_info *lat_in;
2005 struct latency_cont *lat = NULL;
2006 struct zfcp_scsi_dev *zfcp_sdev;
2007 struct zfcp_blk_drv_data blktrc;
2008 int ticks = req->adapter->timer_ticks;
2009
2010 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2011
2012 blktrc.flags = 0;
2013 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2014 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2015 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2016 blktrc.inb_usage = 0;
2017 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2018
2019 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2020 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2021 zfcp_sdev = sdev_to_zfcp(scsi->device);
2022 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2023 blktrc.channel_lat = lat_in->channel_lat * ticks;
2024 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2025
2026 switch (req->qtcb->bottom.io.data_direction) {
2027 case FSF_DATADIR_DIF_READ_STRIP:
2028 case FSF_DATADIR_DIF_READ_CONVERT:
2029 case FSF_DATADIR_READ:
2030 lat = &zfcp_sdev->latencies.read;
2031 break;
2032 case FSF_DATADIR_DIF_WRITE_INSERT:
2033 case FSF_DATADIR_DIF_WRITE_CONVERT:
2034 case FSF_DATADIR_WRITE:
2035 lat = &zfcp_sdev->latencies.write;
2036 break;
2037 case FSF_DATADIR_CMND:
2038 lat = &zfcp_sdev->latencies.cmd;
2039 break;
2040 }
2041
2042 if (lat) {
2043 spin_lock(&zfcp_sdev->latencies.lock);
2044 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2045 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2046 lat->counter++;
2047 spin_unlock(&zfcp_sdev->latencies.lock);
2048 }
2049 }
2050
2051 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2052 sizeof(blktrc));
2053 }
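
/*
 * Editorial note: the struct zfcp_blk_drv_data assembled above is handed to
 * blk_add_driver_data(), so it only becomes visible as driver-private
 * payload of the block layer trace (blktrace) for the command's request
 * queue; the channel and fabric latencies are converted from raw adapter
 * units to the common time base by multiplying with adapter->timer_ticks
 * before they are stored.
 */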
2054
2055 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2056 {
2057 struct scsi_cmnd *scmnd = req->data;
2058 struct scsi_device *sdev = scmnd->device;
2059 struct zfcp_scsi_dev *zfcp_sdev;
2060 struct fsf_qtcb_header *header = &req->qtcb->header;
2061
2062 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2063 return;
2064
2065 zfcp_sdev = sdev_to_zfcp(sdev);
2066
2067 switch (header->fsf_status) {
2068 case FSF_HANDLE_MISMATCH:
2069 case FSF_PORT_HANDLE_NOT_VALID:
2070 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2071 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2072 break;
2073 case FSF_FCPLUN_NOT_VALID:
2074 case FSF_LUN_HANDLE_NOT_VALID:
2075 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2076 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2077 break;
2078 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2079 zfcp_fsf_class_not_supp(req);
2080 break;
2081 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2082 dev_err(&req->adapter->ccw_device->dev,
2083 "Incorrect direction %d, LUN 0x%016Lx on port "
2084 "0x%016Lx closed\n",
2085 req->qtcb->bottom.io.data_direction,
2086 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2087 (unsigned long long)zfcp_sdev->port->wwpn);
2088 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2089 "fssfch3");
2090 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2091 break;
2092 case FSF_CMND_LENGTH_NOT_VALID:
2093 dev_err(&req->adapter->ccw_device->dev,
2094 "Incorrect CDB length %d, LUN 0x%016Lx on "
2095 "port 0x%016Lx closed\n",
2096 req->qtcb->bottom.io.fcp_cmnd_length,
2097 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2098 (unsigned long long)zfcp_sdev->port->wwpn);
2099 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2100 "fssfch4");
2101 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2102 break;
2103 case FSF_PORT_BOXED:
2104 zfcp_erp_set_port_status(zfcp_sdev->port,
2105 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2106 zfcp_erp_port_reopen(zfcp_sdev->port,
2107 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2108 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2109 break;
2110 case FSF_LUN_BOXED:
2111 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2112 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2113 "fssfch6");
2114 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2115 break;
2116 case FSF_ADAPTER_STATUS_AVAILABLE:
2117 if (header->fsf_status_qual.word[0] ==
2118 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2119 zfcp_fc_test_link(zfcp_sdev->port);
2120 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2121 break;
2122 }
2123 }
2124
2125 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2126 {
2127 struct scsi_cmnd *scpnt;
2128 struct fcp_resp_with_ext *fcp_rsp;
2129 unsigned long flags;
2130
2131 read_lock_irqsave(&req->adapter->abort_lock, flags);
2132
2133 scpnt = req->data;
2134 if (unlikely(!scpnt)) {
2135 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2136 return;
2137 }
2138
2139 zfcp_fsf_fcp_handler_common(req);
2140
2141 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2142 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2143 goto skip_fsfstatus;
2144 }
2145
2146 switch (req->qtcb->header.fsf_status) {
2147 case FSF_INCONSISTENT_PROT_DATA:
2148 case FSF_INVALID_PROT_PARM:
2149 set_host_byte(scpnt, DID_ERROR);
2150 goto skip_fsfstatus;
2151 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2152 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2153 goto skip_fsfstatus;
2154 case FSF_APP_TAG_CHECK_FAILURE:
2155 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2156 goto skip_fsfstatus;
2157 case FSF_REF_TAG_CHECK_FAILURE:
2158 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2159 goto skip_fsfstatus;
2160 }
2161 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2162 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2163
2164 skip_fsfstatus:
2165 zfcp_fsf_req_trace(req, scpnt);
2166 zfcp_dbf_scsi_result(scpnt, req);
2167
2168 scpnt->host_scribble = NULL;
2169 (scpnt->scsi_done) (scpnt);
2170 /*
2171 * We must hold this lock until scsi_done has been called.
2172  * Otherwise we may call scsi_done after an abort of this
2173  * command has already completed.
2174 * Note: scsi_done must not block!
2175 */
2176 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2177 }
2178
2179 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2180 {
2181 switch (scsi_get_prot_op(scsi_cmnd)) {
2182 case SCSI_PROT_NORMAL:
2183 switch (scsi_cmnd->sc_data_direction) {
2184 case DMA_NONE:
2185 *data_dir = FSF_DATADIR_CMND;
2186 break;
2187 case DMA_FROM_DEVICE:
2188 *data_dir = FSF_DATADIR_READ;
2189 break;
2190 case DMA_TO_DEVICE:
2191 *data_dir = FSF_DATADIR_WRITE;
2192 break;
2193 case DMA_BIDIRECTIONAL:
2194 return -EINVAL;
2195 }
2196 break;
2197
2198 case SCSI_PROT_READ_STRIP:
2199 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2200 break;
2201 case SCSI_PROT_WRITE_INSERT:
2202 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2203 break;
2204 case SCSI_PROT_READ_PASS:
2205 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2206 break;
2207 case SCSI_PROT_WRITE_PASS:
2208 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2209 break;
2210 default:
2211 return -EINVAL;
2212 }
2213
2214 return 0;
2215 }
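
/*
 * Editorial summary of the mapping implemented above: unprotected commands
 * use the plain SCSI data direction, protection pass-through/strip/insert
 * operations select the DIF variants, and everything else is rejected with
 * -EINVAL:
 *
 *	SCSI_PROT_NORMAL + DMA_NONE		-> FSF_DATADIR_CMND
 *	SCSI_PROT_NORMAL + DMA_FROM_DEVICE	-> FSF_DATADIR_READ
 *	SCSI_PROT_NORMAL + DMA_TO_DEVICE	-> FSF_DATADIR_WRITE
 *	SCSI_PROT_READ_STRIP			-> FSF_DATADIR_DIF_READ_STRIP
 *	SCSI_PROT_WRITE_INSERT			-> FSF_DATADIR_DIF_WRITE_INSERT
 *	SCSI_PROT_READ_PASS			-> FSF_DATADIR_DIF_READ_CONVERT
 *	SCSI_PROT_WRITE_PASS			-> FSF_DATADIR_DIF_WRITE_CONVERT
 */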
2216
2217 /**
2218 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2219 * @scsi_cmnd: scsi command to be sent
 * Returns: 0 on success, error otherwise
2220  */
2221 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2222 {
2223 struct zfcp_fsf_req *req;
2224 struct fcp_cmnd *fcp_cmnd;
2225 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2226 int retval = -EIO;
2227 struct scsi_device *sdev = scsi_cmnd->device;
2228 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2229 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2230 struct zfcp_qdio *qdio = adapter->qdio;
2231 struct fsf_qtcb_bottom_io *io;
2232 unsigned long flags;
2233
2234 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2235 ZFCP_STATUS_COMMON_UNBLOCKED)))
2236 return -EBUSY;
2237
2238 spin_lock_irqsave(&qdio->req_q_lock, flags);
2239 if (atomic_read(&qdio->req_q_free) <= 0) {
2240 atomic_inc(&qdio->req_q_full);
2241 goto out;
2242 }
2243
2244 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2245 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2246
2247 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2248 sbtype, adapter->pool.scsi_req);
2249
2250 if (IS_ERR(req)) {
2251 retval = PTR_ERR(req);
2252 goto out;
2253 }
2254
2255 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2256
2257 io = &req->qtcb->bottom.io;
2258 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2259 req->data = scsi_cmnd;
2260 req->handler = zfcp_fsf_fcp_cmnd_handler;
2261 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2262 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2263 io->service_class = FSF_CLASS_3;
2264 io->fcp_cmnd_length = FCP_CMND_LEN;
2265
2266 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2267 io->data_block_length = scsi_cmnd->device->sector_size;
2268 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2269 }
2270
2271 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2272 goto failed_scsi_cmnd;
2273
2274 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2275 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2276
2277 if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2278 scsi_prot_sg_count(scsi_cmnd)) {
2279 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2280 scsi_prot_sg_count(scsi_cmnd));
2281 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2282 scsi_prot_sglist(scsi_cmnd));
2283 if (retval)
2284 goto failed_scsi_cmnd;
2285 io->prot_data_length = zfcp_qdio_real_bytes(
2286 scsi_prot_sglist(scsi_cmnd));
2287 }
2288
2289 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2290 scsi_sglist(scsi_cmnd));
2291 if (unlikely(retval))
2292 goto failed_scsi_cmnd;
2293
2294 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2295 if (zfcp_adapter_multi_buffer_active(adapter))
2296 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2297
2298 retval = zfcp_fsf_req_send(req);
2299 if (unlikely(retval))
2300 goto failed_scsi_cmnd;
2301
2302 goto out;
2303
2304 failed_scsi_cmnd:
2305 zfcp_fsf_req_free(req);
2306 scsi_cmnd->host_scribble = NULL;
2307 out:
2308 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2309 return retval;
2310 }
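
/*
 * Usage sketch (editorial, not part of the driver): zfcp_fsf_fcp_cmnd() is
 * intended for the SCSI queuecommand path; -EBUSY signals a blocked LUN and
 * -EIO a temporarily exhausted request queue, which a caller would normally
 * translate into SCSI midlayer busy codes so the command gets retried.  A
 * hypothetical caller (the exact mapping lives in the zfcp_scsi code):
 *
 *	int ret = zfcp_fsf_fcp_cmnd(scpnt);
 *
 *	if (ret == -EBUSY)
 *		return SCSI_MLQUEUE_DEVICE_BUSY;
 *	if (ret < 0)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	return 0;
 */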
2311
2312 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2313 {
2314 struct fcp_resp_with_ext *fcp_rsp;
2315 struct fcp_resp_rsp_info *rsp_info;
2316
2317 zfcp_fsf_fcp_handler_common(req);
2318
2319 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2320 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2321
2322 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2323 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2324 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2325 }
2326
2327 /**
2328 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2329 * @scmnd: SCSI command to send the task management command for
2330 * @tm_flags: unsigned byte for task management flags
2331  * Returns: pointer to struct zfcp_fsf_req on success, NULL otherwise
2332 */
2333 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2334 u8 tm_flags)
2335 {
2336 struct zfcp_fsf_req *req = NULL;
2337 struct fcp_cmnd *fcp_cmnd;
2338 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2339 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2340
2341 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2342 ZFCP_STATUS_COMMON_UNBLOCKED)))
2343 return NULL;
2344
2345 spin_lock_irq(&qdio->req_q_lock);
2346 if (zfcp_qdio_sbal_get(qdio))
2347 goto out;
2348
2349 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2350 SBAL_SFLAGS0_TYPE_WRITE,
2351 qdio->adapter->pool.scsi_req);
2352
2353 if (IS_ERR(req)) {
2354 req = NULL;
2355 goto out;
2356 }
2357
2358 req->data = scmnd;
2359 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2360 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2361 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2363 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2364 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2365
2366 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2367
2368 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2369 zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2370
2371 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2372 if (!zfcp_fsf_req_send(req))
2373 goto out;
2374
2375 zfcp_fsf_req_free(req);
2376 req = NULL;
2377 out:
2378 spin_unlock_irq(&qdio->req_q_lock);
2379 return req;
2380 }
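
/*
 * Usage sketch (editorial): unlike the other senders in this file,
 * zfcp_fsf_fcp_task_mgmt() hands the request back to its caller, which is
 * expected to wait for completion and check ZFCP_STATUS_FSFREQ_TMFUNCFAILED
 * before freeing it.  A hypothetical caller, assuming the request's
 * completion member is used for synchronization as elsewhere in zfcp:
 *
 *	fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
 *	if (!fsf_req)
 *		return FAILED;
 *	wait_for_completion(&fsf_req->completion);
 *	ret = (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) ?
 *		FAILED : SUCCESS;
 *	zfcp_fsf_req_free(fsf_req);
 *	return ret;
 */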
2381
2382 /**
2383 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2384  * @qdio: pointer to struct zfcp_qdio
2385 * @sbal_idx: response queue index of SBAL to be processed
2386 */
2387 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2388 {
2389 struct zfcp_adapter *adapter = qdio->adapter;
2390 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2391 struct qdio_buffer_element *sbale;
2392 struct zfcp_fsf_req *fsf_req;
2393 unsigned long req_id;
2394 int idx;
2395
2396 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2397
2398 sbale = &sbal->element[idx];
2399 req_id = (unsigned long) sbale->addr;
2400 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2401
2402 if (!fsf_req) {
2403 /*
2404 * An unknown request ID means that we potentially have memory
2405 * corruption and must stop the machine immediately.
2406 */
2407 zfcp_qdio_siosl(adapter);
2408 panic("error: unknown req_id (%lx) on adapter %s.\n",
2409 req_id, dev_name(&adapter->ccw_device->dev));
2410 }
2411
2412 fsf_req->qdio_req.sbal_response = sbal_idx;
2413 zfcp_fsf_req_complete(fsf_req);
2414
2415 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2416 break;
2417 }
2418 }
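
/*
 * Editorial note: the request ID read from sbale->addr above is expected to
 * be the same value that was set up for the outbound request and tracked in
 * adapter->req_list; zfcp_reqlist_find_rm() both looks the request up and
 * removes it from that list, so a duplicate completion or an ID that was
 * never issued cannot match and is treated as fatal memory corruption.
 */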
2419