/*
 * zfcp device driver
 *
 * Implementation of FSF commands.
 */
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"

struct kmem_cache *zfcp_fsf_qtcb_cache;
22
23static void zfcp_fsf_request_timeout_handler(unsigned long data)
24{
25 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26 zfcp_qdio_siosl(adapter);
27 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
28 "fsrth_1");
29}
30
31static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32 unsigned long timeout)
33{
34 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36 fsf_req->timer.expires = jiffies + timeout;
37 add_timer(&fsf_req->timer);
38}
39
40static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41{
42 BUG_ON(!fsf_req->erp_action);
43 fsf_req->timer.function = zfcp_erp_timeout_handler;
44 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45 fsf_req->timer.expires = jiffies + 30 * HZ;
46 add_timer(&fsf_req->timer);
47}
48
/* association between FSF command and FSF QTCB type */
50static u32 fsf_qtcb_type[] = {
51 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
52 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
59 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
60 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
61 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
62 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
63 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
64};
65
66static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
67{
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69 "operational because of an unsupported FC class\n");
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
72}
73
/**
 * zfcp_fsf_req_free - free memory used by fsf request
 * @req: pointer to struct zfcp_fsf_req
 */
78void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
79{
80 if (likely(req->pool)) {
81 if (likely(req->qtcb))
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83 mempool_free(req, req->pool);
84 return;
85 }
86
87 if (likely(req->qtcb))
88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89 kfree(req);
90}
91
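/* an unsolicited "port closed" status was received: look up the port by
 * its destination ID and trigger a port reopen through error recovery */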
92static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
93{
94 unsigned long flags;
95 struct fsf_status_read_buffer *sr_buf = req->data;
96 struct zfcp_adapter *adapter = req->adapter;
97 struct zfcp_port *port;
98 int d_id = ntoh24(sr_buf->d_id);
99
100 read_lock_irqsave(&adapter->port_list_lock, flags);
101 list_for_each_entry(port, &adapter->port_list, list)
102 if (port->d_id == d_id) {
103 zfcp_erp_port_reopen(port, 0, "fssrpc1");
104 break;
105 }
106 read_unlock_irqrestore(&adapter->port_list_lock, flags);
107}
108
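/* evaluate a "link down" condition: block remote ports, warn about the
 * reported cause (if any) and mark adapter recovery as failed */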
109static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
110 struct fsf_link_down_info *link_down)
111{
112 struct zfcp_adapter *adapter = req->adapter;
113
114 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
115 return;
116
117 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
118
119 zfcp_scsi_schedule_rports_block(adapter);
120
121 if (!link_down)
122 goto out;
123
124 switch (link_down->error_code) {
125 case FSF_PSQ_LINK_NO_LIGHT:
126 dev_warn(&req->adapter->ccw_device->dev,
127 "There is no light signal from the local "
128 "fibre channel cable\n");
129 break;
130 case FSF_PSQ_LINK_WRAP_PLUG:
131 dev_warn(&req->adapter->ccw_device->dev,
132 "There is a wrap plug instead of a fibre "
133 "channel cable\n");
134 break;
135 case FSF_PSQ_LINK_NO_FCP:
136 dev_warn(&req->adapter->ccw_device->dev,
137 "The adjacent fibre channel node does not "
138 "support FCP\n");
139 break;
140 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
141 dev_warn(&req->adapter->ccw_device->dev,
142 "The FCP device is suspended because of a "
143 "firmware update\n");
144 break;
145 case FSF_PSQ_LINK_INVALID_WWPN:
146 dev_warn(&req->adapter->ccw_device->dev,
147 "The FCP device detected a WWPN that is "
148 "duplicate or not valid\n");
149 break;
150 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
151 dev_warn(&req->adapter->ccw_device->dev,
152 "The fibre channel fabric does not support NPIV\n");
153 break;
154 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
155 dev_warn(&req->adapter->ccw_device->dev,
156 "The FCP adapter cannot support more NPIV ports\n");
157 break;
158 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
159 dev_warn(&req->adapter->ccw_device->dev,
160 "The adjacent switch cannot support "
161 "more NPIV ports\n");
162 break;
163 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
164 dev_warn(&req->adapter->ccw_device->dev,
165 "The FCP adapter could not log in to the "
166 "fibre channel fabric\n");
167 break;
168 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
169 dev_warn(&req->adapter->ccw_device->dev,
170 "The WWPN assignment file on the FCP adapter "
171 "has been damaged\n");
172 break;
173 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
174 dev_warn(&req->adapter->ccw_device->dev,
175 "The mode table on the FCP adapter "
176 "has been damaged\n");
177 break;
178 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
179 dev_warn(&req->adapter->ccw_device->dev,
180 "All NPIV ports on the FCP adapter have "
181 "been assigned\n");
182 break;
183 default:
184 dev_warn(&req->adapter->ccw_device->dev,
185 "The link between the FCP adapter and "
186 "the FC fabric is down\n");
187 }
188out:
189 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
190}
191
192static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
193{
194 struct fsf_status_read_buffer *sr_buf = req->data;
195 struct fsf_link_down_info *ldi =
196 (struct fsf_link_down_info *) &sr_buf->payload;
197
198 switch (sr_buf->status_subtype) {
199 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
200 zfcp_fsf_link_down_info_eval(req, ldi);
201 break;
202 case FSF_STATUS_READ_SUB_FDISC_FAILED:
203 zfcp_fsf_link_down_info_eval(req, ldi);
204 break;
205 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
206 zfcp_fsf_link_down_info_eval(req, NULL);
207 }
208}
209
210static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
211{
212 struct zfcp_adapter *adapter = req->adapter;
213 struct fsf_status_read_buffer *sr_buf = req->data;
214
215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
216 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
218 zfcp_fsf_req_free(req);
219 return;
220 }
221
222 zfcp_dbf_hba_fsf_uss("fssrh_4", req);
223
224 switch (sr_buf->status_type) {
225 case FSF_STATUS_READ_PORT_CLOSED:
226 zfcp_fsf_status_read_port_closed(req);
227 break;
228 case FSF_STATUS_READ_INCOMING_ELS:
229 zfcp_fc_incoming_els(req);
230 break;
231 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
232 break;
233 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
234 dev_warn(&adapter->ccw_device->dev,
235 "The error threshold for checksum statistics "
236 "has been exceeded\n");
237 zfcp_dbf_hba_bit_err("fssrh_3", req);
238 break;
239 case FSF_STATUS_READ_LINK_DOWN:
240 zfcp_fsf_status_read_link_down(req);
241 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
242 break;
243 case FSF_STATUS_READ_LINK_UP:
244 dev_info(&adapter->ccw_device->dev,
245 "The local link has been restored\n");
246
247 zfcp_erp_set_adapter_status(adapter,
248 ZFCP_STATUS_COMMON_RUNNING);
249 zfcp_erp_adapter_reopen(adapter,
250 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
251 ZFCP_STATUS_COMMON_ERP_FAILED,
252 "fssrh_2");
253 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
254
255 break;
256 case FSF_STATUS_READ_NOTIFICATION_LOST:
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
258 zfcp_fc_conditional_port_scan(adapter);
259 break;
260 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
261 adapter->adapter_features = sr_buf->payload.word[0];
262 break;
263 }
264
265 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
266 zfcp_fsf_req_free(req);
267
268 atomic_inc(&adapter->stat_miss);
269 queue_work(adapter->work_queue, &adapter->stat_work);
270}
271
272static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
273{
274 switch (req->qtcb->header.fsf_status_qual.word[0]) {
275 case FSF_SQ_FCP_RSP_AVAILABLE:
276 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
277 case FSF_SQ_NO_RETRY_POSSIBLE:
278 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
279 return;
280 case FSF_SQ_COMMAND_ABORTED:
281 break;
282 case FSF_SQ_NO_RECOM:
283 dev_err(&req->adapter->ccw_device->dev,
284 "The FCP adapter reported a problem "
285 "that cannot be recovered\n");
286 zfcp_qdio_siosl(req->adapter);
287 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
288 break;
289 }
290
291 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
292}
293
294static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
295{
296 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
297 return;
298
299 switch (req->qtcb->header.fsf_status) {
300 case FSF_UNKNOWN_COMMAND:
301 dev_err(&req->adapter->ccw_device->dev,
302 "The FCP adapter does not recognize the command 0x%x\n",
303 req->qtcb->header.fsf_command);
304 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
305 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
306 break;
307 case FSF_ADAPTER_STATUS_AVAILABLE:
308 zfcp_fsf_fsfstatus_qual_eval(req);
309 break;
310 }
311}
312
313static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
314{
315 struct zfcp_adapter *adapter = req->adapter;
316 struct fsf_qtcb *qtcb = req->qtcb;
317 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
318
319 zfcp_dbf_hba_fsf_response(req);
320
321 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
322 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
323 return;
324 }
325
326 switch (qtcb->prefix.prot_status) {
327 case FSF_PROT_GOOD:
328 case FSF_PROT_FSF_STATUS_PRESENTED:
329 return;
330 case FSF_PROT_QTCB_VERSION_ERROR:
331 dev_err(&adapter->ccw_device->dev,
332 "QTCB version 0x%x not supported by FCP adapter "
333 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
334 psq->word[0], psq->word[1]);
335 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
336 break;
337 case FSF_PROT_ERROR_STATE:
338 case FSF_PROT_SEQ_NUMB_ERROR:
339 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
340 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
341 break;
342 case FSF_PROT_UNSUPP_QTCB_TYPE:
343 dev_err(&adapter->ccw_device->dev,
344 "The QTCB type is not supported by the FCP adapter\n");
345 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
346 break;
347 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
348 atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
349 &adapter->status);
350 break;
351 case FSF_PROT_DUPLICATE_REQUEST_ID:
352 dev_err(&adapter->ccw_device->dev,
353 "0x%Lx is an ambiguous request identifier\n",
354 (unsigned long long)qtcb->bottom.support.req_handle);
355 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
356 break;
357 case FSF_PROT_LINK_DOWN:
358 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
		/* go through reopen to flush pending requests */
360 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
361 break;
362 case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
364 zfcp_erp_set_adapter_status(adapter,
365 ZFCP_STATUS_COMMON_RUNNING);
366 zfcp_erp_adapter_reopen(adapter,
367 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
368 ZFCP_STATUS_COMMON_ERP_FAILED,
369 "fspse_8");
370 break;
371 default:
372 dev_err(&adapter->ccw_device->dev,
373 "0x%x is not a valid transfer protocol status\n",
374 qtcb->prefix.prot_status);
375 zfcp_qdio_siosl(adapter);
376 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
377 }
378 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
379}

/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter or it has
 * been dismissed due to a queue shutdown, this function is called to
 * process the completion status and trigger further events related to the
 * FSF request.
 */
390static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
391{
392 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
393 zfcp_fsf_status_read_handler(req);
394 return;
395 }
396
397 del_timer(&req->timer);
398 zfcp_fsf_protstatus_eval(req);
399 zfcp_fsf_fsfstatus_eval(req);
400 req->handler(req);
401
402 if (req->erp_action)
403 zfcp_erp_notify(req->erp_action, 0);
404
405 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
406 zfcp_fsf_req_free(req);
407 else
408 complete(&req->completion);
409}

/**
 * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Might be used as access to the request queue is locked out by the
 * adapter being shut down (queue full, adapter stopped).
 */
420void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
421{
422 struct zfcp_fsf_req *req, *tmp;
423 LIST_HEAD(remove_queue);
424
425 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
426 zfcp_reqlist_move(adapter->req_list, &remove_queue);
427
428 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
429 list_del(&req->list);
430 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
431 zfcp_fsf_req_complete(req);
432 }
433}
434
435#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
436#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
437#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
438#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
439#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
440#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
441#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
442
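/* translate the FSF port speed bitmask into the FC_PORTSPEED_* bitmask
 * used by the SCSI FC transport class */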
443static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
444{
445 u32 fdmi_speed = 0;
446 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
447 fdmi_speed |= FC_PORTSPEED_1GBIT;
448 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
449 fdmi_speed |= FC_PORTSPEED_2GBIT;
450 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
451 fdmi_speed |= FC_PORTSPEED_4GBIT;
452 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
453 fdmi_speed |= FC_PORTSPEED_10GBIT;
454 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
455 fdmi_speed |= FC_PORTSPEED_8GBIT;
456 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
457 fdmi_speed |= FC_PORTSPEED_16GBIT;
458 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
459 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
460 return fdmi_speed;
461}
462
463static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
464{
465 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
466 struct zfcp_adapter *adapter = req->adapter;
467 struct Scsi_Host *shost = adapter->scsi_host;
468 struct fc_els_flogi *nsp, *plogi;
469
	/* adjust pointers for missing command code */
471 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
472 - sizeof(u32));
473 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
474 - sizeof(u32));
475
476 if (req->data)
477 memcpy(req->data, bottom, sizeof(*bottom));
478
479 fc_host_port_name(shost) = nsp->fl_wwpn;
480 fc_host_node_name(shost) = nsp->fl_wwnn;
481 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
482
483 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
484 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
485 (u16)FSF_STATUS_READS_RECOM);
486
487 if (fc_host_permanent_port_name(shost) == -1)
488 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
489
490 zfcp_scsi_set_prot(adapter);

	/* the remaining fields are only valid if the exchange completed */
494 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
495 return 0;
496
497 fc_host_port_id(shost) = ntoh24(bottom->s_id);
498 fc_host_speed(shost) =
499 zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
500
501 adapter->hydra_version = bottom->adapter_type;
502
503 switch (bottom->fc_topology) {
504 case FSF_TOPO_P2P:
505 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
506 adapter->peer_wwpn = plogi->fl_wwpn;
507 adapter->peer_wwnn = plogi->fl_wwnn;
508 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
509 break;
510 case FSF_TOPO_FABRIC:
511 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
512 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
513 else
514 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
515 break;
516 case FSF_TOPO_AL:
517 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		/* fall through */
519 default:
520 dev_err(&adapter->ccw_device->dev,
521 "Unknown or unsupported arbitrated loop "
522 "fibre channel topology detected\n");
523 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
524 return -EIO;
525 }
526
527 return 0;
528}
529
530static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
531{
532 struct zfcp_adapter *adapter = req->adapter;
533 struct fsf_qtcb *qtcb = req->qtcb;
534 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
535 struct Scsi_Host *shost = adapter->scsi_host;
536
537 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
538 return;
539
540 adapter->fsf_lic_version = bottom->lic_version;
541 adapter->adapter_features = bottom->adapter_features;
542 adapter->connection_features = bottom->connection_features;
543 adapter->peer_wwpn = 0;
544 adapter->peer_wwnn = 0;
545 adapter->peer_d_id = 0;
546
547 switch (qtcb->header.fsf_status) {
548 case FSF_GOOD:
549 if (zfcp_fsf_exchange_config_evaluate(req))
550 return;
551
552 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
553 dev_err(&adapter->ccw_device->dev,
554 "FCP adapter maximum QTCB size (%d bytes) "
555 "is too small\n",
556 bottom->max_qtcb_size);
557 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
558 return;
559 }
560 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
561 &adapter->status);
562 break;
563 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
564 fc_host_node_name(shost) = 0;
565 fc_host_port_name(shost) = 0;
566 fc_host_port_id(shost) = 0;
567 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
568 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
569 adapter->hydra_version = 0;

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
573 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
574 &adapter->status);
575 zfcp_fsf_link_down_info_eval(req,
576 &qtcb->header.fsf_status_qual.link_down_info);
577 if (zfcp_fsf_exchange_config_evaluate(req))
578 return;
579 break;
580 default:
581 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
582 return;
583 }
584
585 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
586 adapter->hardware_version = bottom->hardware_version;
587 memcpy(fc_host_serial_number(shost), bottom->serial_number,
588 min(FC_SERIAL_NUMBER_SIZE, 17));
589 EBCASC(fc_host_serial_number(shost),
590 min(FC_SERIAL_NUMBER_SIZE, 17));
591 }
592
593 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
594 dev_err(&adapter->ccw_device->dev,
595 "The FCP adapter only supports newer "
596 "control block versions\n");
597 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
598 return;
599 }
600 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
601 dev_err(&adapter->ccw_device->dev,
602 "The FCP adapter only supports older "
603 "control block versions\n");
604 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
605 }
606}
607
608static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
609{
610 struct zfcp_adapter *adapter = req->adapter;
611 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
612 struct Scsi_Host *shost = adapter->scsi_host;
613
614 if (req->data)
615 memcpy(req->data, bottom, sizeof(*bottom));
616
617 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
618 fc_host_permanent_port_name(shost) = bottom->wwpn;
619 } else
620 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
621 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
622 fc_host_supported_speeds(shost) =
623 zfcp_fsf_convert_portspeed(bottom->supported_speed);
624 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
625 FC_FC4_LIST_SIZE);
626 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
627 FC_FC4_LIST_SIZE);
628}
629
630static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
631{
632 struct fsf_qtcb *qtcb = req->qtcb;
633
634 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
635 return;
636
637 switch (qtcb->header.fsf_status) {
638 case FSF_GOOD:
639 zfcp_fsf_exchange_port_evaluate(req);
640 break;
641 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
642 zfcp_fsf_exchange_port_evaluate(req);
643 zfcp_fsf_link_down_info_eval(req,
644 &qtcb->header.fsf_status_qual.link_down_info);
645 break;
646 }
647}
648
649static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
650{
651 struct zfcp_fsf_req *req;
652
653 if (likely(pool))
654 req = mempool_alloc(pool, GFP_ATOMIC);
655 else
656 req = kmalloc(sizeof(*req), GFP_ATOMIC);
657
658 if (unlikely(!req))
659 return NULL;
660
661 memset(req, 0, sizeof(*req));
662 req->pool = pool;
663 return req;
664}
665
666static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
667{
668 struct fsf_qtcb *qtcb;
669
670 if (likely(pool))
671 qtcb = mempool_alloc(pool, GFP_ATOMIC);
672 else
673 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
674
675 if (unlikely(!qtcb))
676 return NULL;
677
678 memset(qtcb, 0, sizeof(*qtcb));
679 return qtcb;
680}
681
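/* allocate and initialize an FSF request; a QTCB is attached for all
 * commands except unsolicited status reads; returns ERR_PTR on failure */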
682static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
683 u32 fsf_cmd, u8 sbtype,
684 mempool_t *pool)
685{
686 struct zfcp_adapter *adapter = qdio->adapter;
687 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
688
689 if (unlikely(!req))
690 return ERR_PTR(-ENOMEM);
691
692 if (adapter->req_no == 0)
693 adapter->req_no++;
694
695 INIT_LIST_HEAD(&req->list);
696 init_timer(&req->timer);
697 init_completion(&req->completion);
698
699 req->adapter = adapter;
700 req->fsf_command = fsf_cmd;
701 req->req_id = adapter->req_no;
702
703 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
704 if (likely(pool))
705 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
706 else
707 req->qtcb = zfcp_qtcb_alloc(NULL);
708
709 if (unlikely(!req->qtcb)) {
710 zfcp_fsf_req_free(req);
711 return ERR_PTR(-ENOMEM);
712 }
713
714 req->seq_no = adapter->fsf_req_seq_no;
715 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
716 req->qtcb->prefix.req_id = req->req_id;
717 req->qtcb->prefix.ulp_info = 26;
718 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
719 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
720 req->qtcb->header.req_handle = req->req_id;
721 req->qtcb->header.fsf_command = req->fsf_command;
722 }
723
724 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
725 req->qtcb, sizeof(struct fsf_qtcb));
726
727 return req;
728}
729
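/* add the request to the adapter's request list and hand it to the QDIO
 * outbound queue; on send failure the request is removed again and the
 * adapter is reopened */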
730static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
731{
732 struct zfcp_adapter *adapter = req->adapter;
733 struct zfcp_qdio *qdio = adapter->qdio;
734 int with_qtcb = (req->qtcb != NULL);
735 int req_id = req->req_id;
736
737 zfcp_reqlist_add(adapter->req_list, req);
738
739 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
740 req->issued = get_tod_clock();
741 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
742 del_timer(&req->timer);
743
744 zfcp_reqlist_find_rm(adapter->req_list, req_id);
745 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
746 return -EIO;
747 }

	/* Don't increase for unsolicited status */
750 if (with_qtcb)
751 adapter->fsf_req_seq_no++;
752 adapter->req_no++;
753
754 return 0;
755}

/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
763int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
764{
765 struct zfcp_adapter *adapter = qdio->adapter;
766 struct zfcp_fsf_req *req;
767 struct fsf_status_read_buffer *sr_buf;
768 struct page *page;
769 int retval = -EIO;
770
771 spin_lock_irq(&qdio->req_q_lock);
772 if (zfcp_qdio_sbal_get(qdio))
773 goto out;
774
775 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
776 SBAL_SFLAGS0_TYPE_STATUS,
777 adapter->pool.status_read_req);
778 if (IS_ERR(req)) {
779 retval = PTR_ERR(req);
780 goto out;
781 }
782
783 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
784 if (!page) {
785 retval = -ENOMEM;
786 goto failed_buf;
787 }
788 sr_buf = page_address(page);
789 memset(sr_buf, 0, sizeof(*sr_buf));
790 req->data = sr_buf;
791
792 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
793 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
794
795 retval = zfcp_fsf_req_send(req);
796 if (retval)
797 goto failed_req_send;
798
799 goto out;
800
801failed_req_send:
802 req->data = NULL;
803 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
804failed_buf:
805 zfcp_dbf_hba_fsf_uss("fssr__1", req);
806 zfcp_fsf_req_free(req);
807out:
808 spin_unlock_irq(&qdio->req_q_lock);
809 return retval;
810}
811
812static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
813{
814 struct scsi_device *sdev = req->data;
815 struct zfcp_scsi_dev *zfcp_sdev;
816 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
817
818 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
819 return;
820
821 zfcp_sdev = sdev_to_zfcp(sdev);
822
823 switch (req->qtcb->header.fsf_status) {
824 case FSF_PORT_HANDLE_NOT_VALID:
825 if (fsq->word[0] == fsq->word[1]) {
826 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
827 "fsafch1");
828 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
829 }
830 break;
831 case FSF_LUN_HANDLE_NOT_VALID:
832 if (fsq->word[0] == fsq->word[1]) {
833 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
834 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
835 }
836 break;
837 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
838 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
839 break;
840 case FSF_PORT_BOXED:
841 zfcp_erp_set_port_status(zfcp_sdev->port,
842 ZFCP_STATUS_COMMON_ACCESS_BOXED);
843 zfcp_erp_port_reopen(zfcp_sdev->port,
844 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
845 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
846 break;
847 case FSF_LUN_BOXED:
848 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
849 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
850 "fsafch4");
851 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
852 break;
853 case FSF_ADAPTER_STATUS_AVAILABLE:
854 switch (fsq->word[0]) {
855 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
856 zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
858 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
859 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
860 break;
861 }
862 break;
863 case FSF_GOOD:
864 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
865 break;
866 }
867}

/**
 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
 * @scmnd: The SCSI command to abort
 * Returns: pointer to struct zfcp_fsf_req, NULL if the request could not
 *	be created or sent
 */
875struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
876{
877 struct zfcp_fsf_req *req = NULL;
878 struct scsi_device *sdev = scmnd->device;
879 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
880 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
881 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
882
883 spin_lock_irq(&qdio->req_q_lock);
884 if (zfcp_qdio_sbal_get(qdio))
885 goto out;
886 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
887 SBAL_SFLAGS0_TYPE_READ,
888 qdio->adapter->pool.scsi_abort);
889 if (IS_ERR(req)) {
890 req = NULL;
891 goto out;
892 }
893
894 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
895 ZFCP_STATUS_COMMON_UNBLOCKED)))
896 goto out_error_free;
897
898 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
899
900 req->data = sdev;
901 req->handler = zfcp_fsf_abort_fcp_command_handler;
902 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
903 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
904 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
905
906 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
907 if (!zfcp_fsf_req_send(req))
908 goto out;
909
910out_error_free:
911 zfcp_fsf_req_free(req);
912 req = NULL;
913out:
914 spin_unlock_irq(&qdio->req_q_lock);
915 return req;
916}
917
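/* completion handler for CT/GS requests sent via zfcp_fsf_send_ct() */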
918static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
919{
920 struct zfcp_adapter *adapter = req->adapter;
921 struct zfcp_fsf_ct_els *ct = req->data;
922 struct fsf_qtcb_header *header = &req->qtcb->header;
923
924 ct->status = -EINVAL;
925
926 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
927 goto skip_fsfstatus;
928
929 switch (header->fsf_status) {
930 case FSF_GOOD:
931 zfcp_dbf_san_res("fsscth2", req);
932 ct->status = 0;
933 break;
934 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
935 zfcp_fsf_class_not_supp(req);
936 break;
937 case FSF_ADAPTER_STATUS_AVAILABLE:
938 switch (header->fsf_status_qual.word[0]){
939 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
940 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
941 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
942 break;
943 }
944 break;
945 case FSF_PORT_BOXED:
946 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
947 break;
948 case FSF_PORT_HANDLE_NOT_VALID:
949 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
		/* fall through */
951 case FSF_GENERIC_COMMAND_REJECTED:
952 case FSF_PAYLOAD_SIZE_MISMATCH:
953 case FSF_REQUEST_SIZE_TOO_LARGE:
954 case FSF_RESPONSE_SIZE_TOO_LARGE:
955 case FSF_SBAL_MISMATCH:
956 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
957 break;
958 }
959
960skip_fsfstatus:
961 if (ct->handler)
962 ct->handler(ct->handler_data);
963}
964
965static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
966 struct zfcp_qdio_req *q_req,
967 struct scatterlist *sg_req,
968 struct scatterlist *sg_resp)
969{
970 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
971 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
972 zfcp_qdio_set_sbale_last(qdio, q_req);
973}
974
975static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
976 struct scatterlist *sg_req,
977 struct scatterlist *sg_resp)
978{
979 struct zfcp_adapter *adapter = req->adapter;
980 struct zfcp_qdio *qdio = adapter->qdio;
981 struct fsf_qtcb *qtcb = req->qtcb;
982 u32 feat = adapter->adapter_features;
983
984 if (zfcp_adapter_multi_buffer_active(adapter)) {
985 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
986 return -EIO;
987 qtcb->bottom.support.req_buf_length =
988 zfcp_qdio_real_bytes(sg_req);
989 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
990 return -EIO;
991 qtcb->bottom.support.resp_buf_length =
992 zfcp_qdio_real_bytes(sg_resp);
993
994 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
995 zfcp_qdio_sbale_count(sg_req));
996 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
997 zfcp_qdio_set_scount(qdio, &req->qdio_req);
998 return 0;
999 }

	/* use single, unchained SBALs if they can hold the request */
1002 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1003 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1004 sg_req, sg_resp);
1005 return 0;
1006 }
1007
1008 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1009 return -EOPNOTSUPP;
1010
1011 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1012 return -EIO;
1013
1014 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1015
1016 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1017 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1018
1019 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1020 return -EIO;
1021
1022 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1023
1024 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1025
1026 return 0;
1027}
1028
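/* set up SBALs, service class and timeouts common to CT/GS and ELS requests */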
1029static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1030 struct scatterlist *sg_req,
1031 struct scatterlist *sg_resp,
1032 unsigned int timeout)
1033{
1034 int ret;
1035
1036 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1037 if (ret)
1038 return ret;
1039
	/* common settings for ct/gs and els requests */
	if (timeout > 255)
		timeout = 255; /* max value accepted by hardware */
1043 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1044 req->qtcb->bottom.support.timeout = timeout;
1045 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1046
1047 return 0;
1048}

/**
 * zfcp_fsf_send_ct - initiate a CT command request (GS command)
 * @wka_port: pointer to zfcp WKA port to send CT/GS to
 * @ct: pointer to struct zfcp_fsf_ct_els with data for request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout that hardware should use, and a later software timeout
 */
1055int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1056 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1057 unsigned int timeout)
1058{
1059 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1060 struct zfcp_fsf_req *req;
1061 int ret = -EIO;
1062
1063 spin_lock_irq(&qdio->req_q_lock);
1064 if (zfcp_qdio_sbal_get(qdio))
1065 goto out;
1066
1067 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1068 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1069
1070 if (IS_ERR(req)) {
1071 ret = PTR_ERR(req);
1072 goto out;
1073 }
1074
1075 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1076 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1077 if (ret)
1078 goto failed_send;
1079
1080 req->handler = zfcp_fsf_send_ct_handler;
1081 req->qtcb->header.port_handle = wka_port->handle;
1082 ct->d_id = wka_port->d_id;
1083 req->data = ct;
1084
1085 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1086
1087 ret = zfcp_fsf_req_send(req);
1088 if (ret)
1089 goto failed_send;
1090
1091 goto out;
1092
1093failed_send:
1094 zfcp_fsf_req_free(req);
1095out:
1096 spin_unlock_irq(&qdio->req_q_lock);
1097 return ret;
1098}
1099
1100static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1101{
1102 struct zfcp_fsf_ct_els *send_els = req->data;
1103 struct fsf_qtcb_header *header = &req->qtcb->header;
1104
1105 send_els->status = -EINVAL;
1106
1107 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1108 goto skip_fsfstatus;
1109
1110 switch (header->fsf_status) {
1111 case FSF_GOOD:
1112 zfcp_dbf_san_res("fsselh1", req);
1113 send_els->status = 0;
1114 break;
1115 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1116 zfcp_fsf_class_not_supp(req);
1117 break;
1118 case FSF_ADAPTER_STATUS_AVAILABLE:
1119 switch (header->fsf_status_qual.word[0]){
1120 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1121 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1122 case FSF_SQ_RETRY_IF_POSSIBLE:
1123 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1124 break;
1125 }
1126 break;
1127 case FSF_ELS_COMMAND_REJECTED:
1128 case FSF_PAYLOAD_SIZE_MISMATCH:
1129 case FSF_REQUEST_SIZE_TOO_LARGE:
1130 case FSF_RESPONSE_SIZE_TOO_LARGE:
1131 break;
1132 case FSF_SBAL_MISMATCH:
		/* should not happen: the request SBALs are limited in
		 * zfcp_fsf_send_els(); fall through to flag an error */
1135 default:
1136 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1137 break;
1138 }
1139skip_fsfstatus:
1140 if (send_els->handler)
1141 send_els->handler(send_els->handler_data);
1142}

/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to zfcp adapter
 * @d_id: N_Port_ID to send ELS to
 * @els: pointer to struct zfcp_fsf_ct_els with data for the command
 * @timeout: timeout that hardware should use, and a later software timeout
 */
1148int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1149 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1150{
1151 struct zfcp_fsf_req *req;
1152 struct zfcp_qdio *qdio = adapter->qdio;
1153 int ret = -EIO;
1154
1155 spin_lock_irq(&qdio->req_q_lock);
1156 if (zfcp_qdio_sbal_get(qdio))
1157 goto out;
1158
1159 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1160 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1161
1162 if (IS_ERR(req)) {
1163 ret = PTR_ERR(req);
1164 goto out;
1165 }
1166
1167 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1168
1169 if (!zfcp_adapter_multi_buffer_active(adapter))
1170 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1171
1172 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1173
1174 if (ret)
1175 goto failed_send;
1176
1177 hton24(req->qtcb->bottom.support.d_id, d_id);
1178 req->handler = zfcp_fsf_send_els_handler;
1179 els->d_id = d_id;
1180 req->data = els;
1181
1182 zfcp_dbf_san_req("fssels1", req, d_id);
1183
1184 ret = zfcp_fsf_req_send(req);
1185 if (ret)
1186 goto failed_send;
1187
1188 goto out;
1189
1190failed_send:
1191 zfcp_fsf_req_free(req);
1192out:
1193 spin_unlock_irq(&qdio->req_q_lock);
1194 return ret;
1195}
1196
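/**
 * zfcp_fsf_exchange_config_data - request information about FCP channel
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */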
1197int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1198{
1199 struct zfcp_fsf_req *req;
1200 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1201 int retval = -EIO;
1202
1203 spin_lock_irq(&qdio->req_q_lock);
1204 if (zfcp_qdio_sbal_get(qdio))
1205 goto out;
1206
1207 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1208 SBAL_SFLAGS0_TYPE_READ,
1209 qdio->adapter->pool.erp_req);
1210
1211 if (IS_ERR(req)) {
1212 retval = PTR_ERR(req);
1213 goto out;
1214 }
1215
1216 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1217 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1218
1219 req->qtcb->bottom.config.feature_selection =
1220 FSF_FEATURE_NOTIFICATION_LOST |
1221 FSF_FEATURE_UPDATE_ALERT;
1222 req->erp_action = erp_action;
1223 req->handler = zfcp_fsf_exchange_config_data_handler;
1224 erp_action->fsf_req_id = req->req_id;
1225
1226 zfcp_fsf_start_erp_timer(req);
1227 retval = zfcp_fsf_req_send(req);
1228 if (retval) {
1229 zfcp_fsf_req_free(req);
1230 erp_action->fsf_req_id = 0;
1231 }
1232out:
1233 spin_unlock_irq(&qdio->req_q_lock);
1234 return retval;
1235}
1236
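/**
 * zfcp_fsf_exchange_config_data_sync - request information about FCP channel
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_config
 * Returns: 0 on success, error otherwise
 */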
1237int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1238 struct fsf_qtcb_bottom_config *data)
1239{
1240 struct zfcp_fsf_req *req = NULL;
1241 int retval = -EIO;
1242
1243 spin_lock_irq(&qdio->req_q_lock);
1244 if (zfcp_qdio_sbal_get(qdio))
1245 goto out_unlock;
1246
1247 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1248 SBAL_SFLAGS0_TYPE_READ, NULL);
1249
1250 if (IS_ERR(req)) {
1251 retval = PTR_ERR(req);
1252 goto out_unlock;
1253 }
1254
1255 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1256 req->handler = zfcp_fsf_exchange_config_data_handler;
1257
1258 req->qtcb->bottom.config.feature_selection =
1259 FSF_FEATURE_NOTIFICATION_LOST |
1260 FSF_FEATURE_UPDATE_ALERT;
1261
1262 if (data)
1263 req->data = data;
1264
1265 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1266 retval = zfcp_fsf_req_send(req);
1267 spin_unlock_irq(&qdio->req_q_lock);
1268 if (!retval)
1269 wait_for_completion(&req->completion);
1270
1271 zfcp_fsf_req_free(req);
1272 return retval;
1273
1274out_unlock:
1275 spin_unlock_irq(&qdio->req_q_lock);
1276 return retval;
1277}

/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
1284int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1285{
1286 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1287 struct zfcp_fsf_req *req;
1288 int retval = -EIO;
1289
1290 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1291 return -EOPNOTSUPP;
1292
1293 spin_lock_irq(&qdio->req_q_lock);
1294 if (zfcp_qdio_sbal_get(qdio))
1295 goto out;
1296
1297 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1298 SBAL_SFLAGS0_TYPE_READ,
1299 qdio->adapter->pool.erp_req);
1300
1301 if (IS_ERR(req)) {
1302 retval = PTR_ERR(req);
1303 goto out;
1304 }
1305
1306 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1307 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1308
1309 req->handler = zfcp_fsf_exchange_port_data_handler;
1310 req->erp_action = erp_action;
1311 erp_action->fsf_req_id = req->req_id;
1312
1313 zfcp_fsf_start_erp_timer(req);
1314 retval = zfcp_fsf_req_send(req);
1315 if (retval) {
1316 zfcp_fsf_req_free(req);
1317 erp_action->fsf_req_id = 0;
1318 }
1319out:
1320 spin_unlock_irq(&qdio->req_q_lock);
1321 return retval;
1322}

/**
 * zfcp_fsf_exchange_port_data_sync - request information about local port
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_port
 * Returns: 0 on success, error otherwise
 */
1330int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1331 struct fsf_qtcb_bottom_port *data)
1332{
1333 struct zfcp_fsf_req *req = NULL;
1334 int retval = -EIO;
1335
1336 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1337 return -EOPNOTSUPP;
1338
1339 spin_lock_irq(&qdio->req_q_lock);
1340 if (zfcp_qdio_sbal_get(qdio))
1341 goto out_unlock;
1342
1343 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1344 SBAL_SFLAGS0_TYPE_READ, NULL);
1345
1346 if (IS_ERR(req)) {
1347 retval = PTR_ERR(req);
1348 goto out_unlock;
1349 }
1350
1351 if (data)
1352 req->data = data;
1353
1354 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1355
1356 req->handler = zfcp_fsf_exchange_port_data_handler;
1357 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1358 retval = zfcp_fsf_req_send(req);
1359 spin_unlock_irq(&qdio->req_q_lock);
1360
1361 if (!retval)
1362 wait_for_completion(&req->completion);
1363
1364 zfcp_fsf_req_free(req);
1365
1366 return retval;
1367
1368out_unlock:
1369 spin_unlock_irq(&qdio->req_q_lock);
1370 return retval;
1371}
1372
1373static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1374{
1375 struct zfcp_port *port = req->data;
1376 struct fsf_qtcb_header *header = &req->qtcb->header;
1377 struct fc_els_flogi *plogi;
1378
1379 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1380 goto out;
1381
1382 switch (header->fsf_status) {
1383 case FSF_PORT_ALREADY_OPEN:
1384 break;
1385 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1386 dev_warn(&req->adapter->ccw_device->dev,
1387 "Not enough FCP adapter resources to open "
1388 "remote port 0x%016Lx\n",
1389 (unsigned long long)port->wwpn);
1390 zfcp_erp_set_port_status(port,
1391 ZFCP_STATUS_COMMON_ERP_FAILED);
1392 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1393 break;
1394 case FSF_ADAPTER_STATUS_AVAILABLE:
1395 switch (header->fsf_status_qual.word[0]) {
1396 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1397 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1398 case FSF_SQ_NO_RETRY_POSSIBLE:
1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1400 break;
1401 }
1402 break;
1403 case FSF_GOOD:
1404 port->handle = header->port_handle;
1405 atomic_or(ZFCP_STATUS_COMMON_OPEN |
1406 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1407 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1408 &port->status);

		/* the open port response carries the remote port's PLOGI
		 * payload; if it is long enough, evaluate it to update the
		 * cached parameters of the remote N_Port */
1424 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1425 if (req->qtcb->bottom.support.els1_length >=
1426 FSF_PLOGI_MIN_LEN)
1427 zfcp_fc_plogi_evaluate(port, plogi);
1428 break;
1429 case FSF_UNKNOWN_OP_SUBTYPE:
1430 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1431 break;
1432 }
1433
1434out:
1435 put_device(&port->dev);
1436}

/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
1443int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1444{
1445 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1446 struct zfcp_port *port = erp_action->port;
1447 struct zfcp_fsf_req *req;
1448 int retval = -EIO;
1449
1450 spin_lock_irq(&qdio->req_q_lock);
1451 if (zfcp_qdio_sbal_get(qdio))
1452 goto out;
1453
1454 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1455 SBAL_SFLAGS0_TYPE_READ,
1456 qdio->adapter->pool.erp_req);
1457
1458 if (IS_ERR(req)) {
1459 retval = PTR_ERR(req);
1460 goto out;
1461 }
1462
1463 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1464 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1465
1466 req->handler = zfcp_fsf_open_port_handler;
1467 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1468 req->data = port;
1469 req->erp_action = erp_action;
1470 erp_action->fsf_req_id = req->req_id;
1471 get_device(&port->dev);
1472
1473 zfcp_fsf_start_erp_timer(req);
1474 retval = zfcp_fsf_req_send(req);
1475 if (retval) {
1476 zfcp_fsf_req_free(req);
1477 erp_action->fsf_req_id = 0;
1478 put_device(&port->dev);
1479 }
1480out:
1481 spin_unlock_irq(&qdio->req_q_lock);
1482 return retval;
1483}
1484
1485static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1486{
1487 struct zfcp_port *port = req->data;
1488
1489 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1490 return;
1491
1492 switch (req->qtcb->header.fsf_status) {
1493 case FSF_PORT_HANDLE_NOT_VALID:
1494 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1495 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1496 break;
1497 case FSF_ADAPTER_STATUS_AVAILABLE:
1498 break;
1499 case FSF_GOOD:
1500 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1501 break;
1502 }
1503}

/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
1510int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1511{
1512 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1513 struct zfcp_fsf_req *req;
1514 int retval = -EIO;
1515
1516 spin_lock_irq(&qdio->req_q_lock);
1517 if (zfcp_qdio_sbal_get(qdio))
1518 goto out;
1519
1520 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1521 SBAL_SFLAGS0_TYPE_READ,
1522 qdio->adapter->pool.erp_req);
1523
1524 if (IS_ERR(req)) {
1525 retval = PTR_ERR(req);
1526 goto out;
1527 }
1528
1529 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1530 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1531
1532 req->handler = zfcp_fsf_close_port_handler;
1533 req->data = erp_action->port;
1534 req->erp_action = erp_action;
1535 req->qtcb->header.port_handle = erp_action->port->handle;
1536 erp_action->fsf_req_id = req->req_id;
1537
1538 zfcp_fsf_start_erp_timer(req);
1539 retval = zfcp_fsf_req_send(req);
1540 if (retval) {
1541 zfcp_fsf_req_free(req);
1542 erp_action->fsf_req_id = 0;
1543 }
1544out:
1545 spin_unlock_irq(&qdio->req_q_lock);
1546 return retval;
1547}
1548
1549static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1550{
1551 struct zfcp_fc_wka_port *wka_port = req->data;
1552 struct fsf_qtcb_header *header = &req->qtcb->header;
1553
1554 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1555 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1556 goto out;
1557 }
1558
1559 switch (header->fsf_status) {
1560 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1561 dev_warn(&req->adapter->ccw_device->dev,
1562 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through */
1564 case FSF_ADAPTER_STATUS_AVAILABLE:
1565 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1566 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1567 break;
1568 case FSF_GOOD:
1569 wka_port->handle = header->port_handle;
		/* fall through */
1571 case FSF_PORT_ALREADY_OPEN:
1572 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1573 }
1574out:
1575 wake_up(&wka_port->completion_wq);
1576}

/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_fc_wka_port
 * Returns: 0 on success, error otherwise
 */
1583int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1584{
1585 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1586 struct zfcp_fsf_req *req;
1587 int retval = -EIO;
1588
1589 spin_lock_irq(&qdio->req_q_lock);
1590 if (zfcp_qdio_sbal_get(qdio))
1591 goto out;
1592
1593 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1594 SBAL_SFLAGS0_TYPE_READ,
1595 qdio->adapter->pool.erp_req);
1596
1597 if (IS_ERR(req)) {
1598 retval = PTR_ERR(req);
1599 goto out;
1600 }
1601
1602 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1603 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1604
1605 req->handler = zfcp_fsf_open_wka_port_handler;
1606 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1607 req->data = wka_port;
1608
1609 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1610 retval = zfcp_fsf_req_send(req);
1611 if (retval)
1612 zfcp_fsf_req_free(req);
1613out:
1614 spin_unlock_irq(&qdio->req_q_lock);
1615 if (!retval)
1616 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1617 return retval;
1618}
1619
1620static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1621{
1622 struct zfcp_fc_wka_port *wka_port = req->data;
1623
1624 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1625 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1626 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1627 }
1628
1629 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1630 wake_up(&wka_port->completion_wq);
1631}

/**
 * zfcp_fsf_close_wka_port - create and send close wka-port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
1638int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1639{
1640 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1641 struct zfcp_fsf_req *req;
1642 int retval = -EIO;
1643
1644 spin_lock_irq(&qdio->req_q_lock);
1645 if (zfcp_qdio_sbal_get(qdio))
1646 goto out;
1647
1648 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1649 SBAL_SFLAGS0_TYPE_READ,
1650 qdio->adapter->pool.erp_req);
1651
1652 if (IS_ERR(req)) {
1653 retval = PTR_ERR(req);
1654 goto out;
1655 }
1656
1657 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1658 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1659
1660 req->handler = zfcp_fsf_close_wka_port_handler;
1661 req->data = wka_port;
1662 req->qtcb->header.port_handle = wka_port->handle;
1663
1664 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1665 retval = zfcp_fsf_req_send(req);
1666 if (retval)
1667 zfcp_fsf_req_free(req);
1668out:
1669 spin_unlock_irq(&qdio->req_q_lock);
1670 if (!retval)
1671 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1672 return retval;
1673}
1674
1675static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1676{
1677 struct zfcp_port *port = req->data;
1678 struct fsf_qtcb_header *header = &req->qtcb->header;
1679 struct scsi_device *sdev;
1680
1681 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1682 return;
1683
1684 switch (header->fsf_status) {
1685 case FSF_PORT_HANDLE_NOT_VALID:
1686 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1687 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1688 break;
1689 case FSF_PORT_BOXED:
		/* only the physical-open flag of the port and the open flag
		 * of its LUNs are cleared; the port stays logically open */
1692 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1693 shost_for_each_device(sdev, port->adapter->scsi_host)
1694 if (sdev_to_zfcp(sdev)->port == port)
1695 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1696 &sdev_to_zfcp(sdev)->status);
1697 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1698 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1699 "fscpph2");
1700 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1701 break;
1702 case FSF_ADAPTER_STATUS_AVAILABLE:
1703 switch (header->fsf_status_qual.word[0]) {
1704 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
1706 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1707 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1708 break;
1709 }
1710 break;
1711 case FSF_GOOD:
		/* only the physical-open flag of the port and the open flag
		 * of its LUNs are cleared; the port stays logically open */
1715 atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1716 shost_for_each_device(sdev, port->adapter->scsi_host)
1717 if (sdev_to_zfcp(sdev)->port == port)
1718 atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1719 &sdev_to_zfcp(sdev)->status);
1720 break;
1721 }
1722}

/**
 * zfcp_fsf_close_physical_port - close physical port
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success
 */
1729int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1730{
1731 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1732 struct zfcp_fsf_req *req;
1733 int retval = -EIO;
1734
1735 spin_lock_irq(&qdio->req_q_lock);
1736 if (zfcp_qdio_sbal_get(qdio))
1737 goto out;
1738
1739 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1740 SBAL_SFLAGS0_TYPE_READ,
1741 qdio->adapter->pool.erp_req);
1742
1743 if (IS_ERR(req)) {
1744 retval = PTR_ERR(req);
1745 goto out;
1746 }
1747
1748 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1749 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1750
1751 req->data = erp_action->port;
1752 req->qtcb->header.port_handle = erp_action->port->handle;
1753 req->erp_action = erp_action;
1754 req->handler = zfcp_fsf_close_physical_port_handler;
1755 erp_action->fsf_req_id = req->req_id;
1756
1757 zfcp_fsf_start_erp_timer(req);
1758 retval = zfcp_fsf_req_send(req);
1759 if (retval) {
1760 zfcp_fsf_req_free(req);
1761 erp_action->fsf_req_id = 0;
1762 }
1763out:
1764 spin_unlock_irq(&qdio->req_q_lock);
1765 return retval;
1766}
1767
1768static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1769{
1770 struct zfcp_adapter *adapter = req->adapter;
1771 struct scsi_device *sdev = req->data;
1772 struct zfcp_scsi_dev *zfcp_sdev;
1773 struct fsf_qtcb_header *header = &req->qtcb->header;
1774 union fsf_status_qual *qual = &header->fsf_status_qual;
1775
1776 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1777 return;
1778
1779 zfcp_sdev = sdev_to_zfcp(sdev);
1780
1781 atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1782 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1783 &zfcp_sdev->status);
1784
1785 switch (header->fsf_status) {
1786
1787 case FSF_PORT_HANDLE_NOT_VALID:
1788 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		/* fall through */
1790 case FSF_LUN_ALREADY_OPEN:
1791 break;
1792 case FSF_PORT_BOXED:
1793 zfcp_erp_set_port_status(zfcp_sdev->port,
1794 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1795 zfcp_erp_port_reopen(zfcp_sdev->port,
1796 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1797 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1798 break;
1799 case FSF_LUN_SHARING_VIOLATION:
1800 if (qual->word[0])
1801 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1802 "LUN 0x%Lx on port 0x%Lx is already in "
1803 "use by CSS%d, MIF Image ID %x\n",
1804 zfcp_scsi_dev_lun(sdev),
1805 (unsigned long long)zfcp_sdev->port->wwpn,
1806 qual->fsf_queue_designator.cssid,
1807 qual->fsf_queue_designator.hla);
1808 zfcp_erp_set_lun_status(sdev,
1809 ZFCP_STATUS_COMMON_ERP_FAILED |
1810 ZFCP_STATUS_COMMON_ACCESS_DENIED);
1811 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 break;
1813 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1814 dev_warn(&adapter->ccw_device->dev,
1815 "No handle is available for LUN "
1816 "0x%016Lx on port 0x%016Lx\n",
1817 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1818 (unsigned long long)zfcp_sdev->port->wwpn);
1819 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		/* fall through */
1821 case FSF_INVALID_COMMAND_OPTION:
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1823 break;
1824 case FSF_ADAPTER_STATUS_AVAILABLE:
1825 switch (header->fsf_status_qual.word[0]) {
1826 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1827 zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
1829 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1830 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1831 break;
1832 }
1833 break;
1834
1835 case FSF_GOOD:
1836 zfcp_sdev->lun_handle = header->lun_handle;
1837 atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1838 break;
1839 }
1840}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
1847int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1848{
1849 struct zfcp_adapter *adapter = erp_action->adapter;
1850 struct zfcp_qdio *qdio = adapter->qdio;
1851 struct zfcp_fsf_req *req;
1852 int retval = -EIO;
1853
1854 spin_lock_irq(&qdio->req_q_lock);
1855 if (zfcp_qdio_sbal_get(qdio))
1856 goto out;
1857
1858 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1859 SBAL_SFLAGS0_TYPE_READ,
1860 adapter->pool.erp_req);
1861
1862 if (IS_ERR(req)) {
1863 retval = PTR_ERR(req);
1864 goto out;
1865 }
1866
1867 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1868 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1869
1870 req->qtcb->header.port_handle = erp_action->port->handle;
1871 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1872 req->handler = zfcp_fsf_open_lun_handler;
1873 req->data = erp_action->sdev;
1874 req->erp_action = erp_action;
1875 erp_action->fsf_req_id = req->req_id;
1876
1877 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1878 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1879
1880 zfcp_fsf_start_erp_timer(req);
1881 retval = zfcp_fsf_req_send(req);
1882 if (retval) {
1883 zfcp_fsf_req_free(req);
1884 erp_action->fsf_req_id = 0;
1885 }
1886out:
1887 spin_unlock_irq(&qdio->req_q_lock);
1888 return retval;
1889}
1890
1891static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1892{
1893 struct scsi_device *sdev = req->data;
1894 struct zfcp_scsi_dev *zfcp_sdev;
1895
1896 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1897 return;
1898
1899 zfcp_sdev = sdev_to_zfcp(sdev);
1900
1901 switch (req->qtcb->header.fsf_status) {
1902 case FSF_PORT_HANDLE_NOT_VALID:
1903 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1904 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1905 break;
1906 case FSF_LUN_HANDLE_NOT_VALID:
1907 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1908 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1909 break;
1910 case FSF_PORT_BOXED:
1911 zfcp_erp_set_port_status(zfcp_sdev->port,
1912 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1913 zfcp_erp_port_reopen(zfcp_sdev->port,
1914 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1915 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1916 break;
1917 case FSF_ADAPTER_STATUS_AVAILABLE:
1918 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1919 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1920 zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
1922 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1923 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1924 break;
1925 }
1926 break;
1927 case FSF_GOOD:
1928 atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1929 break;
1930 }
1931}

/**
 * zfcp_fsf_close_lun - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
1938int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1939{
1940 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1941 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1942 struct zfcp_fsf_req *req;
1943 int retval = -EIO;
1944
1945 spin_lock_irq(&qdio->req_q_lock);
1946 if (zfcp_qdio_sbal_get(qdio))
1947 goto out;
1948
1949 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1950 SBAL_SFLAGS0_TYPE_READ,
1951 qdio->adapter->pool.erp_req);
1952
1953 if (IS_ERR(req)) {
1954 retval = PTR_ERR(req);
1955 goto out;
1956 }
1957
1958 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1959 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1960
1961 req->qtcb->header.port_handle = erp_action->port->handle;
1962 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1963 req->handler = zfcp_fsf_close_lun_handler;
1964 req->data = erp_action->sdev;
1965 req->erp_action = erp_action;
1966 erp_action->fsf_req_id = req->req_id;
1967
1968 zfcp_fsf_start_erp_timer(req);
1969 retval = zfcp_fsf_req_send(req);
1970 if (retval) {
1971 zfcp_fsf_req_free(req);
1972 erp_action->fsf_req_id = 0;
1973 }
1974out:
1975 spin_unlock_irq(&qdio->req_q_lock);
1976 return retval;
1977}
1978
1979static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1980{
1981 lat_rec->sum += lat;
1982 lat_rec->min = min(lat_rec->min, lat);
1983 lat_rec->max = max(lat_rec->max, lat);
1984}
1985
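/* collect latency information and emit a block trace record for the
 * completed SCSI command */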
1986static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1987{
1988 struct fsf_qual_latency_info *lat_in;
1989 struct latency_cont *lat = NULL;
1990 struct zfcp_scsi_dev *zfcp_sdev;
1991 struct zfcp_blk_drv_data blktrc;
1992 int ticks = req->adapter->timer_ticks;
1993
1994 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1995
1996 blktrc.flags = 0;
1997 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1998 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1999 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2000 blktrc.inb_usage = 0;
2001 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2002
2003 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2004 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2005 zfcp_sdev = sdev_to_zfcp(scsi->device);
2006 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2007 blktrc.channel_lat = lat_in->channel_lat * ticks;
2008 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2009
2010 switch (req->qtcb->bottom.io.data_direction) {
2011 case FSF_DATADIR_DIF_READ_STRIP:
2012 case FSF_DATADIR_DIF_READ_CONVERT:
2013 case FSF_DATADIR_READ:
2014 lat = &zfcp_sdev->latencies.read;
2015 break;
2016 case FSF_DATADIR_DIF_WRITE_INSERT:
2017 case FSF_DATADIR_DIF_WRITE_CONVERT:
2018 case FSF_DATADIR_WRITE:
2019 lat = &zfcp_sdev->latencies.write;
2020 break;
2021 case FSF_DATADIR_CMND:
2022 lat = &zfcp_sdev->latencies.cmd;
2023 break;
2024 }
2025
2026 if (lat) {
2027 spin_lock(&zfcp_sdev->latencies.lock);
2028 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2029 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2030 lat->counter++;
2031 spin_unlock(&zfcp_sdev->latencies.lock);
2032 }
2033 }
2034
2035 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2036 sizeof(blktrc));
2037}
2038
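/*
 * Common FSF status evaluation for FCP command and task management
 * responses: trigger the matching recovery action and mark the request
 * as failed where necessary.
 */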
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scmnd = req->data;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
					  "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect CDB length %d, LUN 0x%016Lx on "
			"port 0x%016Lx closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
					  "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}

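/*
 * Completion handler for FCP command requests: evaluates the FSF and
 * FCP_RSP status, records the blktrace/latency data and hands the SCSI
 * command back to the midlayer via scsi_done under the abort_lock.
 */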
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * The abort_lock must be held until scsi_done has been called;
	 * otherwise a racing abort could regard the command as already
	 * completed and access it after it has been freed.
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}

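/*
 * Map the SCSI protection operation and data direction of a command to
 * the FSF data direction constant used in the QTCB IO bottom.
 */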
static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_NONE:
			*data_dir = FSF_DATADIR_CMND;
			break;
		case DMA_FROM_DEVICE:
			*data_dir = FSF_DATADIR_READ;
			break;
		case DMA_TO_DEVICE:
			*data_dir = FSF_DATADIR_WRITE;
			break;
		case DMA_BIDIRECTIONAL:
			return -EINVAL;
		}
		break;

	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
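/**
 * zfcp_fsf_fcp_cmnd - issue an FCP command request for a SCSI command
 * @scsi_cmnd: the SCSI command to be sent to the LUN
 *
 * Returns: 0 on success, -EBUSY if the LUN is currently blocked, -EIO
 * or the error from request creation, SBAL setup or sending otherwise.
 */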
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);

	if (scsi_prot_sg_count(scsi_cmnd)) {
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}

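/*
 * Completion handler for FCP task management requests: marks the
 * request as failed unless the FCP_RSP response code reports
 * FCP_TMF_CMPL and no FSF error occurred.
 */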
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req);

	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}
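/**
 * zfcp_fsf_fcp_task_mgmt - send an FCP task management command
 * @scmnd: SCSI command the task management function relates to
 * @tm_flags: task management flags to be set in the FCP_CMND IU
 *
 * Returns: pointer to the started FSF request, or NULL if the LUN is
 * blocked or the request could not be created or sent.
 */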
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = scmnd;
	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}
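/**
 * zfcp_fsf_reqid_check - complete the FSF requests of one response SBAL
 * @qdio: the zfcp_qdio structure the response queue belongs to
 * @sbal_idx: index of the response SBAL to process
 *
 * Each buffer element of the SBAL carries the request id of a pending
 * FSF request; the request is removed from the adapter's request list
 * and completed. An unknown request id is treated as fatal.
 */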
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		req_id = (unsigned long) sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * An unknown request id indicates potential memory
			 * corruption; request hardware logging via SIOSL and
			 * stop the machine immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		fsf_req->qdio_req.sbal_response = sbal_idx;
		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}
