1
2
3
4
5
6#include "qla_def.h"
7#include "qla_edif.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12#include <scsi/scsi_tcq.h>
13
14static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
15 struct list_head *sa_list);
16static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
17 struct qla_sa_update_frame *sa_frame);
18static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
19 uint16_t sa_index);
20static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
21
/*
 * Node queued on the eDIF event doorbell list; ntype identifies the
 * event and selects which union member carries the payload.
 */
struct edb_node {
	struct list_head list;
	uint32_t ntype;		/* event type; selects u below */
	union {
		port_id_t plogi_did;	/* PLOGI event: remote port id */
		uint32_t async;		/* async event payload */
		port_id_t els_sid;	/* ELS event: source port id */
		struct edif_sa_update_aen sa_aen; /* SA update completion */
	} u;
};
32
/* Map of ELS passthru sub-command codes to printable names (see sc_to_str()). */
static struct els_sub_cmd {
	uint16_t cmd;
	const char *str;
} sc_str[] = {
	{SEND_ELS, "send ELS"},
	{SEND_ELS_REPLY, "send ELS Reply"},
	{PULL_ELS, "retrieve ELS"},
};
41
42const char *sc_to_str(uint16_t cmd)
43{
44 int i;
45 struct els_sub_cmd *e;
46
47 for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
48 e = sc_str + i;
49 if (cmd == e->cmd)
50 return e->str;
51 }
52 return "unknown";
53}
54
55static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
56 uint16_t handle)
57{
58 struct edif_list_entry *entry;
59 struct edif_list_entry *tentry;
60 struct list_head *indx_list = &fcport->edif.edif_indx_list;
61
62 list_for_each_entry_safe(entry, tentry, indx_list, next) {
63 if (entry->handle == handle)
64 return entry;
65 }
66 return NULL;
67}
68
69
/*
 * qla2x00_sa_replace_iocb_timeout - timer callback for a delayed rx SA
 * delete on an nport handle.
 *
 * If a delete is still pending for this entry, clear the pending index
 * under the index-list lock, then (after dropping the lock — the work
 * posting path may not be called with it held) look up the matching
 * sa_ctl and queue a SA-replace work item to push the delete to the
 * firmware.  Runs in timer (softirq) context.
 */
static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
{
	struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
	fc_port_t *fcport = edif_entry->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct edif_sa_ctl *sa_ctl;
	uint16_t nport_handle;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3069,
	    "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
	    __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);

	/*
	 * Take the index-list lock so the pending-delete index cannot be
	 * consumed twice if the timer races with the normal delete path.
	 */
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);

	/*
	 * A valid delete_sa_index means the delayed delete has not been
	 * handled yet; claim it (reset to INVALID) and hand it off to the
	 * work queue.  Otherwise there is nothing left to do.
	 */
	if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
		uint16_t delete_sa_index = edif_entry->delete_sa_index;

		edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
		nport_handle = edif_entry->handle;
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
		    delete_sa_index, 0);

		if (sa_ctl) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
			    __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
			    nport_handle);

			sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
			set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
			qla_post_sa_replace_work(fcport->vha, fcport,
			    nport_handle, sa_ctl);

		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl not found for delete_sa_index: %d\n",
			    __func__, edif_entry->delete_sa_index);
		}
	} else {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	}
}
126
127
128
129
130
/*
 * qla_edif_list_add_sa_update_index - record a pending SA update index
 * for @handle on this fcport.
 *
 * If an entry for the nport handle already exists, refresh its
 * update_sa_index and reset its use count.  Otherwise allocate a new
 * entry (GFP_ATOMIC: may be called from a non-sleeping context), arm
 * its replace-timer callback, and add it to the fcport's index list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): the lookup and in-place update of an existing entry are
 * done without indx_list_lock while insertion takes it — confirm that
 * callers serialize against each other.
 */
static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
		uint16_t sa_index, uint16_t handle)
{
	struct edif_list_entry *entry;
	unsigned long flags = 0;

	/* Reuse an existing entry for this nport handle if there is one. */
	entry = qla_edif_list_find_sa_index(fcport, handle);
	if (entry) {
		entry->update_sa_index = sa_index;
		entry->count = 0;
		return 0;
	}

	/*
	 * No entry found: create one.  delete_sa_index starts invalid —
	 * it is only set when a delayed delete is scheduled; the timer is
	 * armed by the caller when needed.
	 */
	entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->next);
	entry->handle = handle;
	entry->update_sa_index = sa_index;
	entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	entry->count = 0;
	entry->flags = 0;
	timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	return 0;
}
167
168
169static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
170{
171 unsigned long flags = 0;
172
173 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
174 list_del(&entry->next);
175 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
176}
177
178int qla_post_sa_replace_work(struct scsi_qla_host *vha,
179 fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
180{
181 struct qla_work_evt *e;
182
183 e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
184 if (!e)
185 return QLA_FUNCTION_FAILED;
186
187 e->u.sa_update.fcport = fcport;
188 e->u.sa_update.sa_ctl = sa_ctl;
189 e->u.sa_update.nport_handle = nport_handle;
190 fcport->flags |= FCF_ASYNC_ACTIVE;
191 return qla2x00_post_work(vha, e);
192}
193
194static void
195qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
196{
197 ql_dbg(ql_dbg_edif, vha, 0x2058,
198 "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
199 fcport->node_name, fcport->port_name, fcport->d_id.b24);
200
201 fcport->edif.tx_rekey_cnt = 0;
202 fcport->edif.rx_rekey_cnt = 0;
203
204 fcport->edif.tx_bytes = 0;
205 fcport->edif.rx_bytes = 0;
206}
207
/*
 * qla_bsg_check - common validation for eDIF ELS bsg requests.
 *
 * Rejects the job (completing it immediately) when eDIF is not enabled
 * on the adapter or the app event doorbell is not active.  A PULL_ELS
 * sub-command is serviced right here via qla_pur_get_pending() and the
 * job is completed as well.
 *
 * Returns 0 when the caller should continue processing the job, -EIO
 * when the job has already been completed in this function.
 */
static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
fc_port_t *fcport)
{
	struct extra_auth_els *p;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_bsg_auth_els_request *req =
		(struct qla_bsg_auth_els_request *)bsg_job->request;

	if (!vha->hw->flags.edif_enabled) {
		ql_dbg(ql_dbg_edif, vha, 0x9105,
		    "%s edif not enabled\n", __func__);
		goto done;
	}
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		goto done;
	}

	p = &req->e;

	/* PULL_ELS: hand any queued ELS frame back to the app now. */
	if (p->sub_cmd == PULL_ELS) {
		struct qla_bsg_auth_els_reply *rpl =
			(struct qla_bsg_auth_els_reply *)bsg_job->reply;

		/* NOTE(review): return value deliberately ignored here —
		 * the reply structure carries the outcome; confirm. */
		qla_pur_get_pending(vha, fcport, bsg_job);

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
		    __func__, sc_to_str(p->sub_cmd), fcport->port_name,
		    fcport->d_id.b24, rpl->rx_xchg_address,
		    rpl->r.reply_payload_rcv_len, bsg_job);

		goto done;
	}
	return 0;

done:

	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return -EIO;
}
252
253fc_port_t *
254qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
255{
256 fc_port_t *f, *tf;
257
258 f = NULL;
259 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
260 if ((f->flags & FCF_FCSP_DEVICE)) {
261 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
262 "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
263 f->node_name, f->port_name,
264 f->d_id.b24, id->b24);
265 if (f->d_id.b24 == id->b24)
266 return f;
267 }
268 }
269 return NULL;
270}
271
272
273
274
275
276
277
278static bool
279qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
280{
281
282
283 if (appid.app_vid == EDIF_APP_ID) {
284 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
285 return true;
286 }
287 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
288 __func__, appid.app_vid);
289
290 return false;
291}
292
/*
 * qla_edif_reset_auth_wait - move a session to @state and poll until it
 * settles.
 *
 * When @waitonly is 0 the session is also scheduled for deletion (to be
 * torn down and re-logged-in); when non-zero only the disc state is set
 * and we merely wait for it to reach @state / DSC_LOGIN_COMPLETE.
 * The wait always ends early when the session lands in
 * DSC_LOGIN_AUTH_PEND.
 *
 * NOTE(review): cnt mixes one +50 for the initial 50ms sleep with +1
 * per 20ms poll against max_cnt=200, so the timeout is approximate
 * (~4s), not an exact millisecond bound — confirm this is intended.
 */
static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
		int waitonly)
{
	int cnt, max_cnt = 200;
	bool traced = false;

	/* keep the same nport handle across the relogin */
	fcport->keep_nport_handle = 1;

	if (!waitonly) {
		qla2x00_set_fcport_disc_state(fcport, state);
		qlt_schedule_sess_for_deletion(fcport);
	} else {
		qla2x00_set_fcport_disc_state(fcport, state);
	}

	ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
	    "%s: waiting for session, max_cnt=%u\n",
	    __func__, max_cnt);

	cnt = 0;

	if (waitonly) {
		/* Marginal wait for state change */
		msleep(50);
		cnt += 50;
	}
	while (1) {
		if (!traced) {
			ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
			    "%s: session sleep.\n",
			    __func__);
			traced = true;
		}
		msleep(20);
		cnt++;
		if (waitonly && (fcport->disc_state == state ||
			fcport->disc_state == DSC_LOGIN_COMPLETE))
			break;
		if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
			break;
		if (cnt > max_cnt)
			break;
	}

	if (!waitonly) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
		    "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
		    __func__, fcport->port_name, fcport->loop_id,
		    fcport->d_id.b24, fcport, fcport->disc_state, cnt);
	} else {
		ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
		    "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
		    __func__, fcport->port_name, fcport->loop_id,
		    fcport->d_id.b24, fcport, fcport->disc_state, cnt);
	}
}
349
/*
 * qla_edif_free_sa_ctl - unlink a sa_ctl from its fcport list and free
 * it, decrementing the matching rekey counter.
 *
 * NOTE(review): the literal 512 looks like the tx/rx SA index split
 * (presumably EDIF_TX_SA_INDEX_BASE) — confirm and replace with the
 * macro.
 */
static void
qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
	int index)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
	list_del(&sa_ctl->next);
	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
	if (index >= 512)
		fcport->edif.tx_rekey_cnt--;
	else
		fcport->edif.rx_rekey_cnt--;
	kfree(sa_ctl);
}
365
366
367static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
368 uint16_t sa_index)
369{
370 void *sa_id_map;
371 struct scsi_qla_host *vha = fcport->vha;
372 struct qla_hw_data *ha = vha->hw;
373 unsigned long flags = 0;
374 u16 lsa_index = sa_index;
375
376 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
377 "%s: entry\n", __func__);
378
379 if (dir) {
380 sa_id_map = ha->edif_tx_sa_id_map;
381 lsa_index -= EDIF_TX_SA_INDEX_BASE;
382 } else {
383 sa_id_map = ha->edif_rx_sa_id_map;
384 }
385
386 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
387 clear_bit(lsa_index, sa_id_map);
388 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
389 ql_dbg(ql_dbg_edif, vha, 0x3063,
390 "%s: index %d added to free pool\n", __func__, sa_index);
391}
392
/*
 * __qla2x00_release_all_sadb - release both SA indexes held by one sadb
 * entry (rx or tx, per @pdir) back to the free pool, freeing any
 * associated sa_ctl and (rx only) any delayed-delete list entry.
 *
 * Called after @entry has been unlinked from the hw sadb list, so the
 * sadb lock is NOT held here.
 */
static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
	struct fc_port *fcport, struct edif_sa_index_entry *entry,
	int pdir)
{
	struct edif_list_entry *edif_entry;
	struct edif_sa_ctl *sa_ctl;
	int i, dir;
	int key_cnt = 0;

	/* Each sadb entry holds at most two SA indexes (old/new pair). */
	for (i = 0; i < 2; i++) {
		if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
			continue;

		if (fcport->loop_id != entry->handle) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
			    __func__, i, entry->handle, fcport->loop_id,
			    entry->sa_pair[i].sa_index);
		}

		/* Free the sa_ctl tracking this index, if it still exists. */
		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
		    entry->sa_pair[i].sa_index, pdir);
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
		}

		/* Return the SA index itself to the hardware free pool. */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, entry->sa_pair[i].sa_index, entry->handle);

		/* tx indices live at/above EDIF_TX_SA_INDEX_BASE */
		dir = (entry->sa_pair[i].sa_index <
			EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
		qla_edif_add_sa_index_to_freepool(fcport, dir,
		    entry->sa_pair[i].sa_index);

		/* Rx direction: also tear down any delayed-delete entry. */
		if (pdir != SAU_FLG_TX) {
			edif_entry =
				qla_edif_list_find_sa_index(fcport, entry->handle);
			if (edif_entry) {
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);
				qla_edif_list_delete_sa_index(fcport, edif_entry);

				/*
				 * A still-pending delete means its timer is
				 * armed: stop it and complete the SA update
				 * toward the app as successful.
				 */
				if (edif_entry->delete_sa_index !=
				    INVALID_EDIF_SA_INDEX) {
					del_timer(&edif_entry->timer);

					/* build and send the aen */
					fcport->edif.rx_sa_set = 1;
					fcport->edif.rx_sa_pending = 0;
					qla_edb_eventcreate(vha,
					    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
					    QL_VND_SA_STAT_SUCCESS,
					    QL_VND_RX_SA_KEY, fcport);
				}
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);

				kfree(edif_entry);
			}
		}
		key_cnt++;
	}
	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %d %s keys released\n",
	    __func__, key_cnt, pdir ? "tx" : "rx");
}
476
477
/*
 * qla2x00_release_all_sadb - release all security-association database
 * entries (rx and tx) owned by @fcport.
 *
 * Each matching entry is unlinked under sadb_lock, then the lock is
 * dropped while __qla2x00_release_all_sadb() frees the keys (it may
 * sleep/queue work), and reacquired afterwards.  Each loop breaks after
 * the first match — an fcport is expected to own at most one entry per
 * list.
 */
void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct edif_sa_index_entry *entry, *tmp;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
	    "%s: Starting...\n", __func__);

	spin_lock_irqsave(&ha->sadb_lock, flags);

	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);
			__qla2x00_release_all_sadb(vha, fcport, entry, 0);
			kfree(entry);
			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}

	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);

			__qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);

			kfree(entry);
			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
}
514
515
516
517
518
519
520
521
522
523static int
524qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
525{
526 int32_t rval = 0;
527 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
528 struct app_start appstart;
529 struct app_start_reply appreply;
530 struct fc_port *fcport, *tf;
531
532 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
533
534 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
535 bsg_job->request_payload.sg_cnt, &appstart,
536 sizeof(struct app_start));
537
538 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
539 __func__, appstart.app_info.app_vid, appstart.app_start_flags);
540
541 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
542
543 vha->e_dbell.db_flags = EDB_ACTIVE;
544 } else {
545 ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
546 __func__);
547 }
548
549 if (N2N_TOPO(vha->hw)) {
550 if (vha->hw->flags.n2n_fw_acc_sec)
551 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
552 else
553 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
554 qla2xxx_wake_dpc(vha);
555 } else {
556 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
557 ql_dbg(ql_dbg_edif, vha, 0xf084,
558 "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
559 __func__, fcport, fcport->port_name,
560 fcport->loop_id, fcport->d_id.b24,
561 fcport->logout_on_delete);
562
563 ql_dbg(ql_dbg_edif, vha, 0xf084,
564 "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
565 fcport->keep_nport_handle,
566 fcport->send_els_logo, fcport->disc_state,
567 fcport->edif.auth_state, fcport->edif.app_stop);
568
569 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
570 break;
571 if (!(fcport->flags & FCF_FCSP_DEVICE))
572 continue;
573
574 fcport->edif.app_started = 1;
575 if (fcport->edif.app_stop ||
576 (fcport->disc_state != DSC_LOGIN_COMPLETE &&
577 fcport->disc_state != DSC_LOGIN_PEND &&
578 fcport->disc_state != DSC_DELETED)) {
579
580 fcport->edif.app_stop = 0;
581
582 ql_dbg(ql_dbg_edif, vha, 0x911e,
583 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
584 __func__, fcport->port_name);
585 fcport->edif.app_sess_online = 1;
586 qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
587 }
588 qla_edif_sa_ctl_init(vha, fcport);
589 }
590 }
591
592 if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
593
594 vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
595 } else {
596 ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
597 __func__);
598 }
599
600 appreply.host_support_edif = vha->hw->flags.edif_enabled;
601 appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
602 appreply.edif_edb_active = vha->e_dbell.db_flags;
603
604 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
605 sizeof(struct app_start_reply);
606
607 SET_DID_STATUS(bsg_reply->result, DID_OK);
608
609 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
610 bsg_job->reply_payload.sg_cnt, &appreply,
611 sizeof(struct app_start_reply));
612
613 ql_dbg(ql_dbg_edif, vha, 0x911d,
614 "%s app start completed with 0x%x\n",
615 __func__, rval);
616
617 return rval;
618}
619
620
621
622
623
624
625
626
627
628static int
629qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
630{
631 struct app_stop appstop;
632 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
633 struct fc_port *fcport, *tf;
634
635 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
636 bsg_job->request_payload.sg_cnt, &appstop,
637 sizeof(struct app_stop));
638
639 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
640 __func__, appstop.app_info.app_vid);
641
642
643
644
645 qla_enode_stop(vha);
646 qla_edb_stop(vha);
647
648 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
649 if (!(fcport->flags & FCF_FCSP_DEVICE))
650 continue;
651
652 if (fcport->flags & FCF_FCSP_DEVICE) {
653 ql_dbg(ql_dbg_edif, vha, 0xf084,
654 "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
655 __func__, fcport,
656 fcport->port_name, fcport->loop_id, fcport->d_id.b24,
657 fcport->logout_on_delete, fcport->keep_nport_handle,
658 fcport->send_els_logo);
659
660 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
661 break;
662
663 fcport->edif.app_stop = 1;
664 ql_dbg(ql_dbg_edif, vha, 0x911e,
665 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
666 __func__, fcport->port_name);
667
668 fcport->send_els_logo = 1;
669 qlt_schedule_sess_for_deletion(fcport);
670
671
672 fcport->edif.app_started = 0;
673 }
674 }
675
676 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
677 SET_DID_STATUS(bsg_reply->result, DID_OK);
678
679
680
681 return 0;
682}
683
684static int
685qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
686 struct app_plogi_reply *appplogireply)
687{
688 int ret = 0;
689
690 if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
691 ql_dbg(ql_dbg_edif, vha, 0x911e,
692 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
693 __func__, fcport->port_name, fcport->edif.tx_sa_set,
694 fcport->edif.rx_sa_set);
695 appplogireply->prli_status = 0;
696 ret = 1;
697 } else {
698 ql_dbg(ql_dbg_edif, vha, 0x911e,
699 "%s wwpn %8phC Both SA(s) updated.\n", __func__,
700 fcport->port_name);
701 fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
702 fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
703 appplogireply->prli_status = 1;
704 }
705 return ret;
706}
707
708
709
710
711
712
713
714static int
715qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
716{
717 int32_t rval = 0;
718 struct auth_complete_cmd appplogiok;
719 struct app_plogi_reply appplogireply = {0};
720 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
721 fc_port_t *fcport = NULL;
722 port_id_t portid = {0};
723
724 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
725 bsg_job->request_payload.sg_cnt, &appplogiok,
726 sizeof(struct auth_complete_cmd));
727
728 switch (appplogiok.type) {
729 case PL_TYPE_WWPN:
730 fcport = qla2x00_find_fcport_by_wwpn(vha,
731 appplogiok.u.wwpn, 0);
732 if (!fcport)
733 ql_dbg(ql_dbg_edif, vha, 0x911d,
734 "%s wwpn lookup failed: %8phC\n",
735 __func__, appplogiok.u.wwpn);
736 break;
737 case PL_TYPE_DID:
738 fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
739 if (!fcport)
740 ql_dbg(ql_dbg_edif, vha, 0x911d,
741 "%s d_id lookup failed: %x\n", __func__,
742 portid.b24);
743 break;
744 default:
745 ql_dbg(ql_dbg_edif, vha, 0x911d,
746 "%s undefined type: %x\n", __func__,
747 appplogiok.type);
748 break;
749 }
750
751 if (!fcport) {
752 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
753 goto errstate_exit;
754 }
755
756
757
758
759
760 if (atomic_read(&fcport->state) == FCS_ONLINE) {
761 ql_dbg(ql_dbg_edif, vha, 0x911d,
762 "%s Skipping PRLI complete based on rekey\n", __func__);
763 appplogireply.prli_status = 1;
764 SET_DID_STATUS(bsg_reply->result, DID_OK);
765 qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
766 goto errstate_exit;
767 }
768
769
770 if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
771 ql_dbg(ql_dbg_edif, vha, 0x911e,
772 "%s wwpn %8phC is not in auth pending state (%x)\n",
773 __func__, fcport->port_name, fcport->disc_state);
774 SET_DID_STATUS(bsg_reply->result, DID_OK);
775 appplogireply.prli_status = 0;
776 goto errstate_exit;
777 }
778
779 SET_DID_STATUS(bsg_reply->result, DID_OK);
780 appplogireply.prli_status = 1;
781 fcport->edif.authok = 1;
782 if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
783 ql_dbg(ql_dbg_edif, vha, 0x911e,
784 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
785 __func__, fcport->port_name, fcport->edif.tx_sa_set,
786 fcport->edif.rx_sa_set);
787 SET_DID_STATUS(bsg_reply->result, DID_OK);
788 appplogireply.prli_status = 0;
789 goto errstate_exit;
790
791 } else {
792 ql_dbg(ql_dbg_edif, vha, 0x911e,
793 "%s wwpn %8phC Both SA(s) updated.\n", __func__,
794 fcport->port_name);
795 fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
796 fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
797 }
798
799 if (qla_ini_mode_enabled(vha)) {
800 ql_dbg(ql_dbg_edif, vha, 0x911e,
801 "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
802 __func__, fcport->port_name);
803 qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
804 qla24xx_post_prli_work(vha, fcport);
805 }
806
807errstate_exit:
808 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
809 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
810 bsg_job->reply_payload.sg_cnt, &appplogireply,
811 sizeof(struct app_plogi_reply));
812
813 return rval;
814}
815
816
817
818
819
820
821
822static int
823qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
824{
825 int32_t rval = 0;
826 struct auth_complete_cmd appplogifail;
827 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
828 fc_port_t *fcport = NULL;
829 port_id_t portid = {0};
830
831 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
832
833 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
834 bsg_job->request_payload.sg_cnt, &appplogifail,
835 sizeof(struct auth_complete_cmd));
836
837
838
839
840
841 switch (appplogifail.type) {
842 case PL_TYPE_WWPN:
843 fcport = qla2x00_find_fcport_by_wwpn(vha,
844 appplogifail.u.wwpn, 0);
845 SET_DID_STATUS(bsg_reply->result, DID_OK);
846 break;
847 case PL_TYPE_DID:
848 fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
849 if (!fcport)
850 ql_dbg(ql_dbg_edif, vha, 0x911d,
851 "%s d_id lookup failed: %x\n", __func__,
852 portid.b24);
853 SET_DID_STATUS(bsg_reply->result, DID_OK);
854 break;
855 default:
856 ql_dbg(ql_dbg_edif, vha, 0x911e,
857 "%s undefined type: %x\n", __func__,
858 appplogifail.type);
859 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
860 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
861 rval = -1;
862 break;
863 }
864
865 ql_dbg(ql_dbg_edif, vha, 0x911d,
866 "%s fcport is 0x%p\n", __func__, fcport);
867
868 if (fcport) {
869
870 ql_dbg(ql_dbg_edif, vha, 0x911e,
871 "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
872 __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
873
874 if (qla_ini_mode_enabled(fcport->vha)) {
875 fcport->send_els_logo = 1;
876 qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
877 }
878 }
879
880 return rval;
881}
882
883
884
885
886
887
888
889
890static int
891qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
892{
893 int32_t rval = 0;
894 int32_t num_cnt;
895 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
896 struct app_pinfo_req app_req;
897 struct app_pinfo_reply *app_reply;
898 port_id_t tdid;
899
900 ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
901
902 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
903 bsg_job->request_payload.sg_cnt, &app_req,
904 sizeof(struct app_pinfo_req));
905
906 num_cnt = app_req.num_ports;
907
908 app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
909 sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
910 if (!app_reply) {
911 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
912 rval = -1;
913 } else {
914 struct fc_port *fcport = NULL, *tf;
915 uint32_t pcnt = 0;
916
917 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
918 if (!(fcport->flags & FCF_FCSP_DEVICE))
919 continue;
920
921 tdid = app_req.remote_pid;
922
923 ql_dbg(ql_dbg_edif, vha, 0x2058,
924 "APP request entry - portid=%06x.\n", tdid.b24);
925
926
927 if (pcnt > app_req.num_ports)
928 break;
929
930 if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
931 continue;
932
933 app_reply->ports[pcnt].rekey_count =
934 fcport->edif.rekey_cnt;
935
936 app_reply->ports[pcnt].remote_type =
937 VND_CMD_RTYPE_UNKNOWN;
938 if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
939 app_reply->ports[pcnt].remote_type |=
940 VND_CMD_RTYPE_TARGET;
941 if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
942 app_reply->ports[pcnt].remote_type |=
943 VND_CMD_RTYPE_INITIATOR;
944
945 app_reply->ports[pcnt].remote_pid = fcport->d_id;
946
947 ql_dbg(ql_dbg_edif, vha, 0x2058,
948 "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
949 fcport->node_name, fcport->port_name, pcnt,
950 fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
951
952 switch (fcport->edif.auth_state) {
953 case VND_CMD_AUTH_STATE_ELS_RCVD:
954 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
955 fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
956 app_reply->ports[pcnt].auth_state =
957 VND_CMD_AUTH_STATE_NEEDED;
958 } else {
959 app_reply->ports[pcnt].auth_state =
960 VND_CMD_AUTH_STATE_ELS_RCVD;
961 }
962 break;
963 default:
964 app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
965 break;
966 }
967
968 memcpy(app_reply->ports[pcnt].remote_wwpn,
969 fcport->port_name, 8);
970
971 app_reply->ports[pcnt].remote_state =
972 (atomic_read(&fcport->state) ==
973 FCS_ONLINE ? 1 : 0);
974
975 pcnt++;
976
977 if (tdid.b24 != 0)
978 break;
979 }
980 app_reply->port_count = pcnt;
981 SET_DID_STATUS(bsg_reply->result, DID_OK);
982 }
983
984 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
985 bsg_job->reply_payload.sg_cnt, app_reply,
986 sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
987
988 kfree(app_reply);
989
990 return rval;
991}
992
993
994
995
996
997
998static int32_t
999qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1000{
1001 int32_t rval = 0;
1002 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1003 uint32_t ret_size, size;
1004
1005 struct app_sinfo_req app_req;
1006 struct app_stats_reply *app_reply;
1007
1008 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1009 bsg_job->request_payload.sg_cnt, &app_req,
1010 sizeof(struct app_sinfo_req));
1011 if (app_req.num_ports == 0) {
1012 ql_dbg(ql_dbg_async, vha, 0x911d,
1013 "%s app did not indicate number of ports to return\n",
1014 __func__);
1015 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1016 rval = -1;
1017 }
1018
1019 size = sizeof(struct app_stats_reply) +
1020 (sizeof(struct app_sinfo) * app_req.num_ports);
1021
1022 if (size > bsg_job->reply_payload.payload_len)
1023 ret_size = bsg_job->reply_payload.payload_len;
1024 else
1025 ret_size = size;
1026
1027 app_reply = kzalloc(size, GFP_KERNEL);
1028 if (!app_reply) {
1029 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1030 rval = -1;
1031 } else {
1032 struct fc_port *fcport = NULL, *tf;
1033 uint32_t pcnt = 0;
1034
1035 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1036 if (fcport->edif.enable) {
1037 if (pcnt > app_req.num_ports)
1038 break;
1039
1040 app_reply->elem[pcnt].rekey_count =
1041 fcport->edif.rekey_cnt;
1042 app_reply->elem[pcnt].tx_bytes =
1043 fcport->edif.tx_bytes;
1044 app_reply->elem[pcnt].rx_bytes =
1045 fcport->edif.rx_bytes;
1046
1047 memcpy(app_reply->elem[pcnt].remote_wwpn,
1048 fcport->port_name, 8);
1049
1050 pcnt++;
1051 }
1052 }
1053 app_reply->elem_count = pcnt;
1054 SET_DID_STATUS(bsg_reply->result, DID_OK);
1055 }
1056
1057 bsg_reply->reply_payload_rcv_len =
1058 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1059 bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
1060
1061 kfree(app_reply);
1062
1063 return rval;
1064}
1065
/*
 * qla_edif_app_mgmt - dispatcher for eDIF vendor-specific bsg commands
 * from the authentication application.
 *
 * Validates that eDIF is enabled, the vport is not being deleted, and
 * the caller's app id is acceptable, then routes vendor_cmd[1] to the
 * matching handler.  Every sub-command except QL_VND_SC_SA_UPDATE is
 * completed synchronously here ("done" stays true); SA update completes
 * asynchronously in its own path.
 *
 * Note: the local bool "done" and the label "done:" are distinct — the
 * label is jumped to for early failures with done still true.
 */
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct app_id appcheck;
	bool done = true;
	int32_t rval = 0;
	uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
	    __func__, vnd_sc);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appcheck,
	    sizeof(struct app_id));

	if (!vha->hw->flags.edif_enabled ||
	    test_bit(VPORT_DELETE, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
		    __func__, bsg_job, vha->dpc_flags);

		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	if (!qla_edif_app_check(vha, appcheck)) {
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s app checked failed.\n",
		    __func__);

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	switch (vnd_sc) {
	case QL_VND_SC_SA_UPDATE:
		/* completes asynchronously; do not finish the job here */
		done = false;
		rval = qla24xx_sadb_update(bsg_job);
		break;
	case QL_VND_SC_APP_START:
		rval = qla_edif_app_start(vha, bsg_job);
		break;
	case QL_VND_SC_APP_STOP:
		rval = qla_edif_app_stop(vha, bsg_job);
		break;
	case QL_VND_SC_AUTH_OK:
		rval = qla_edif_app_authok(vha, bsg_job);
		break;
	case QL_VND_SC_AUTH_FAIL:
		rval = qla_edif_app_authfail(vha, bsg_job);
		break;
	case QL_VND_SC_GET_FCINFO:
		rval = qla_edif_app_getfcinfo(vha, bsg_job);
		break;
	case QL_VND_SC_GET_STATS:
		rval = qla_edif_app_getstats(vha, bsg_job);
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
		    __func__,
		    bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
		rval = EXT_STATUS_INVALID_PARAM;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		break;
	}

done:
	if (done) {
		ql_dbg(ql_dbg_user, vha, 0x7009,
		    "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
1148
1149static struct edif_sa_ctl *
1150qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
1151 int dir)
1152{
1153 struct edif_sa_ctl *sa_ctl;
1154 struct qla_sa_update_frame *sap;
1155 int index = sa_frame->fast_sa_index;
1156 unsigned long flags = 0;
1157
1158 sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
1159 if (!sa_ctl) {
1160
1161 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1162 "unable to allocate SA CTL\n");
1163 return NULL;
1164 }
1165
1166
1167
1168
1169
1170
1171 INIT_LIST_HEAD(&sa_ctl->next);
1172 sap = &sa_ctl->sa_frame;
1173 *sap = *sa_frame;
1174 sa_ctl->index = index;
1175 sa_ctl->fcport = fcport;
1176 sa_ctl->flags = 0;
1177 sa_ctl->state = 0L;
1178 ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1179 "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1180 __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
1181 spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1182 if (dir == SAU_FLG_TX)
1183 list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
1184 else
1185 list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
1186 spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1187
1188 return sa_ctl;
1189}
1190
1191void
1192qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
1193{
1194 struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1195 unsigned long flags = 0;
1196
1197 spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1198
1199 list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
1200 next) {
1201 list_del(&sa_ctl->next);
1202 kfree(sa_ctl);
1203 }
1204
1205 list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
1206 next) {
1207 list_del(&sa_ctl->next);
1208 kfree(sa_ctl);
1209 }
1210
1211 spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1212}
1213
1214struct edif_sa_ctl *
1215qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
1216{
1217 struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1218 struct list_head *sa_list;
1219
1220 if (dir == SAU_FLG_TX)
1221 sa_list = &fcport->edif.tx_sa_list;
1222 else
1223 sa_list = &fcport->edif.rx_sa_list;
1224
1225 list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
1226 if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
1227 sa_ctl->index == index)
1228 return sa_ctl;
1229 }
1230 return NULL;
1231}
1232
1233
/*
 * Resolve an SA update frame to a SADB slot and create the sa_ctl that
 * tracks it.
 *
 * Return values:
 *   0                        - sa_index assigned and sa_ctl queued
 *   RX_DELETE_NO_EDIF_SA_INDEX - rx delete for a spi with no SADB entry;
 *                              treated as already-complete by the caller
 *   INVALID_EDIF_SA_INDEX    - no slot could be allocated
 *   -1                       - slot found but sa_ctl allocation failed
 */
static int
qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
	struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_ctl *sa_ctl = NULL;
	int dir;
	uint16_t sa_index;

	/* nonzero means TX key, zero means RX key */
	dir = (sa_frame->flags & SAU_FLG_TX);

	/* map the spi to a SADB slot, allocating one if necessary */
	sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
	if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
		/* rx delete for a key that was never installed */
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
		    __func__, fcport->loop_id, sa_frame->spi);

		/* mark the rx key as settled and tell the app it succeeded */
		fcport->edif.rx_sa_set = 1;
		fcport->edif.rx_sa_pending = 0;
		qla_edb_eventcreate(fcport->vha,
		    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
		    QL_VND_SA_STAT_SUCCESS,
		    QL_VND_RX_SA_KEY, fcport);

		/* caller skips the firmware update for this case */
		return RX_DELETE_NO_EDIF_SA_INDEX;
	} else if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
		    __func__, sa_frame->spi, dir);
		return INVALID_EDIF_SA_INDEX;
	}

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
	    __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);

	/* hand the fast sa_index back to the caller via the frame */
	sa_frame->fast_sa_index = sa_index;

	/* create the tracking structure and queue it on the tx/rx list */
	sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
	if (!sa_ctl) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
		    __func__, sa_frame->spi, dir, sa_index);
		return -1;
	}

	set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);

	/* bump the per-direction rekey statistics */
	if (dir == SAU_FLG_TX)
		fcport->edif.tx_rekey_cnt++;
	else
		fcport->edif.rx_rekey_cnt++;

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
	    fcport->edif.tx_rekey_cnt,
	    fcport->edif.rx_rekey_cnt, fcport->loop_id);

	return 0;
}
1299
1300#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
1301#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
1302
/*
 * qla24xx_sadb_update - bsg handler for an SA (security association)
 * update/delete request from the authentication application.
 *
 * Copies the qla_sa_update_frame out of the bsg payload, validates the
 * session, reserves a SADB slot, and issues an SRB_SA_UPDATE IOCB to the
 * firmware.  For an rx-key invalidate the old sa_index is parked on the
 * fcport's edif index list with a delayed-delete timer instead of being
 * deleted immediately.
 *
 * Returns 0; failures are reported to the app through bsg_reply->result
 * (the bsg job is always completed, either here on the error paths or
 * later by the IOCB completion).
 */
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	srb_t *sp = NULL;
	struct edif_list_entry *edif_entry = NULL;
	int found = 0;
	int rval = 0;
	int result = 0;
	struct qla_sa_update_frame sa_frame;
	struct srb_iocb *iocb_cmd;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
	    "%s entered, vha: 0x%p\n", __func__, vha);

	/* pull the SA update frame out of the bsg request payload */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sa_frame,
	    sizeof(struct qla_sa_update_frame));

	/* adapter must be online to talk to the firmware */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	/* the authentication application must have started the doorbell */
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
	if (fcport) {
		found = 1;
		/* a fresh key restarts the per-direction byte counter */
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
			fcport->edif.tx_bytes = 0;
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
			fcport->edif.rx_bytes = 0;
	}

	if (!found) {
		ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
		    sa_frame.port_id.b24);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
		goto done;
	}

	/* session must be logged in (valid nport handle) for the IOCB */
	if (fcport->loop_id == FC_NO_LOOP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
		    __func__, fcport->port_name, sa_frame.spi,
		    fcport->disc_state);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
		goto done;
	}

	/* reserve a SADB slot and sa_ctl for this spi */
	result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);

	/* no slot available - fail the request back to the app */
	if (result == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping update.\n",
		    __func__, fcport->port_name);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;

	/* rx delete with nothing installed - already complete, report OK */
	} else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping rx delete.\n",
		    __func__, fcport->port_name);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x70e1,
	    "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
	    __func__, fcport->port_name, sa_frame.fast_sa_index,
	    sa_frame.flags);

	/* rx-key invalidate: defer the old index delete via a timer */
	if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    (sa_frame.flags & SAU_FLG_INV)) {
		uint16_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;

		/*
		 * Locate the entry (keyed by nport handle) that records
		 * which rx sa_index is pending delayed deletion.
		 */
		edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
		if (!edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
			    __func__, fcport->loop_id, sa_index);
			goto force_rx_delete;
		}

		/*
		 * App requested an immediate delete: drop the list entry
		 * and send the delete IOCB right away.
		 */
		if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
			qla_edif_list_delete_sa_index(fcport, edif_entry);
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
			    __func__, fcport->loop_id, sa_index);
			kfree(edif_entry);
			goto force_rx_delete;
		}

		/*
		 * Only one delayed delete may be outstanding per handle.
		 * If one is already pending, release this request's slot
		 * and sa_ctl and fail the update back to the app.
		 */
		if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
			struct edif_sa_ctl *sa_ctl;

			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
			    __func__, edif_entry->handle, edif_entry->delete_sa_index);

			/* undo the sa_ctl created by the avail-slot check */
			sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
			    (sa_frame.flags & SAU_FLG_TX));
			if (sa_ctl) {
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: freeing sa_ctl for index %d\n",
				    __func__, sa_ctl->index);
				qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
			}

			/* release the SADB slot reserved above */
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_index %d, nph: 0x%x\n",
			    __func__, sa_index, nport_handle);
			qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);

			rval = -EINVAL;
			SET_DID_STATUS(bsg_reply->result, DID_ERROR);
			goto done;
		}

		fcport->edif.rekey_cnt++;

		/* arm the delayed-delete timer for the superseded rx key */
		edif_entry->fcport = fcport;
		edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
		    __func__, edif_entry, sa_index, nport_handle);

		/*
		 * NOTE(review): the timer is started before delete_sa_index
		 * is recorded below — presumably safe because the timeout is
		 * seconds away; confirm ordering against the timer handler.
		 */
		add_timer(&edif_entry->timer);

		/*
		 * The delete IOCB for the old index is NOT sent now; the
		 * timer handler (or traffic on the new key) triggers it.
		 * Complete the bsg job back to the app with success.
		 */
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
		    __func__, sa_index, nport_handle, bsg_job);

		edif_entry->delete_sa_index = sa_index;

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK << 16;

		goto done;

	/*
	 * rx-key update (not an invalidate): remember the new sa_index so
	 * a later invalidate can be matched against it.
	 */
	} else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    ((sa_frame.flags & SAU_FLG_INV) == 0)) {

		uint32_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;
		int result;

		/*
		 * Record the rx sa_index on the per-handle list; failure is
		 * logged but does not abort the firmware update.
		 */
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
		    __func__, sa_index, nport_handle);

		result = qla_edif_list_add_sa_update_index(fcport, sa_index,
		    nport_handle);
		if (result) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
			    __func__, sa_index, nport_handle);
		}
	}
	if (sa_frame.flags & SAU_FLG_GMAC_MODE)
		fcport->edif.aes_gmac = 1;
	else
		fcport->edif.aes_gmac = 0;

force_rx_delete:
	/*
	 * Send the SA update/delete to the firmware.  The IOCB completion
	 * (qla2x00_bsg_job_done) finishes the bsg job on this path.
	 */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	sp->type = SRB_SA_UPDATE;
	sp->name = "bsg_sa_update";
	sp->u.bsg_job = bsg_job;

	sp->free = qla2x00_rel_sp;
	sp->done = qla2x00_bsg_job_done;
	iocb_cmd = &sp->u.iocb_cmd;
	iocb_cmd->u.sa_update.sa_frame = sa_frame;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_dbg_edif, vha, 0x70e3,
		    "qla2x00_start_sp failed=%d.\n", rval);

		qla2x00_rel_sp(sp);
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s: %s sent, hdl=%x, portid=%06x.\n",
	    __func__, sp->name, sp->handle, fcport->d_id.b24);

	fcport->edif.rekey_cnt++;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	SET_DID_STATUS(bsg_reply->result, DID_OK);

	return 0;

/*
 * Error / early-completion exit: finish the bsg job here since no IOCB
 * completion will do it.
 */
done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
	    __func__, bsg_reply->result, bsg_job);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
1584
1585static void
1586qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
1587{
1588 node->ntype = N_UNDEF;
1589 kfree(node);
1590}
1591
1592
1593
1594
1595
1596
1597
/*
 * Initialize the purex enode list and its lock for this host.
 * A no-op (with a debug message) if the list is still marked active.
 */
void
qla_enode_init(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	char name[32];

	if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
		/* list still active - error */
		ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
		    __func__);
		return;
	}

	/* initialize lock which protects pur_core & init list */
	spin_lock_init(&vha->pur_cinfo.pur_lock);
	INIT_LIST_HEAD(&vha->pur_cinfo.head);

	/* NOTE(review): 'name' is built here but never used afterwards —
	 * looks like leftover scaffolding; confirm before removing. */
	snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
	    ha->pdev->device);
}
1618
1619
1620
1621
1622
1623
1624
1625void
1626qla_enode_stop(scsi_qla_host_t *vha)
1627{
1628 unsigned long flags;
1629 struct enode *node, *q;
1630
1631 if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1632
1633 ql_dbg(ql_dbg_edif, vha, 0x09102,
1634 "%s enode not active\n", __func__);
1635 return;
1636 }
1637
1638
1639 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1640
1641 vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE;
1642
1643
1644 list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
1645 ql_dbg(ql_dbg_edif, vha, 0x910f,
1646 "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
1647 node->dinfo.nodecnt);
1648 list_del_init(&node->list);
1649 qla_enode_free(vha, node);
1650 }
1651 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1652}
1653
1654
1655
1656
1657
1658
1659static struct enode *
1660qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1661{
1662 struct enode *node;
1663 struct purexevent *purex;
1664
1665 node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
1666 if (!node)
1667 return NULL;
1668
1669 purex = &node->u.purexinfo;
1670 purex->msgp = (u8 *)(node + 1);
1671 purex->msgp_len = ELS_MAX_PAYLOAD;
1672
1673 node->ntype = ntype;
1674 INIT_LIST_HEAD(&node->list);
1675 return node;
1676}
1677
1678static void
1679qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
1680{
1681 unsigned long flags;
1682
1683 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
1684 "%s add enode for type=%x, cnt=%x\n",
1685 __func__, ptr->ntype, ptr->dinfo.nodecnt);
1686
1687 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1688 list_add_tail(&ptr->list, &vha->pur_cinfo.head);
1689 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1690
1691 return;
1692}
1693
1694static struct enode *
1695qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
1696{
1697 struct enode *node_rtn = NULL;
1698 struct enode *list_node = NULL;
1699 unsigned long flags;
1700 struct list_head *pos, *q;
1701 uint32_t sid;
1702 uint32_t rw_flag;
1703 struct purexevent *purex;
1704
1705
1706 spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1707
1708 list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
1709 list_node = list_entry(pos, struct enode, list);
1710
1711
1712 purex = &list_node->u.purexinfo;
1713 sid = p1;
1714 rw_flag = p2;
1715
1716 if (purex->pur_info.pur_sid.b24 == sid) {
1717 if (purex->pur_info.pur_pend == 1 &&
1718 rw_flag == PUR_GET) {
1719
1720
1721
1722
1723
1724 ql_dbg(ql_dbg_edif, vha, 0x9106,
1725 "%s purex xfer in progress for sid=%x\n",
1726 __func__, sid);
1727 } else {
1728
1729 node_rtn = list_node;
1730 list_del(pos);
1731 break;
1732 }
1733 }
1734 }
1735
1736 spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1737
1738 return node_rtn;
1739}
1740
1741
1742
1743
1744
1745
1746
1747
1748static int
1749qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
1750 struct bsg_job *bsg_job)
1751{
1752 struct enode *ptr;
1753 struct purexevent *purex;
1754 struct qla_bsg_auth_els_reply *rpl =
1755 (struct qla_bsg_auth_els_reply *)bsg_job->reply;
1756
1757 bsg_job->reply_len = sizeof(*rpl);
1758
1759 ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
1760 if (!ptr) {
1761 ql_dbg(ql_dbg_edif, vha, 0x9111,
1762 "%s no enode data found for %8phN sid=%06x\n",
1763 __func__, fcport->port_name, fcport->d_id.b24);
1764 SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
1765 return -EIO;
1766 }
1767
1768
1769
1770
1771 purex = &ptr->u.purexinfo;
1772
1773
1774 rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
1775
1776 SET_DID_STATUS(rpl->r.result, DID_OK);
1777 rpl->r.reply_payload_rcv_len =
1778 sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
1779 bsg_job->reply_payload.sg_cnt, purex->msgp,
1780 purex->pur_info.pur_bytes_rcvd, 0);
1781
1782
1783 qla_enode_free(vha, ptr);
1784
1785 return 0;
1786}
1787
1788
1789static int
1790qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
1791 struct qla_els_pt_arg *a)
1792{
1793 struct els_entry_24xx *els_iocb;
1794
1795 els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1796 if (!els_iocb) {
1797 ql_log(ql_log_warn, vha, 0x700c,
1798 "qla2x00_alloc_iocbs failed.\n");
1799 return QLA_FUNCTION_FAILED;
1800 }
1801
1802 qla_els_pt_iocb(vha, els_iocb, a);
1803
1804 ql_dbg(ql_dbg_edif, vha, 0x0183,
1805 "Sending ELS reject...\n");
1806 ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
1807 vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
1808
1809 wmb();
1810 qla2x00_start_iocbs(vha, qp->req);
1811 return 0;
1812}
1813
1814void
1815qla_edb_init(scsi_qla_host_t *vha)
1816{
1817 if (vha->e_dbell.db_flags == EDB_ACTIVE) {
1818
1819 ql_dbg(ql_dbg_edif, vha, 0x09102,
1820 "edif db already initialized, cannot reinit\n");
1821 return;
1822 }
1823
1824
1825 spin_lock_init(&vha->e_dbell.db_lock);
1826 INIT_LIST_HEAD(&vha->e_dbell.head);
1827
1828
1829 init_completion(&vha->e_dbell.dbell);
1830}
1831
1832static void
1833qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
1834{
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844 if (!node) {
1845 ql_dbg(ql_dbg_edif, vha, 0x09122,
1846 "%s error - no valid node passed\n", __func__);
1847 return;
1848 }
1849
1850 node->ntype = N_UNDEF;
1851}
1852
1853
1854
1855void
1856qla_edb_stop(scsi_qla_host_t *vha)
1857{
1858 unsigned long flags;
1859 struct edb_node *node, *q;
1860
1861 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1862
1863 ql_dbg(ql_dbg_edif, vha, 0x09102,
1864 "%s doorbell not enabled\n", __func__);
1865 return;
1866 }
1867
1868
1869 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1870
1871 vha->e_dbell.db_flags &= ~EDB_ACTIVE;
1872
1873 list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
1874 ql_dbg(ql_dbg_edif, vha, 0x910f,
1875 "%s freeing edb_node type=%x\n",
1876 __func__, node->ntype);
1877 qla_edb_node_free(vha, node);
1878 list_del(&node->list);
1879
1880 kfree(node);
1881 }
1882 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1883
1884
1885 complete_all(&vha->e_dbell.dbell);
1886}
1887
1888static struct edb_node *
1889qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1890{
1891 struct edb_node *node;
1892
1893 node = kzalloc(sizeof(*node), GFP_ATOMIC);
1894 if (!node) {
1895
1896 ql_dbg(ql_dbg_edif, vha, 0x9100,
1897 "edb node unable to be allocated\n");
1898 return NULL;
1899 }
1900
1901 node->ntype = ntype;
1902 INIT_LIST_HEAD(&node->list);
1903 return node;
1904}
1905
1906
1907static bool
1908qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
1909{
1910 unsigned long flags;
1911
1912 if (vha->e_dbell.db_flags != EDB_ACTIVE) {
1913
1914 ql_dbg(ql_dbg_edif, vha, 0x09102,
1915 "%s doorbell not enabled\n", __func__);
1916 return false;
1917 }
1918
1919 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1920 list_add_tail(&ptr->list, &vha->e_dbell.head);
1921 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1922
1923
1924 complete(&vha->e_dbell.dbell);
1925
1926 return true;
1927}
1928
1929
/*
 * qla_edb_eventcreate - produce an authentication-state event for the app.
 *
 * @dbtype:  VND_CMD_AUTH_STATE_* event code
 * @data:    event-specific; carries the 24-bit port id when @sfcport is
 *           NULL, or the status for SAUPDATE_COMPL
 * @data2:   event-specific (key type for SAUPDATE_COMPL)
 * @sfcport: session the event is about, or NULL to look it up from @data
 *
 * If the doorbell is not active the event is dropped but the session's
 * auth_state is still recorded.
 */
void
qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
	uint32_t data, uint32_t data2, fc_port_t *sfcport)
{
	struct edb_node *edbnode;
	fc_port_t *fcport = sfcport;
	port_id_t id;

	if (!vha->hw->flags.edif_enabled) {
		/* edif not enabled - nothing to report */
		return;
	}

	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		/* app not listening; remember the state on the session */
		if (fcport)
			fcport->edif.auth_state = dbtype;

		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled (type=%d\n", __func__, dbtype);
		return;
	}

	edbnode = qla_edb_node_alloc(vha, dbtype);
	if (!edbnode) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unable to alloc db node\n", __func__);
		return;
	}

	if (!fcport) {
		/* @data packs the port id as domain/area/al_pa bytes */
		id.b.domain = (data >> 16) & 0xff;
		id.b.area = (data >> 8) & 0xff;
		id.b.al_pa = data & 0xff;
		ql_dbg(ql_dbg_edif, vha, 0x09222,
		    "%s: Arrived s_id: %06x\n", __func__,
		    id.b24);
		fcport = qla2x00_find_fcport_by_pid(vha, &id);
		if (!fcport) {
			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s can't find fcport for sid= 0x%x - ignoring\n",
			    __func__, id.b24);
			kfree(edbnode);
			return;
		}
	}

	/* fill in the type-specific portion of the event node */
	switch (dbtype) {
	case VND_CMD_AUTH_STATE_NEEDED:
	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
		edbnode->u.plogi_did.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_ELS_RCVD:
		edbnode->u.els_sid.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
		edbnode->u.sa_aen.port_id = fcport->d_id;
		edbnode->u.sa_aen.status = data;
		edbnode->u.sa_aen.key_type = data2;
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unknown type: %x\n", __func__, dbtype);
		qla_edb_node_free(vha, edbnode);
		kfree(edbnode);
		edbnode = NULL;
		break;
	}

	/* queue the node; on failure we still own it and must free it */
	if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unable to add dbnode\n", __func__);
		qla_edb_node_free(vha, edbnode);
		kfree(edbnode);
		return;
	}
	if (edbnode && fcport)
		fcport->edif.auth_state = dbtype;
	ql_dbg(ql_dbg_edif, vha, 0x09102,
	    "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
}
2011
2012static struct edb_node *
2013qla_edb_getnext(scsi_qla_host_t *vha)
2014{
2015 unsigned long flags;
2016 struct edb_node *edbnode = NULL;
2017
2018 spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2019
2020
2021 if (!list_empty(&vha->e_dbell.head)) {
2022 edbnode = list_first_entry(&vha->e_dbell.head,
2023 struct edb_node, list);
2024 list_del(&edbnode->list);
2025 }
2026
2027 spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2028
2029 return edbnode;
2030}
2031
2032void
2033qla_edif_timer(scsi_qla_host_t *vha)
2034{
2035 struct qla_hw_data *ha = vha->hw;
2036
2037 if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2038 if (vha->e_dbell.db_flags != EDB_ACTIVE &&
2039 ha->edif_post_stop_cnt_down) {
2040 ha->edif_post_stop_cnt_down--;
2041
2042
2043
2044
2045
2046
2047 if (ha->edif_post_stop_cnt_down == 0) {
2048 ql_dbg(ql_dbg_async, vha, 0x911d,
2049 "%s chip reset to turn off PLOGI ACC + secure\n",
2050 __func__);
2051 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2052 }
2053 } else {
2054 ha->edif_post_stop_cnt_down = 60;
2055 }
2056 }
2057}
2058
2059
2060
2061
2062
/*
 * edif_doorbell_show - sysfs read of the edif app doorbell.
 *
 * Drains queued doorbell events into @buf as a sequence of variable-
 * length edif_app_dbell records (event_code, event_data_size, then
 * event data) until the queue is empty or the remaining space cannot
 * hold another record.  Returns the number of bytes written, 0 when the
 * doorbell is inactive, -1 when edif is disabled.
 */
ssize_t
edif_doorbell_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct edb_node *dbnode = NULL;
	struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
	uint32_t dat_siz, buf_size, sz;

	/* NOTE(review): output capped at 256 bytes, not PAGE_SIZE —
	 * presumably intentional; confirm against the app's reader. */
	sz = 256;

	/* no doorbell means no events to report */
	if (vha->e_dbell.db_flags != EDB_ACTIVE) {
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
		    "%s error - edif db not enabled\n", __func__);
		return 0;
	}

	if (!vha->hw->flags.edif_enabled) {
		/* edif not enabled at all */
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
		    "%s error - edif not enabled\n", __func__);
		return -1;
	}

	buf_size = 0;
	while ((sz - buf_size) >= sizeof(struct edb_node)) {
		/* pop one event and serialize it at the current offset */
		dat_siz = 0;
		dbnode = qla_edb_getnext(vha);
		if (dbnode) {
			ap->event_code = dbnode->ntype;
			switch (dbnode->ntype) {
			case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
			case VND_CMD_AUTH_STATE_NEEDED:
				ap->port_id = dbnode->u.plogi_did;
				dat_siz += sizeof(ap->port_id);
				break;
			case VND_CMD_AUTH_STATE_ELS_RCVD:
				ap->port_id = dbnode->u.els_sid;
				dat_siz += sizeof(ap->port_id);
				break;
			case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
				ap->port_id = dbnode->u.sa_aen.port_id;
				memcpy(ap->event_data, &dbnode->u,
				    sizeof(struct edif_sa_update_aen));
				dat_siz += sizeof(struct edif_sa_update_aen);
				break;
			default:
				/* unknown event: report the raw type code */
				ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
				memcpy(ap->event_data, &dbnode->ntype, 4);
				dat_siz += 4;
				break;
			}

			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s Doorbell consumed : type=%d %p\n",
			    __func__, dbnode->ntype, dbnode);

			/* the node was copied out; release it */
			qla_edb_node_free(vha, dbnode);
			kfree(dbnode);
		} else {
			break;
		}

		ap->event_data_size = dat_siz;
		/* advance past this record; 8 = record header size
		 * (event_code + event_data_size, 4 bytes each) —
		 * presumably; confirm against struct edif_app_dbell */
		buf_size += dat_siz + 8;
		ap = (struct edif_app_dbell *)(buf + buf_size);
	}
	return buf_size;
}
2137
2138static void qla_noop_sp_done(srb_t *sp, int res)
2139{
2140 sp->free(sp);
2141}
2142
2143
2144
2145
2146
2147int
2148qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
2149{
2150 srb_t *sp;
2151 fc_port_t *fcport = NULL;
2152 struct srb_iocb *iocb_cmd = NULL;
2153 int rval = QLA_SUCCESS;
2154 struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
2155 uint16_t nport_handle = e->u.sa_update.nport_handle;
2156
2157 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2158 "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);
2159
2160 if (!sa_ctl) {
2161 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2162 "sa_ctl allocation failed\n");
2163 return -ENOMEM;
2164 }
2165
2166 fcport = sa_ctl->fcport;
2167
2168
2169 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2170 if (!sp) {
2171 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2172 "SRB allocation failed\n");
2173 return -ENOMEM;
2174 }
2175
2176 fcport->flags |= FCF_ASYNC_SENT;
2177 iocb_cmd = &sp->u.iocb_cmd;
2178 iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
2179
2180 ql_dbg(ql_dbg_edif, vha, 0x3073,
2181 "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2182 fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
2183
2184
2185
2186
2187 if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
2188
2189 sp->flags |= SRB_EDIF_CLEANUP_DELETE;
2190 ql_dbg(ql_dbg_edif, vha, 0x70e6,
2191 "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
2192 }
2193
2194 sp->type = SRB_SA_REPLACE;
2195 sp->name = "SA_REPLACE";
2196 sp->fcport = fcport;
2197 sp->free = qla2x00_rel_sp;
2198 sp->done = qla_noop_sp_done;
2199
2200 rval = qla2x00_start_sp(sp);
2201
2202 if (rval != QLA_SUCCESS)
2203 rval = QLA_FUNCTION_FAILED;
2204
2205 return rval;
2206}
2207
/*
 * qla24xx_sa_update_iocb - build a SA_UPDATE IOCB from the sa_frame
 * stashed in @sp (SRB_SA_UPDATE path).
 *
 * Translates the app's SAU_FLG_* flags into the firmware SA_FLAG_*
 * bits, copies the key material, and marks the matching tx/rx key as
 * pending on the session.
 */
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
{
	int itr = 0;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_sa_update_frame *sa_frame =
		&sp->u.iocb_cmd.u.sa_update.sa_frame;
	u8 flags = 0;

	/* decode the (INV, TX) flag pair into firmware flag bits */
	switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_INVALIDATE;
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX;
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
		break;
	}

	/* fixed IOCB header */
	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
	sa_update_iocb->entry_count = 1;
	sa_update_iocb->sys_define = 0;
	sa_update_iocb->entry_status = 0;
	sa_update_iocb->handle = sp->handle;
	sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
	/* port id is stored little-end-first: al_pa, area, domain */
	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;

	sa_update_iocb->flags = flags;
	sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
	sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
	sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);

	sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
	if (sp->fcport->edif.aes_gmac)
		sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;

	/* copy the 256-bit or 128-bit key depending on the frame flag */
	if (sa_frame->flags & SAU_FLG_KEY256) {
		sa_update_iocb->sa_control |= SA_CNTL_KEY256;
		for (itr = 0; itr < 32; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	} else {
		sa_update_iocb->sa_control |= SA_CNTL_KEY128;
		for (itr = 0; itr < 16; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	}

	ql_dbg(ql_dbg_edif, vha, 0x921d,
	    "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
	    sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
	    sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
	    sp->fcport->edif.aes_gmac);

	/* mark the affected key as in-flight until the IOCB completes */
	if (sa_frame->flags & SAU_FLG_TX)
		sp->fcport->edif.tx_sa_pending = 1;
	else
		sp->fcport->edif.rx_sa_pending = 1;

	sp->fcport->vha->qla_stats.control_requests++;
}
2286
/*
 * qla24xx_sa_replace_iocb - build a SA_UPDATE IOCB that invalidates
 * (deletes) an existing rx SA index (SRB_SA_REPLACE path).
 *
 * Unlike qla24xx_sa_update_iocb() this always sets SA_FLAG_INVALIDATE
 * and carries no key material, salt, or spi.
 */
void
qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
	struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
	uint16_t nport_handle = sp->fcport->loop_id;

	/* fixed IOCB header */
	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
	sa_update_iocb->entry_count = 1;
	sa_update_iocb->sys_define = 0;
	sa_update_iocb->entry_status = 0;
	sa_update_iocb->handle = sp->handle;

	sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);

	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
	/* port id is stored little-end-first: al_pa, area, domain */
	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;

	/* delete-only request: no key, salt or spi to program */
	sa_update_iocb->flags = SA_FLAG_INVALIDATE;
	sa_update_iocb->salt = 0;
	sa_update_iocb->spi = 0;
	sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
	sa_update_iocb->sa_control = 0;

	ql_dbg(ql_dbg_edif, vha, 0x921d,
	    "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
	    sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
	    sa_update_iocb->sa_index, sp->handle);

	sp->fcport->vha->qla_stats.control_requests++;
}
2323
/*
 * qla24xx_auth_els - handle a received AUTH ELS (purex) from the ISR.
 *
 * Validates the frame, copies its payload into a freshly allocated
 * enode, and queues the enode for the authentication application to
 * retrieve (the app is notified via a doorbell event).  Any validation
 * failure responds with an ELS LS_RJT built from @a and consumes the
 * purex IOCB(s).
 */
void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
{
	struct purex_entry_24xx *p = *pkt;
	struct enode		*ptr;
	int		sid;
	u16 totlen;
	struct purexevent	*purex;
	struct scsi_qla_host *host = NULL;
	int rc;
	struct fc_port *fcport;
	struct qla_els_pt_arg a;
	be_id_t beid;

	memset(&a, 0, sizeof(a));

	/* pre-build the reject arguments in case any check fails */
	a.els_opcode = ELS_AUTH_ELS;
	a.nport_handle = p->nport_handle;
	a.rx_xchg_address = p->rx_xchg_addr;
	a.did.b.domain = p->s_id[2];
	a.did.b.area   = p->s_id[1];
	a.did.b.al_pa  = p->s_id[0];
	a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
	a.tx_addr = vha->hw->elsrej.cdma;
	a.vp_idx = vha->vp_idx;
	a.control_flags = EPD_ELS_RJT;

	/* assemble the 24-bit source id from the wire bytes */
	sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);

	/* payload length excludes the purex ELS header */
	totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
	if (le16_to_cpu(p->status_flags) & 0x8000) {
		/* frame was truncated by the hardware - reject it */
		totlen = le16_to_cpu(p->trunc_frame_size);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	if (totlen > MAX_PAYLOAD) {
		ql_dbg(ql_dbg_edif, vha, 0x0910d,
		    "%s WARNING: verbose ELS frame received (totlen=%x)\n",
		    __func__, totlen);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	if (!vha->hw->flags.edif_enabled) {
		/* edif support not enabled on this adapter */
		ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
		    __func__);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	ptr = qla_enode_alloc(vha, N_PUREX);
	if (!ptr) {
		ql_dbg(ql_dbg_edif, vha, 0x09109,
		    "WARNING: enode alloc failed for sid=%x\n",
		    sid);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	/* record the frame's addressing info on the enode */
	purex = &ptr->u.purexinfo;
	purex->pur_info.pur_sid = a.did;
	purex->pur_info.pur_pend = 0;
	purex->pur_info.pur_bytes_rcvd = totlen;
	purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
	purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
	purex->pur_info.pur_did.b.domain =  p->d_id[2];
	purex->pur_info.pur_did.b.area =  p->d_id[1];
	purex->pur_info.pur_did.b.al_pa =  p->d_id[0];
	purex->pur_info.vp_idx = p->vp_idx;

	/* copy the ELS payload out of the (possibly chained) IOCBs */
	rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
		purex->msgp_len);
	if (rc) {
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}
	beid.al_pa = purex->pur_info.pur_did.b.al_pa;
	beid.area   = purex->pur_info.pur_did.b.area;
	beid.domain = purex->pur_info.pur_did.b.domain;
	/* route to the (possibly virtual) host that owns the d_id */
	host = qla_find_host_by_d_id(vha, beid);
	if (!host) {
		ql_log(ql_log_fatal, vha, 0x508b,
		    "%s Drop ELS due to unable to find host %06x\n",
		    __func__, purex->pur_info.pur_did.b24);

		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}

	fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);

	/* reject if the app is down or the session is being torn down */
	if (host->e_dbell.db_flags != EDB_ACTIVE ||
	    (fcport && EDIF_SESSION_DOWN(fcport))) {
		ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
		    __func__, host->e_dbell.db_flags,
		    fcport ? fcport->d_id.b24 : 0);

		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
		qla_enode_free(host, ptr);
		return;
	}

	/* queue the frame for the app to fetch via qla_pur_get_pending() */
	qla_enode_add(host, ptr);

	ql_dbg(ql_dbg_edif, host, 0x0910c,
	    "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
	    __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
	    purex->pur_info.pur_did.b24, p->rx_xchg_addr);

	/* ring the app doorbell: an ELS from 'sid' is waiting */
	qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
}
2443
2444static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
2445{
2446 struct scsi_qla_host *vha = fcport->vha;
2447 struct qla_hw_data *ha = vha->hw;
2448 void *sa_id_map;
2449 unsigned long flags = 0;
2450 u16 sa_index;
2451
2452 ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2453 "%s: entry\n", __func__);
2454
2455 if (dir)
2456 sa_id_map = ha->edif_tx_sa_id_map;
2457 else
2458 sa_id_map = ha->edif_rx_sa_id_map;
2459
2460 spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2461 sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
2462 if (sa_index >= EDIF_NUM_SA_INDEX) {
2463 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2464 return INVALID_EDIF_SA_INDEX;
2465 }
2466 set_bit(sa_index, sa_id_map);
2467 spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2468
2469 if (dir)
2470 sa_index += EDIF_TX_SA_INDEX_BASE;
2471
2472 ql_dbg(ql_dbg_edif, vha, 0x3063,
2473 "%s: index retrieved from free pool %d\n", __func__, sa_index);
2474
2475 return sa_index;
2476}
2477
2478
2479static struct edif_sa_index_entry *
2480qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
2481 struct list_head *sa_list)
2482{
2483 struct edif_sa_index_entry *entry;
2484 struct edif_sa_index_entry *tentry;
2485 struct list_head *indx_list = sa_list;
2486
2487 list_for_each_entry_safe(entry, tentry, indx_list, next) {
2488 if (entry->handle == nport_handle)
2489 return entry;
2490 }
2491 return NULL;
2492}
2493
2494
/*
 * qla_edif_sadb_delete_sa_index - release one SA index held by the sadb
 * entry for @nport_handle, returning it to the free pool; frees the
 * entry itself once both of its slots are empty.
 *
 * Returns 0 on success, -1 if no sadb entry exists for the handle.
 */
static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
		uint16_t sa_index)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	/* tx and rx indexes occupy disjoint ranges; the range implies dir. */
	int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
	int slot = 0;
	int free_slot_count = 0;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry\n", __func__);

	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;

	/*
	 * NOTE(review): the lookup walks the list before sadb_lock is
	 * taken below - presumably concurrent list mutation cannot occur
	 * here; confirm against the other sadb_lock users.
	 */
	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: no entry found for nport_handle 0x%x\n",
		    __func__, nport_handle);
		return -1;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);

	/*
	 * Clear the slot holding sa_index and return the index to the
	 * free pool; count how many of the entry's two slots are empty
	 * afterwards so the entry can be freed when both are.
	 */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == sa_index) {
			entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
			entry->sa_pair[slot].spi = 0;
			free_slot_count++;
			qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
		} else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot_count++;
		}
	}

	if (free_slot_count == 2) {
		/* Both slots empty - this handle no longer needs an entry. */
		list_del(&entry->next);
		kfree(entry);
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: sa_index %d removed, free_slot_count: %d\n",
	    __func__, sa_index, free_slot_count);

	return 0;
}
2551
/**
 * qla28xx_sa_update_iocb_entry - completion handler for an SA update /
 * SA delete IOCB returned by the firmware.
 * @v: host the response arrived on
 * @req: request queue holding the originating srb
 * @pkt: the SA_UPDATE response IOCB
 *
 * Updates per-port eDIF SA state, posts success/failure doorbell events
 * to the authentication application, and releases sa_ctl / sa_index
 * resources for delete completions before completing the srb.
 */
void
qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
	struct sa_update_28xx *pkt)
{
	const char *func = "SA_UPDATE_RESPONSE_IOCB";
	srb_t *sp;
	struct edif_sa_ctl *sa_ctl;
	int old_sa_deleted = 1;
	uint16_t nport_handle;
	struct scsi_qla_host *vha;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);

	if (!sp) {
		ql_dbg(ql_dbg_edif, v, 0x3063,
		    "%s: no sp found for pkt\n", __func__);
		return;
	}

	vha = sp->vha;

	/* Decode the operation from the INVALIDATE/TX flag combination. */
	switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	}

	/*
	 * The sadb entries are keyed by the session's firmware loop id.
	 */
	nport_handle = sp->fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
	    __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
	    nport_handle, pkt->sa_index, pkt->flags, sp->handle);

	/* An rx delete completion retires any cached delayed-delete entry. */
	if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
		struct edif_list_entry *edif_entry;

		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

		edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
		if (edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: removing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);
			qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
			del_timer(&edif_entry->timer);

			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);

			kfree(edif_entry);
		}
	}

	/*
	 * For an invalidate, new_sa_info == 0xffff means the firmware has
	 * fully deleted the old SA; anything else means it is still live.
	 */
	if (pkt->flags & SA_FLAG_INVALIDATE)
		old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;

	/*
	 * Session-teardown deletes just confirm firmware removal; normal
	 * completions update SA state and notify the application.
	 */
	if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: nph 0x%x, sa_index %d removed from fw\n",
		    __func__, sp->fcport->loop_id, pkt->sa_index);

	} else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
	    old_sa_deleted) {
		/*
		 * Success: mark the direction's SA as set and no longer
		 * pending, then post a SAUPDATE_COMPL doorbell event.
		 */
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
		    "SA(%x)updated for s_id %02x%02x%02x\n",
		    pkt->new_sa_info,
		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
		sp->fcport->edif.enable = 1;
		if (pkt->flags & SA_FLAG_TX) {
			sp->fcport->edif.tx_sa_set = 1;
			sp->fcport->edif.tx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    QL_VND_SA_STAT_SUCCESS,
			    QL_VND_TX_SA_KEY, sp->fcport);
		} else {
			sp->fcport->edif.rx_sa_set = 1;
			sp->fcport->edif.rx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    QL_VND_SA_STAT_SUCCESS,
			    QL_VND_RX_SA_KEY, sp->fcport);
		}
	} else {
		/* Failure: report the completion status to the application. */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
		    __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);

		if (pkt->flags & SA_FLAG_TX)
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
			    QL_VND_TX_SA_KEY, sp->fcport);
		else
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
			    QL_VND_RX_SA_KEY, sp->fcport);
	}

	/* For deletes, release the sa_ctl and return sa_index to the pool. */
	if (pkt->flags & SA_FLAG_INVALIDATE) {

		sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
		    le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
			(pkt->flags & SA_FLAG_TX)) != NULL) {
			ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n",
			    __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n",
			    __func__, sa_ctl);
		}
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, le16_to_cpu(pkt->sa_index), nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));

	/*
	 * A failed SA update also forfeits its sa_index; certain eDIF
	 * completion statuses additionally tear down the session.
	 */
	} else if (pkt->u.comp_sts) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, pkt->sa_index, nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));
		switch (le16_to_cpu(pkt->u.comp_sts)) {
		case CS_PORT_EDIF_UNAVAIL:
		case CS_PORT_EDIF_LOGOUT:
			qlt_schedule_sess_for_deletion(sp->fcport);
			break;
		default:
			break;
		}
	}

	sp->done(sp, 0);
}
2733
2734
2735
2736
2737
2738
2739
/**
 * qla28xx_start_scsi_edif - build and queue a Type 6 SCSI command IOCB
 * with eDIF encryption enabled.
 * @sp: the command srb to issue
 *
 * Returns QLA_SUCCESS when the IOCB has been placed on the request ring,
 * QLA_FUNCTION_FAILED on marker failure, ring/handle exhaustion, or
 * allocation failure (all resources acquired here are released on the
 * error paths).
 */
int
qla28xx_start_scsi_edif(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index, i;
	uint32_t handle;
	uint16_t cnt;
	int16_t req_cnt;
	uint16_t tot_dsds;
	__be32 *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_6 *cmd_pkt;
	struct dsd64 *cur_dsd;
	uint8_t avail_dsds = 0;
	struct scatterlist *sg;
	struct req_que *req = sp->qpair->req;
	spinlock_t *lock = sp->qpair->qp_lock_ptr;

	/* Setup device pointers. */
	cmd = GET_CMD_SP(sp);

	/* So we know we haven't pci_map'ed anything yet. */
	tot_dsds = 0;

	/* Send marker if required. */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire qpair/ring lock for the remainder of the submission. */
	spin_lock_irqsave(lock, flags);

	/* Check for room in outstanding command list (handle 0 unused). */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg list for DMA. */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else {
		nseg = 0;
	}

	/* Verify the ring has room for this IOCB (+2 slack entries). */
	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Type 6 carries the FCP_CMND in a separate DMA buffer (ctx). */
	ctx = sp->u.scmd.ct6_ctx =
	    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
	if (!ctx) {
		ql_log(ql_log_fatal, vha, 0x3010,
		    "Failed to allocate ctx for cmd=%p.\n", cmd);
		goto queuing_error;
	}

	memset(ctx, 0, sizeof(struct ct6_dsd));
	ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
	    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
	if (!ctx->fcp_cmnd) {
		ql_log(ql_log_fatal, vha, 0x3011,
		    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
		goto queuing_error;
	}

	/* Initialize the DSD list and dma handle. */
	INIT_LIST_HEAD(&ctx->dsd_list);
	ctx->dsd_use_cnt = 0;

	if (cmd->cmd_len > 16) {
		additional_cdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/*
			 * SCSI command bigger than 16 bytes must be
			 * a multiple of 4 to fit the FCP_CMND layout.
			 */
			ql_log(ql_log_warn, vha, 0x3012,
			    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
			    cmd->cmd_len, cmd);
			goto queuing_error_fcp_cmnd;
		}
		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_cdb_len = 0;
		ctx->fcp_cmnd_len = 12 + 16 + 4;
	}

	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/*
	 * Zero out remaining portion of packet: clear from the third
	 * dword onward (handle/count dwords are set explicitly).
	 */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* No data transfer: zero byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		goto no_dsds;
	}

	/* Set transfer direction and per-direction statistics. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
		sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
		sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
	}

	/* Enable eDIF on this exchange; reuse the current SA. */
	cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
	cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));

	/* One DSD is available in the Command Type 6 IOCB itself. */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	/* Load data segments. */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in each Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
		cur_dsd++;
		avail_dsds--;
	}

no_dsds:
	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	cmd_pkt->entry_type = COMMAND_TYPE_6;

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Build IOCB segments. */
	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

	/* Bits 0/1 of additional_cdb_len encode the data direction. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 2;

	/* Populate the FCP_PRIO task attribute when enabled. */
	if (ha->flags.fcp_prio_enabled)
		ctx->fcp_cmnd->task_attribute |=
		    sp->fcport->fcp_prio << 3;

	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

	/* fcp_dl (big-endian data length) follows the CDB. */
	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
	    additional_cdb_len);
	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
	put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);

	sp->flags |= SRB_FCP_CMND_DMA_VALID;
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->entry_status = 0;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Adjust ring index, wrapping at the end of the ring. */
	wmb();
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	sp->qpair->cmd_cnt++;
	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(lock, flags);

	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ct6_ctx) {
		mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
		sp->u.scmd.ct6_ctx = NULL;
	}
	spin_unlock_irqrestore(lock, flags);

	return QLA_FUNCTION_FAILED;
}
3003
3004
3005
3006
3007
3008
3009void qla_edif_list_del(fc_port_t *fcport)
3010{
3011 struct edif_list_entry *indx_lst;
3012 struct edif_list_entry *tindx_lst;
3013 struct list_head *indx_list = &fcport->edif.edif_indx_list;
3014 unsigned long flags = 0;
3015
3016 spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3017 list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
3018 list_del(&indx_lst->next);
3019 kfree(indx_lst);
3020 }
3021 spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3022}
3023
3024
3025
3026
3027
3028
/*
 * qla_edif_sadb_get_sa_index - find or allocate an SA index for the
 * SPI carried in @sa_frame, on the tx or rx sadb list selected by the
 * frame's SAU_FLG_TX flag.
 *
 * Returns:
 *   - the existing sa_index if the (handle, spi) pair is already known,
 *   - a freshly allocated index otherwise,
 *   - RX_DELETE_NO_EDIF_SA_INDEX for an rx delete with no entry,
 *   - INVALID_EDIF_SA_INDEX on allocation failure or a full entry.
 */
static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
	struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	uint16_t sa_index;
	int dir = sa_frame->flags & SAU_FLG_TX;
	int slot = 0;
	int free_slot = -1;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry  fc_port: %p, nport_handle: 0x%x\n",
	    __func__, fcport, nport_handle);

	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;

	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
			/* rx delete for an unknown handle - nothing to free. */
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: rx delete request with no entry\n", __func__);
			return RX_DELETE_NO_EDIF_SA_INDEX;
		}

		/* Allocate an sadb entry for this nport_handle. */
		entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
		if (!entry)
			return INVALID_EDIF_SA_INDEX;

		sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
		if (sa_index == INVALID_EDIF_SA_INDEX) {
			kfree(entry);
			return INVALID_EDIF_SA_INDEX;
		}

		/* Slot 0 takes the new SPI; slot 1 starts empty. */
		INIT_LIST_HEAD(&entry->next);
		entry->handle = nport_handle;
		entry->fcport = fcport;
		entry->sa_pair[0].spi = sa_frame->spi;
		entry->sa_pair[0].sa_index = sa_index;
		entry->sa_pair[1].spi = 0;
		entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
		spin_lock_irqsave(&ha->sadb_lock, flags);
		list_add_tail(&entry->next, sa_list);
		spin_unlock_irqrestore(&ha->sadb_lock, flags);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
		    __func__, nport_handle, sa_frame->spi, sa_index);

		return sa_index;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);

	/* Find either a matching SPI or a free slot in the pair. */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot = slot;
		} else {
			if (entry->sa_pair[slot].spi == sa_frame->spi) {
				spin_unlock_irqrestore(&ha->sadb_lock, flags);
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
				    __func__, slot, entry->handle, sa_frame->spi,
				    entry->sa_pair[slot].sa_index);
				return entry->sa_pair[slot].sa_index;
			}
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	/* Both slots occupied by other SPIs - no room for this one. */
	if (free_slot == -1) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
		    __func__, entry->handle, sa_frame->spi);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Slot 0  spi: 0x%x  sa_index: %d,  Slot 1  spi: 0x%x  sa_index: %d\n",
		    __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
		    entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);

		return INVALID_EDIF_SA_INDEX;
	}

	/* Allocate an index and record the new SPI in the free slot. */
	sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
	if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: empty freepool!!\n", __func__);
		return INVALID_EDIF_SA_INDEX;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);
	entry->sa_pair[free_slot].spi = sa_frame->spi;
	entry->sa_pair[free_slot].sa_index = sa_index;
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
	ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
	    "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
	    __func__, free_slot, entry->handle, sa_frame->spi, sa_index);

	return sa_index;
}
3138
3139
3140void qla_edif_sadb_release(struct qla_hw_data *ha)
3141{
3142 struct list_head *pos;
3143 struct list_head *tmp;
3144 struct edif_sa_index_entry *entry;
3145
3146 list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
3147 entry = list_entry(pos, struct edif_sa_index_entry, next);
3148 list_del(&entry->next);
3149 kfree(entry);
3150 }
3151
3152 list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
3153 entry = list_entry(pos, struct edif_sa_index_entry, next);
3154 list_del(&entry->next);
3155 kfree(entry);
3156 }
3157}
3158
3159
3160
3161
3162
3163
3164int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3165{
3166 ha->edif_tx_sa_id_map =
3167 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3168
3169 if (!ha->edif_tx_sa_id_map) {
3170 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3171 "Unable to allocate memory for sadb tx.\n");
3172 return -ENOMEM;
3173 }
3174
3175 ha->edif_rx_sa_id_map =
3176 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3177 if (!ha->edif_rx_sa_id_map) {
3178 kfree(ha->edif_tx_sa_id_map);
3179 ha->edif_tx_sa_id_map = NULL;
3180 ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3181 "Unable to allocate memory for sadb rx.\n");
3182 return -ENOMEM;
3183 }
3184 return 0;
3185}
3186
3187
3188void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3189{
3190 kfree(ha->edif_tx_sa_id_map);
3191 ha->edif_tx_sa_id_map = NULL;
3192 kfree(ha->edif_rx_sa_id_map);
3193 ha->edif_rx_sa_id_map = NULL;
3194}
3195
/*
 * __chk_edif_rx_sa_delete_pending - after traffic completes on a newly
 * updated rx SA, decide whether the old (pending-delete) rx SA can now
 * be invalidated, and if so post the SA replace work.
 *
 * The per-port edif index list caches, per nport_handle, the new
 * (update) sa_index and the old (delete) sa_index awaiting teardown.
 */
static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
	fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
{
	struct edif_list_entry *edif_entry;
	struct edif_sa_ctl *sa_ctl;
	uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;
	uint16_t cached_nport_handle;

	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
	if (!edif_entry) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;	/* no pending delete for this session */
	}

	/*
	 * Only act when there is a delete pending AND this completion
	 * was carried on the new (updated) SA; otherwise leave things
	 * untouched.
	 */
	if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
	    edif_entry->update_sa_index != sa_index) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}

	/*
	 * Require several completions on the new SA before tearing the
	 * old one down, in case in-flight frames still reference it.
	 */
	if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_edif, vha, 0x5033,
	    "%s: invalidating delete_sa_index,  update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
	    __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);

	/* Claim the pending delete under the lock, then drop it. */
	delete_sa_index = edif_entry->delete_sa_index;
	edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	cached_nport_handle = edif_entry->handle;
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

	/* Sanity check: the cached handle should match the session's. */
	if (nport_handle != cached_nport_handle) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
		    __func__, nport_handle, cached_nport_handle);
	}

	/* Schedule the delete of the old rx SA via its sa_ctl. */
	sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
	if (sa_ctl) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
		    __func__, sa_ctl, sa_index);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
		    delete_sa_index,
		    edif_entry->update_sa_index, nport_handle, handle);

		sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
		set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
		qla_post_sa_replace_work(fcport->vha, fcport,
		    nport_handle, sa_ctl);
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
		    __func__, delete_sa_index);
	}
}
3269
3270void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3271 srb_t *sp, struct sts_entry_24xx *sts24)
3272{
3273 fc_port_t *fcport = sp->fcport;
3274
3275 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3276 uint32_t handle;
3277
3278 handle = (uint32_t)LSW(sts24->handle);
3279
3280
3281 if (cmd->sc_data_direction != DMA_FROM_DEVICE)
3282 return;
3283
3284 return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
3285 le16_to_cpu(sts24->edif_sa_index));
3286}
3287
/*
 * qlt_chk_edif_rx_sa_delete_pending - target-mode (CTIO) counterpart of
 * qla_chk_edif_rx_sa_delete_pending(); forwards the exchange handle and
 * SA index from the CTIO completion to the common checker.
 */
void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct ctio7_from_24xx *pkt)
{
	__chk_edif_rx_sa_delete_pending(vha, fcport,
	    pkt->handle, le16_to_cpu(pkt->edif_sa_index));
}
3294
/*
 * qla_parse_auth_els_ctl - populate the ELS pass-through arguments for
 * an auth-ELS bsg request from the srb's remapped DMA buffers and the
 * bsg request contents.
 */
static void qla_parse_auth_els_ctl(struct srb *sp)
{
	struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
	struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
	struct fc_bsg_request *request = bsg_job->request;
	struct qla_bsg_auth_els_request *p =
		(struct qla_bsg_auth_els_request *)bsg_job->request;

	/* tx/rx payloads were staged into the srb's remap DMA buffers. */
	a->tx_len = a->tx_byte_count = sp->remap.req.len;
	a->tx_addr = sp->remap.req.dma;
	a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
	a->rx_addr = sp->remap.rsp.dma;

	if (p->e.sub_cmd == SEND_ELS_REPLY) {
		/* Shift the bsg flags into the IOCB control_flags field. */
		a->control_flags = p->e.extra_control_flags << 13;
		a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
		if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
			a->els_opcode = ELS_LS_ACC;
		else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
			a->els_opcode = ELS_LS_RJT;
	}
	a->did = sp->fcport->d_id;
	/*
	 * NOTE(review): this unconditionally overwrites the LS_ACC/LS_RJT
	 * opcode chosen above for SEND_ELS_REPLY - confirm the bsg-supplied
	 * command_code is meant to take precedence.
	 */
	a->els_opcode = request->rqst_data.h_els.command_code;
	a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	a->vp_idx = sp->vha->vp_idx;
}
3321
/**
 * qla_edif_process_els - submit an authentication ELS pass-through
 * request received from user space via bsg.
 * @vha: host to send through
 * @bsg_job: the bsg request carrying the ELS payload
 *
 * Allocates an srb plus DMA-staged request/response buffers, copies the
 * user payload in, and starts the ELS. Returns 0/QLA_SUCCESS when the
 * command was issued (completion is reported through the bsg reply),
 * or a negative errno with bsg_reply->result set.
 */
int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	fc_port_t *fcport = NULL;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int rval =  (DID_ERROR << 16);
	port_id_t d_id;
	struct qla_bsg_auth_els_request *p =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
	d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
	d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];

	/* Find the matching remote port for the requested d_id. */
	fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
	if (!fcport) {
		ql_dbg(ql_dbg_edif, vha, 0x911a,
		    "%s fcport not find online portid=%06x.\n",
		    __func__, d_id.b24);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

	if (qla_bsg_check(vha, bsg_job, fcport))
		return 0;

	if (fcport->loop_id == FC_NO_LOOP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x910d,
		    "%s ELS code %x, no loop id.\n", __func__,
		    bsg_request->rqst_data.r_els.els_code);
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		return -ENXIO;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EIO;
		goto done;
	}

	/* ELS pass-through requires an FWI2 (24xx+) adapter. */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EPERM;
		goto done;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x7004,
		    "Failed get sp pid=%06x\n", fcport->d_id.b24);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	/* Stage request and response DMA buffers for the firmware. */
	sp->remap.req.len = bsg_job->request_payload.payload_len;
	sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.req.dma);
	if (!sp->remap.req.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7005,
		    "Failed allocate request dma len=%x\n",
		    bsg_job->request_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_sp;
	}

	sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
	sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.rsp.dma);
	if (!sp->remap.rsp.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7006,
		    "Failed allocate response dma len=%x\n",
		    bsg_job->reply_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_remap_req;
	}
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
	    sp->remap.req.len);
	sp->remap.remapped = true;

	sp->type = SRB_ELS_CMD_HST_NOLOGIN;
	sp->name = "SPCN_BSG_HST_NOLOGIN";
	sp->u.bsg_cmd.bsg_job = bsg_job;
	qla_parse_auth_els_ctl(sp);

	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	rval = qla2x00_start_sp(sp);

	/*
	 * NOTE(review): this trace dereferences sp after qla2x00_start_sp()
	 * - on success the command could in principle already have
	 * completed and released sp; confirm the lifetime guarantee.
	 */
	ql_dbg(ql_dbg_edif, vha, 0x700a,
	    "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
	    __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
	    p->e.extra_rx_xchg_address, p->e.extra_control_flags,
	    sp->handle, sp->remap.req.len, bsg_job);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		rval = -EIO;
		goto done_free_remap_rsp;
	}
	return rval;

done_free_remap_rsp:
	dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
	    sp->remap.rsp.dma);
done_free_remap_req:
	dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
	    sp->remap.req.dma);
done_free_sp:
	qla2x00_rel_sp(sp);

done:
	return rval;
}
3449
3450void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
3451{
3452 if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
3453 ql_dbg(ql_dbg_disc, vha, 0xf09c,
3454 "%s: sess %8phN send port_offline event\n",
3455 __func__, sess->port_name);
3456 sess->edif.app_sess_online = 0;
3457 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
3458 sess->d_id.b24, 0, sess);
3459 qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
3460 }
3461}
3462