1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/timer.h>
15#include <linux/slab.h>
16#include <linux/err.h>
17#include <linux/export.h>
18#include <linux/log2.h>
19
20#include <scsi/fc/fc_fc2.h>
21
22#include <scsi/libfc.h>
23
24#include "fc_libfc.h"
25
26u16 fc_cpu_mask;
27EXPORT_SYMBOL(fc_cpu_mask);
28static u16 fc_cpu_order;
29static struct kmem_cache *fc_em_cachep;
30static struct workqueue_struct *fc_exch_workqueue;
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
/**
 * struct fc_exch_pool - Per-CPU pool of exchanges
 * @lock:	  Protects this pool and the exchange pointer array that is
 *		  allocated immediately after this structure (see
 *		  fc_exch_ptr_get()/fc_exch_ptr_set())
 * @ex_list:	  List of active exchanges in this pool
 * @next_index:	  Next candidate slot index to try on allocation
 * @total_exches: Number of exchanges currently allocated from this pool
 * @left:	  Cache of a recently freed slot index (FC_XID_UNKNOWN if empty)
 * @right:	  Cache of a second recently freed slot index
 *
 * @left/@right let fc_exch_em_alloc() reuse just-freed slots without
 * scanning from @next_index.
 */
struct fc_exch_pool {
	spinlock_t lock;
	struct list_head ex_list;
	u16 next_index;
	u16 total_exches;

	u16 left;
	u16 right;
} ____cacheline_aligned_in_smp;
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
/**
 * struct fc_exch_mgr - The Exchange Manager (EM)
 * @pool:	    Per-CPU array of exchange pools
 * @ep_pool:	    mempool backing fc_exch allocations
 * @lport:	    The local port that owns this EM
 * @class:	    Default FC class for new exchanges
 * @kref:	    Reference count for this EM
 * @min_xid:	    Minimum XID handed out by this EM
 * @max_xid:	    Maximum XID handed out by this EM
 * @pool_max_index: Highest slot index within a per-CPU pool
 * @stats:	    Error/lookup-failure counters:
 *		    @no_free_exch: mempool allocation failures,
 *		    @no_free_exch_xid: no free XID slot in the pool,
 *		    @xid_not_found: lookups that found no exchange,
 *		    @xid_busy: new-exchange request for an in-use XID,
 *		    @seq_not_found: sequence ID mismatches,
 *		    @non_bls_resp: non-BLS responses with no new sequence
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t *ep_pool;
	struct fc_lport *lport;
	enum fc_class class;
	struct kref kref;
	u16 min_xid;
	u16 max_xid;
	u16 pool_max_index;

	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
102
103
104
105
106
107
108
109
110
111
112
113
114
/**
 * struct fc_exch_mgr_anchor - Link between an lport and an exchange manager
 * @ema_list: Entry in the lport's list of EM anchors
 * @mp:	      The exchange manager this anchor refers to
 * @match:    Optional predicate; when set, frames are routed to @mp only
 *	      if it returns true (NULL matches everything)
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
120
121static void fc_exch_rrq(struct fc_exch *);
122static void fc_seq_ls_acc(struct fc_frame *);
123static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
124 enum fc_els_rjt_explan);
125static void fc_exch_els_rec(struct fc_frame *);
126static void fc_exch_els_rrq(struct fc_frame *);
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/* Printable names for R_CTL values, indexed by R_CTL; see fc_exch_rctl_name() */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
211
212
213
214
215
216
217
218
219
220
/**
 * fc_exch_name_lookup() - Look up a printable name in a string table
 * @op:	       Index (opcode) to look up
 * @table:     Table of name strings, possibly containing NULL entries
 * @max_index: Number of entries in @table
 *
 * Returns the table entry for @op, or "unknown" when @op is out of
 * range or the entry is NULL.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	if (op >= max_index || !table[op])
		return "unknown";
	return table[op];
}
232
233
234
235
236
/**
 * fc_exch_rctl_name() - Return a printable name for an R_CTL value
 * @op: The R_CTL value to name; unknown values yield "unknown"
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}
242
243
244
245
246
/**
 * fc_exch_hold() - Take a reference on an exchange
 * @ep: The exchange; pair with fc_exch_release()
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
251
252
253
254
255
256
257
258
259
260
261
/**
 * fc_exch_setup_hdr() - Initialize frame header and SOF/EOF from exchange state
 * @ep:	   The exchange owning the sequence
 * @fp:	   The frame whose header and framing fields are filled in
 * @f_ctl: F_CTL bits for the frame (already in host order)
 *
 * Sets SOF/EOF, pads the payload to a 4-byte multiple when this frame
 * ends the sequence, and stamps OX_ID/RX_ID/SEQ_ID/SEQ_CNT from @ep.
 * Caller must hold the exchange lock (fields of ep->seq are read).
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	/* First frame of a sequence uses the initiating SOF for the class */
	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack((enum fc_sof)ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * Pad the frame out to a 4-byte boundary and record the
		 * number of fill bytes in the low F_CTL bits.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO: set fill bytes to zero? (see upstream note) */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		/* Mid-sequence frames must already be 4-byte aligned */
		WARN_ON(fr_len(fp) % 4 != 0);
		fr_eof(fp) = FC_EOF_N;
	}

	/* Initialize remaining fh fields from fc_fill_fc_hdr */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
303
304
305
306
307
308
309
310
/**
 * fc_exch_release() - Drop a reference on an exchange
 * @ep: The exchange
 *
 * On the final reference the destructor (if any) is invoked and the
 * exchange is returned to its manager's mempool.  The exchange must be
 * complete (ESB_ST_COMPLETE) before the last reference is dropped.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}
323
324
325
326
327
/**
 * fc_exch_timer_cancel() - Cancel the exchange timer if pending
 * @ep: The exchange
 *
 * Drops the reference the timer was holding when the pending work is
 * successfully cancelled.  Caller holds the exchange lock.
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	}
}
335
336
337
338
339
340
341
342
343
344
/**
 * fc_exch_timer_set_locked() - Arm the exchange timeout handler
 * @ep:		The exchange whose timer is started
 * @timer_msec:	Timeout in milliseconds
 *
 * Takes a reference for the queued work; the reference is dropped here
 * if the work was already queued, otherwise by fc_exch_timeout().
 * Does nothing for exchanges that are done or being reset.
 * Caller must hold the exchange lock.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	fc_exch_hold(ep);		/* hold for timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec))) {
		FC_EXCH_DBG(ep, "Exchange already queued\n");
		fc_exch_release(ep);
	}
}
360
361
362
363
364
365
/**
 * fc_exch_timer_set() - Lock the exchange and arm its timeout handler
 * @ep:		The exchange whose timer is started
 * @timer_msec:	Timeout in milliseconds
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
372
373
374
375
376
377
378
/**
 * fc_exch_done_locked() - Mark an exchange complete
 * @ep: The exchange to complete
 *
 * Sets ESB_ST_COMPLETE; if no recovery qualifier (ESB_ST_REC_QUAL) is
 * outstanding, also marks the exchange FC_EX_DONE and cancels its timer.
 * Caller must hold the exchange lock.
 *
 * Return: 0 when the caller should go on to delete the exchange,
 *	   1 when the exchange was already done or must be kept for
 *	   recovery-qualifier handling.
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * tyring to complete this. But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}
400
/* Sentinel stored in a pool slot to quarantine an XID (see fc_exch_delete()) */
static struct fc_exch fc_quarantine_exch;
402
403
404
405
406
407
408
409
410
411
/**
 * fc_exch_ptr_get() - Look up an exchange in a pool's slot array
 * @pool:  The per-CPU pool; the exchange pointer array sits directly
 *	   after the pool structure in memory
 * @index: Slot index within the pool
 *
 * Caller must hold the pool lock.
 */
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
	return exches[index];
}
418
419
420
421
422
423
424
/**
 * fc_exch_ptr_set() - Store an exchange pointer in a pool slot
 * @pool:  The per-CPU pool (array follows the struct, see fc_exch_ptr_get())
 * @index: Slot index within the pool
 * @ep:	   The exchange to store, or NULL to free the slot
 *
 * Caller must hold the pool lock.
 */
static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
				   struct fc_exch *ep)
{
	((struct fc_exch **)(pool + 1))[index] = ep;
}
430
431
432
433
434
/**
 * fc_exch_delete() - Remove an exchange from its pool and drop a reference
 * @ep: The exchange being deleted
 *
 * Frees the pool slot (remembering it in @left/@right for quick reuse)
 * unless the exchange is quarantined, in which case the slot is poisoned
 * with &fc_quarantine_exch so the XID cannot be reallocated.
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* update cache of free slot */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (!(ep->state & FC_EX_QUARANTINE)) {
		if (pool->left == FC_XID_UNKNOWN)
			pool->left = index;
		else if (pool->right == FC_XID_UNKNOWN)
			pool->right = index;
		else
			pool->next_index = index;
		fc_exch_ptr_set(pool, index, NULL);
	} else {
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
	}
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}
462
/**
 * fc_seq_send_locked() - Send a frame on a sequence
 * @lport: The local port the sequence is sent on
 * @sp:	   The sequence to send on
 * @fp:	   The frame to send; consumed (freed) even on error
 *
 * Fills in SOF/EOF/XIDs via fc_exch_setup_hdr(), updates the sequence
 * count, and hands the frame to the transport.  Caller must hold the
 * exchange lock.
 *
 * Return: 0 on success, -ENXIO if the exchange is already complete or
 *	   abnormal, or the transport's error code.
 */
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error = -ENXIO;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
		fc_frame_free(fp);
		goto out;
	}

	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * update sequence count if this frame is carrying
	 * multiple FC frames when sequence offload is enabled
	 * by LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lport->tt.frame_send(lport, fp);

	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}
515
516
517
518
519
520
521
522
523
524
/**
 * fc_seq_send() - Send a frame on a sequence
 * @lport: The local port the sequence is sent on
 * @sp:	   The sequence to send on
 * @fp:	   The frame to send; consumed even on error
 *
 * Locking wrapper around fc_seq_send_locked().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	int error;
	ep = fc_seq_exch(sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_seq_send_locked(lport, sp, fp);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
536
537
538
539
540
541
542
543
544
545
/**
 * fc_seq_alloc() - Reinitialize the exchange's embedded sequence
 * @ep:	    The exchange whose sequence is (re)used
 * @seq_id: The sequence ID to assign
 *
 * Resets status and count on ep->seq; no allocation actually occurs.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}
556
557
558
559
560
561
/**
 * fc_seq_start_next_locked() - Start the next sequence on an exchange
 * @sp: The prior sequence (used only to find the exchange)
 *
 * Allocates the next sequence ID from ep->seq_id.  Caller must hold
 * the exchange lock.
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
571
572
573
574
575
576
/**
 * fc_seq_start_next() - Lock the exchange and start the next sequence
 * @sp: The prior sequence on the same exchange
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
588
589
590
591
592
593
/*
 * Set the response handler and argument for the exchange backing @sp.
 * If a response handler invocation is in progress on another task
 * (ep->resp_active, see fc_invoke_resp()), sleep until it finishes so
 * the old handler is not replaced while it may still be running.
 * The resp_task check lets the handler itself re-set the response
 * without deadlocking.
 */
void fc_seq_set_resp(struct fc_seq *sp,
		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
		     void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	DEFINE_WAIT(wait);

	spin_lock_bh(&ep->ex_lock);
	while (ep->resp_active && ep->resp_task != current) {
		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&ep->ex_lock);

		schedule();

		spin_lock_bh(&ep->ex_lock);
	}
	finish_wait(&ep->resp_wq, &wait);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}
EXPORT_SYMBOL(fc_seq_set_resp);
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep:		The exchange to abort
 * @timer_msec:	Optional timeout after which the internal timer fires
 *		(0 means no timer is armed here)
 *
 * Sends an ABTS if the exchange has a source ID; otherwise there is
 * nothing on the wire to abort.  Marks the exchange ESB_ST_ABNORMAL so
 * no further payload frames are sent.  Caller must hold the exchange
 * lock.
 *
 * Return: 0 on success, -ENXIO if already complete/aborting,
 *	   -ENOMEM/-ENOBUFS on allocation failure.
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
			    ep->esb_stat, ep->state);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	if (ep->sid) {
		/*
		 * Send an abort for the sequence that timed out.
		 */
		fp = fc_frame_alloc(ep->lp, 0);
		if (fp) {
			ep->esb_stat |= ESB_ST_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
				       FC_TYPE_BLS, FC_FC_END_SEQ |
				       FC_FC_SEQ_INIT, 0);
			error = fc_seq_send_locked(ep->lp, sp, fp);
		} else {
			error = -ENOBUFS;
		}
	} else {
		/*
		 * If not logged into the fabric, don't send ABTS but leave
		 * sequence active until next timeout.
		 */
		error = 0;
	}
	ep->esb_stat |= ESB_ST_ABNORMAL;
	return error;
}
682
683
684
685
686
687
688
689
690
691
/**
 * fc_seq_exch_abort() - Abort the exchange backing a sequence
 * @req_sp:	A sequence on the exchange to be aborted
 * @timer_msec:	Optional timeout, passed through to fc_exch_abort_locked()
 *
 * Return: 0 on success, negative error code otherwise.
 */
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(req_sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_exch_abort_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/**
 * fc_invoke_resp() - Invoke the exchange's response handler, if set
 * @ep: The exchange whose handler is invoked
 * @sp: The sequence passed to the handler
 * @fp: The frame (or ERR_PTR) passed to the handler
 *
 * Tracks the invocation in ep->resp_active/resp_task so that
 * fc_seq_set_resp() can wait for in-flight handler calls before
 * replacing the handler.  resp_task records the single active task
 * (NULL once nested/concurrent callers exist), which lets the handler
 * itself call fc_seq_set_resp() without deadlock.
 *
 * Return: true if a handler was called (and thus took ownership of
 *	   @fp), false otherwise.
 */
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
			   struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	bool res = false;

	spin_lock_bh(&ep->ex_lock);
	ep->resp_active++;
	if (ep->resp_task != current)
		ep->resp_task = !ep->resp_task ? current : NULL;
	resp = ep->resp;
	arg = ep->arg;
	spin_unlock_bh(&ep->ex_lock);

	if (resp) {
		/* Handler runs without the exchange lock held */
		resp(sp, fp, arg);
		res = true;
	}

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		ep->resp_task = NULL;
	spin_unlock_bh(&ep->ex_lock);

	if (ep->resp_active == 0)
		wake_up(&ep->resp_wq);

	return res;
}
759
760
761
762
763
764static void fc_exch_timeout(struct work_struct *work)
765{
766 struct fc_exch *ep = container_of(work, struct fc_exch,
767 timeout_work.work);
768 struct fc_seq *sp = &ep->seq;
769 u32 e_stat;
770 int rc = 1;
771
772 FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
773
774 spin_lock_bh(&ep->ex_lock);
775 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
776 goto unlock;
777
778 e_stat = ep->esb_stat;
779 if (e_stat & ESB_ST_COMPLETE) {
780 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
781 spin_unlock_bh(&ep->ex_lock);
782 if (e_stat & ESB_ST_REC_QUAL)
783 fc_exch_rrq(ep);
784 goto done;
785 } else {
786 if (e_stat & ESB_ST_ABNORMAL)
787 rc = fc_exch_done_locked(ep);
788 spin_unlock_bh(&ep->ex_lock);
789 if (!rc)
790 fc_exch_delete(ep);
791 fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
792 fc_seq_set_resp(sp, NULL, ep->arg);
793 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
794 goto done;
795 }
796unlock:
797 spin_unlock_bh(&ep->ex_lock);
798done:
799
800
801
802 fc_exch_release(ep);
803}
804
805
806
807
808
809
810
811
/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM
 * @lport: The local port the exchange is for
 * @mp:	   The exchange manager to allocate from
 *
 * Picks a free slot in the current CPU's pool — first trying the
 * left/right free-slot caches, then scanning from next_index — and
 * builds the XID from (slot index, CPU, mp->min_xid).  The exchange is
 * returned held and with its ex_lock taken (released by the caller,
 * e.g. fc_exch_resp() or fc_exch_seq_send()).
 *
 * Return: the new exchange, or NULL if the mempool or XID space is
 *	   exhausted.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = get_cpu();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	/* peek cache of free slot */
	if (pool->left != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
			index = pool->left;
			pool->left = FC_XID_UNKNOWN;
			goto hit;
		}
	}
	if (pool->right != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
			index = pool->right;
			pool->right = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	index = pool->next_index;
	/* allocate new exch from pool */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;	/* wrapped: no free slot */
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch	while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 *  update exchange
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	ep->resp_active = 0;
	init_waitqueue_head(&ep->resp_wq);
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
894
895
896
897
898
899
900
901
902
903
904
905
/**
 * fc_exch_alloc() - Allocate an exchange from one of the lport's EMs
 * @lport: The local port that will own the exchange
 * @fp:	   The frame used to select an EM via each anchor's match routine
 *
 * Walks the lport's EM anchor list and allocates from the first EM
 * whose match routine accepts @fp (a NULL match accepts everything).
 *
 * Return: the new exchange (held and locked, see fc_exch_em_alloc()),
 *	   or NULL if no EM could supply one.
 */
static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
				     struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch *ep;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if (!ema->match || ema->match(fp)) {
			ep = fc_exch_em_alloc(lport, ema->mp);
			if (ep)
				return ep;
		}
	}
	return NULL;
}
921
922
923
924
925
926
927static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
928{
929 struct fc_lport *lport = mp->lport;
930 struct fc_exch_pool *pool;
931 struct fc_exch *ep = NULL;
932 u16 cpu = xid & fc_cpu_mask;
933
934 if (xid == FC_XID_UNKNOWN)
935 return NULL;
936
937 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
938 pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
939 lport->host->host_no, lport->port_id, xid, cpu);
940 return NULL;
941 }
942
943 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
944 pool = per_cpu_ptr(mp->pool, cpu);
945 spin_lock_bh(&pool->lock);
946 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
947 if (ep == &fc_quarantine_exch) {
948 FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
949 ep = NULL;
950 }
951 if (ep) {
952 WARN_ON(ep->xid != xid);
953 fc_exch_hold(ep);
954 }
955 spin_unlock_bh(&pool->lock);
956 }
957 return ep;
958}
959
960
961
962
963
964
965
966
967
/**
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete
 * @sp: The sequence that is complete
 *
 * Marks the exchange done, clears the response handler (waiting for any
 * in-flight handler call to finish), and deletes the exchange unless a
 * recovery qualifier keeps it alive.
 */
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_seq_set_resp(sp, NULL, ep->arg);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
982
983
984
985
986
987
988
989
990
/**
 * fc_exch_resp() - Allocate a new exchange for a response frame
 * @lport: The local port the exchange will be on
 * @mp:	   The exchange manager that owns (anchors to) the exchange
 * @fp:	   The received frame we are responding to
 *
 * Sets up the exchange as the responder: source/destination IDs are
 * taken swapped from the frame header, RX_ID is our new XID, OX_ID is
 * the originator's.  The new exchange is returned held.
 *
 * Return: the held exchange, or NULL on allocation failure.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX indicating we're responding on this exchange.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field. Move it to the responder field,
		 * and set the originator XID from the frame.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
/**
 * fc_seq_lookup_recip() - Find or create a sequence for a received frame
 * @lport: The local port the frame was received on
 * @mp:	   The exchange manager to look in
 * @fp:	   The received frame (we are the sequence recipient)
 *
 * For frames with EX_CTX set we are the exchange originator and look up
 * by OX_ID; otherwise we look up (or, for a new first sequence, create)
 * an exchange by RX_ID.  On success, fr_seq(fp) is set to the sequence
 * and the exchange reference from the lookup is retained for the caller.
 *
 * Return: FC_RJT_NONE on success, otherwise the P_RJT reason code;
 *	   on the "rel" paths the exchange is completed and released.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be responder.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are restarting exch */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	spin_lock_bh(&ep->ex_lock);
	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = &ep->seq;
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * Update sequence_id based on incoming last
				 * frame of sequence exchange. This is needed
				 * for FC target where DDP has been used
				 * on target where, stack is indicated only
				 * about last frame's (payload _header) header.
				 * Whereas "seq_id" which is part of
				 * frame_header is allocated by initiator
				 * which is totally different from "seq_id"
				 * allocated when XFER_RDY was sent by target.
				 * To avoid false -ve which results into not
				 * sending RSP, hence write request on other
				 * end never finishes.
				 */
				sp->ssb_stat |= SSB_ST_RESP;
				sp->id = fh->fh_seq_id;
			} else {
				spin_unlock_bh(&ep->ex_lock);

				/* sequence/exch should exist */
				reject = FC_RJT_SEQ_ID;
				goto rel;
			}
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}
1160
1161
1162
1163
1164
1165
1166
1167
1168
/**
 * fc_seq_lookup_orig() - Find a sequence we originated for a received frame
 * @mp: The exchange manager to look in
 * @fp: The received frame (SEQ_CTX set: peer holds sequence context)
 *
 * Looks up the exchange by OX_ID or RX_ID depending on EX_CTX and
 * returns its sequence only if the sequence ID matches.  Also learns
 * the responder's RX_ID if it was still unknown.  The exchange
 * reference taken by the lookup is dropped before returning.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206static void fc_exch_set_addr(struct fc_exch *ep,
1207 u32 orig_id, u32 resp_id)
1208{
1209 ep->oid = orig_id;
1210 if (ep->esb_stat & ESB_ST_RESP) {
1211 ep->sid = resp_id;
1212 ep->did = orig_id;
1213 } else {
1214 ep->sid = orig_id;
1215 ep->did = resp_id;
1216 }
1217}
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/**
 * fc_seq_els_rsp_send() - Dispatch an ELS response for a received frame
 * @fp:	      The received frame being responded to
 * @els_cmd:  The ELS command to send (LS_ACC, LS_RJT, RRQ, or REC)
 * @els_data: Supplies reason/explanation codes for LS_RJT
 */
void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
			 struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(fp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(fp);
		break;
	default:
		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}
EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1249
1250
1251
1252
1253
1254
1255
1256
/**
 * fc_seq_send_last() - Send the last frame of a sequence
 * @sp:	     The sequence to send on
 * @fp:	     The frame to send
 * @rctl:    The R_CTL value for the frame
 * @fh_type: The frame header type
 *
 * Marks the frame LAST_SEQ/END_SEQ/SEQ_INIT and sends it.
 * Caller must hold the exchange lock.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send_locked(ep->lp, sp, fp);
}
1268
1269
1270
1271
1272
1273
1274
1275
/**
 * fc_seq_send_ack() - Send an ACK_1 for a received frame, if the class
 *		       requires one
 * @sp:	   The sequence to send the ACK on
 * @rx_fp: The received frame being acknowledged
 *
 * The ACK echoes selected F_CTL bits with the EX_CTX/SEQ_CTX roles
 * flipped, and copies SEQ_ID/SEQ_CNT from the received frame.
 * An allocation failure simply drops the ACK.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lport = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lport, 0);
		if (!fp) {
			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
			return;
		}

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
		 * Last ACK uses bits 7-6 (continue sequence),
		 * bits 5-4 are meaningful (what kind of ACK to use).
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		lport->tt.frame_send(lport, fp);
	}
}
1329
1330
1331
1332
1333
1334
1335
1336
1337
/**
 * fc_exch_send_ba_rjt() - Send a BLS reject (BA_RJT) for a received frame
 * @rx_fp:  The received frame being rejected
 * @reason: The BA_RJT reason code
 * @explan: The BA_RJT explanation code
 *
 * Builds the BA_RJT by swapping source/destination from @rx_fp and
 * echoing its XIDs and SEQ_CNT.  An allocation failure drops the
 * reject silently.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_seq *sp;
	struct fc_lport *lport;
	unsigned int f_ctl;

	lport = fr_dev(rx_fp);
	sp = fr_seq(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rp));
	if (!fp) {
		FC_EXCH_DBG(fc_seq_exch(sp),
			     "Drop BA_RJT request, out of memory\n");
		return;
	}
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo END_CONN, SEQ_INIT, RETX_SEQ and UNI_TX.
	 * Always set LAST_SEQ, END_SEQ.
	 * Don't set FIRST_SEQ.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	lport->tt.frame_send(lport, fp);
}
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
/**
 * fc_exch_recv_abts() - Handle a received ABTS
 * @ep:	   The exchange the ABTS targets (may be NULL when not found)
 * @rx_fp: The ABTS frame; always consumed
 *
 * Replies with a BA_ACC carrying the last sequence info, takes a
 * recovery-qualifier hold, and arms the R_A_TOV timer.  Unknown or
 * already-complete exchanges get a BA_RJT instead.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;

	FC_EXCH_DBG(ep, "exch: ABTS received\n");
	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
		goto free;
	}

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
		fc_frame_free(fp);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->esb_stat |= ESB_ST_REC_QUAL;
		fc_exch_hold(ep);		/* hold for REC_QUAL */
	}
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	ep->esb_stat |= ESB_ST_ABNORMAL;
	spin_unlock_bh(&ep->ex_lock);

free:
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
	goto free;
}
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
/**
 * fc_seq_assign() - Assign a sequence to a received frame
 * @lport: The local port the frame was received on
 * @fp:	   The frame to assign a sequence to (must not have one yet)
 *
 * Tries each EM anchor whose match routine accepts the frame until one
 * resolves a sequence for it via fc_seq_lookup_recip().
 *
 * Return: the assigned sequence (also stored in fr_seq(fp)), or NULL.
 */
struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;

	WARN_ON(lport != fr_dev(fp));
	WARN_ON(fr_seq(fp));
	fr_seq(fp) = NULL;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if ((!ema->match || ema->match(fp)) &&
		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
			break;
	return fr_seq(fp);
}
EXPORT_SYMBOL(fc_seq_assign);
1490
1491
1492
1493
1494
/**
 * fc_seq_release() - Release the hold on the exchange backing a sequence
 * @sp: The sequence being released
 */
void fc_seq_release(struct fc_seq *sp)
{
	fc_exch_release(fc_seq_exch(sp));
}
EXPORT_SYMBOL(fc_seq_release);
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
/**
 * fc_exch_recv_req() - Handle a received request frame (new sequence,
 *			peer holds exchange context)
 * @lport: The local port that received the request
 * @mp:	   The EM the request was on
 * @fp:	   The request frame; consumed on all paths
 *
 * Resolves the destination VN_Port, assigns/creates the sequence, ACKs
 * the frame if needed, then delivers it to the response handler or,
 * failing that, the lport's receive routine.
 */
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_pf_rjt_reason reject;

	/* We can have the wrong fc_lport at this point with NPIV, which is a
	 * problem now that we know a new exchange needs to be allocated
	 */
	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
	if (!lport) {
		fc_frame_free(fp);
		return;
	}
	fr_dev(fp) = lport;

	BUG_ON(fr_seq(fp));		/* XXX remove later */

	/*
	 * If the RX_ID is 0xffff, don't allocate an exchange.
	 * The upper-level protocol may request one later, if needed.
	 */
	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
		return fc_lport_recv(lport, fp);

	reject = fc_seq_lookup_recip(lport, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		fc_seq_send_ack(sp, fp);
		ep->encaps = fr_encaps(fp);

		/*
		 * Call the receive function.
		 *
		 * The receive function may allocate a new sequence
		 * over the old one, so we shouldn't change the
		 * sequence after this.
		 *
		 * The frame will be freed by the receive function.
		 * If new exch resp handler is valid then call that
		 * first.
		 */
		if (!fc_invoke_resp(ep, sp, fp))
			fc_lport_recv(lport, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
			     reject);
		fc_frame_free(fp);
	}
}
1564
1565
1566
1567
1568
1569
1570
1571
/**
 * fc_exch_recv_seq_resp() - Handle a sequence response on an exchange
 *			     we originated
 * @mp: The EM the exchange belongs to
 * @fp: The response frame; consumed on all paths
 *
 * Validates the exchange by OX_ID and source/destination IDs, learns
 * the RX_ID, ACKs if required, completes the exchange when this is the
 * last frame of the last sequence (non-FCP), and delivers the frame to
 * the response handler.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	sp = &ep->seq;
	if (fc_sof_is_init(sof)) {
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;

	spin_lock_bh(&ep->ex_lock);
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);

	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc) {
			fc_exch_delete(ep);
		} else {
			FC_EXCH_DBG(ep, "ep is completed already,"
					"hence skip calling the resp\n");
			goto skip_resp;
		}
	}

	/*
	 * Call the receive function.
	 * The sequence is held (has a refcnt) for us,
	 * but not for the receive function.
	 *
	 * The receive function may allocate a new sequence
	 * over the old one, so we shouldn't change the
	 * sequence after this.
	 *
	 * The frame will be freed by the receive function.
	 * If new exch resp handler is valid then call that
	 * first.
	 */
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);

skip_resp:
	fc_exch_release(ep);
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}
1659
1660
1661
1662
1663
1664
1665
1666static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1667{
1668 struct fc_seq *sp;
1669
1670 sp = fc_seq_lookup_orig(mp, fp);
1671
1672 if (!sp)
1673 atomic_inc(&mp->stats.xid_not_found);
1674 else
1675 atomic_inc(&mp->stats.non_bls_resp);
1676
1677 fc_frame_free(fp);
1678}
1679
1680
1681
1682
1683
1684
1685
1686
1687
/**
 * fc_exch_abts_resp() - Handle a response (BA_ACC/BA_RJT) to an ABTS we sent
 * @ep: The exchange that was aborted
 * @fp: The response frame; consumed if no handler takes it
 *
 * Cancels the pending exchange timer, takes a recovery-qualifier hold
 * and re-arms the R_A_TOV timer when the BA_ACC indicates frames may
 * still be outstanding, completes the exchange when this is the last
 * sequence (non-FCP), and notifies the response handler.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
		fc_exch_release(ep);	/* release from pending timer hold */
	}

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);  /* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	/* do we need to do some other checks here. Can we reuse more of
	 * fc_exch_recv_seq_resp
	 */
	sp = &ep->seq;
	/*
	 * do we want to check END_SEQ as well as LAST_SEQ here?
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_hold(ep);	/* keep ep alive across resp invocation */
	if (!rc)
		fc_exch_delete(ep);
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);
	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);
	fc_exch_release(ep);
}
1755
1756
1757
1758
1759
1760
1761
1762
1763
/**
 * fc_exch_recv_bls() - Handle a received basic link service frame
 * @mp: The EM the exchange belongs to
 * @fp: The BLS frame; consumed on all paths
 *
 * Routes ACKs (silently dropped), BA_ACC/BA_RJT responses (to
 * fc_exch_abts_resp()), and incoming ABTS requests (to
 * fc_exch_recv_abts()).  The exchange is looked up by OX_ID or RX_ID
 * depending on EX_CTX; the lookup reference is dropped at the end.
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated.
		 * This should only be ACKs for class 2 or F.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			if (ep)
				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
					    fh->fh_r_ctl,
					    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			if (ep)
				fc_exch_recv_abts(ep, fp);
			else
				fc_frame_free(fp);
			break;
		default:			/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
}
1821
1822
1823
1824
1825
1826
1827
1828
1829static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1830{
1831 struct fc_lport *lport;
1832 struct fc_els_ls_acc *acc;
1833 struct fc_frame *fp;
1834 struct fc_seq *sp;
1835
1836 lport = fr_dev(rx_fp);
1837 sp = fr_seq(rx_fp);
1838 fp = fc_frame_alloc(lport, sizeof(*acc));
1839 if (!fp) {
1840 FC_EXCH_DBG(fc_seq_exch(sp),
1841 "exch: drop LS_ACC, out of memory\n");
1842 return;
1843 }
1844 acc = fc_frame_payload_get(fp, sizeof(*acc));
1845 memset(acc, 0, sizeof(*acc));
1846 acc->la_cmd = ELS_LS_ACC;
1847 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1848 lport->tt.frame_send(lport, fp);
1849}
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1861 enum fc_els_rjt_explan explan)
1862{
1863 struct fc_lport *lport;
1864 struct fc_els_ls_rjt *rjt;
1865 struct fc_frame *fp;
1866 struct fc_seq *sp;
1867
1868 lport = fr_dev(rx_fp);
1869 sp = fr_seq(rx_fp);
1870 fp = fc_frame_alloc(lport, sizeof(*rjt));
1871 if (!fp) {
1872 FC_EXCH_DBG(fc_seq_exch(sp),
1873 "exch: drop LS_ACC, out of memory\n");
1874 return;
1875 }
1876 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1877 memset(rjt, 0, sizeof(*rjt));
1878 rjt->er_cmd = ELS_LS_RJT;
1879 rjt->er_reason = reason;
1880 rjt->er_explan = explan;
1881 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1882 lport->tt.frame_send(lport, fp);
1883}
1884
1885
1886
1887
1888
1889
1890
/**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
 *
 * Cancels the timer, drops any recovery-qualifier hold, completes the
 * exchange and, if it was not already completed, invokes the response
 * handler with -FC_EX_CLOSED before clearing the resp pointer.
 */
static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	int rc = 1;	/* rc == 0 means exchange completed by us below */

	spin_lock_bh(&ep->ex_lock);
	ep->state |= FC_EX_RST_CLEANUP;
	fc_exch_timer_cancel(ep);
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_hold(ep);	/* keep ep alive across delete/resp below */

	if (!rc) {
		fc_exch_delete(ep);
	} else {
		FC_EXCH_DBG(ep, "ep is completed already,"
				"hence skip calling the resp\n");
		goto skip_resp;
	}

	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
skip_resp:
	fc_seq_set_resp(sp, NULL, ep->arg);
	fc_exch_release(ep);
}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
/**
 * fc_exch_pool_reset() - Reset a per-cpu exchange pool
 * @lport: The local port whose exchanges are to be reset
 * @pool:  The per-cpu exchange pool to reset
 * @sid:   The source ID to match, or 0 to match any source
 * @did:   The destination ID to match, or 0 to match any destination
 *
 * Resets every exchange in the pool belonging to @lport that matches
 * the optional sid/did filters, then resets the pool's allocation
 * hints (next_index, left, right).
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);	/* keep ep valid while unlocked */
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case while the lock
			 * was dropped multiple eps were released.
			 */
			goto restart;
		}
	}
	pool->next_index = 0;
	pool->left = FC_XID_UNKNOWN;
	pool->right = FC_XID_UNKNOWN;
	spin_unlock_bh(&pool->lock);
}
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1980{
1981 struct fc_exch_mgr_anchor *ema;
1982 unsigned int cpu;
1983
1984 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1985 for_each_possible_cpu(cpu)
1986 fc_exch_pool_reset(lport,
1987 per_cpu_ptr(ema->mp->pool, cpu),
1988 sid, did);
1989 }
1990}
1991EXPORT_SYMBOL(fc_exch_mgr_reset);
1992
1993
1994
1995
1996
1997
1998
1999
2000static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
2001{
2002 struct fc_exch_mgr_anchor *ema;
2003
2004 list_for_each_entry(ema, &lport->ema_list, ema_list)
2005 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
2006 return fc_exch_find(ema->mp, xid);
2007 return NULL;
2008}
2009
2010
2011
2012
2013
2014
2015
/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC frame; not freed here
 *
 * Validates the REC against the referenced exchange and replies with
 * either a REC accept (exchange state snapshot) or an LS_RJT.
 */
static void fc_exch_els_rec(struct fc_frame *rfp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 xid, rxid, oxid;

	lport = fr_dev(rfp);
	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	explan = ELS_EXPL_OXID_RXID;
	/* the originator's S_ID picks which XID identifies the exchange */
	if (sid == fc_host_port_id(lport->host))
		xid = oxid;
	else
		xid = rxid;
	if (xid == FC_XID_UNKNOWN) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: invalid rxid %x oxid %x\n",
			     sid, rxid, oxid);
		goto reject;
	}
	ep = fc_exch_lookup(lport, xid);
	if (!ep) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: rxid %x oxid %x not found\n",
			     sid, rxid, oxid);
		goto reject;
	}
	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
		    sid, rxid, oxid);
	/* found exchange must match the request's originator and XIDs */
	if (ep->oid != sid || oxid != ep->oxid)
		goto rel;
	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
		goto rel;
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
		goto out;
	}

	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
out:
	fc_exch_release(ep);	/* release hold from fc_exch_lookup */
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(rfp, reason, explan);
}
2092
2093
2094
2095
2096
2097
2098
2099
2100
/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp:	 The sequence that the RRQ is on
 * @fp:	 The RRQ response frame, or an ERR_PTR on error/timeout
 * @arg: The exchange that was aborted (set up by fc_exch_rrq())
 *
 * On LS_ACC, LS_RJT, closed or timed-out exchanges the recovery
 * qualifier hold on the aborted exchange is dropped.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *aborted_ep = arg;
	unsigned int op;

	if (IS_ERR(fp)) {
		int err = PTR_ERR(fp);

		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
			goto cleanup;
		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
			    "frame error %d\n", err);
		return;
	}

	op = fc_frame_payload_op(fp);
	fc_frame_free(fp);

	switch (op) {
	case ELS_LS_RJT:
		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
		fallthrough;
	case ELS_LS_ACC:
		goto cleanup;
	default:
		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
			    op);
		return;
	}

cleanup:
	fc_exch_done(&aborted_ep->seq);
	/* drop hold for rec qual */
	fc_exch_release(aborted_ep);
}
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
/**
 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
 * @lport:	The local port to send the frame on
 * @fp:		The frame to be sent
 * @resp:	The response handler for this request
 * @destructor:	The destructor for the exchange
 * @arg:	The argument to be passed to the response handler
 * @timer_msec:	The timeout period for the exchange, or 0 for no timer
 *
 * Returns the new sequence on success, or NULL if the exchange could
 * not be allocated or the frame could not be sent.
 *
 * NOTE(review): ep->ex_lock appears to be held on return from
 * fc_exch_alloc() — the unlock calls below pair with it. Confirm
 * against fc_exch_em_alloc() before restructuring.
 */
struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
				struct fc_frame *fp,
				void (*resp)(struct fc_seq *,
					     struct fc_frame *fp,
					     void *arg),
				void (*destructor)(struct fc_seq *, void *),
				void *arg, u32 timer_msec)
{
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	struct fc_frame_header *fh;
	struct fc_fcp_pkt *fsp = NULL;
	int rc = 1;

	ep = fc_exch_alloc(lport, fp);
	if (!ep) {
		fc_frame_free(fp);
		return NULL;
	}
	ep->esb_stat |= ESB_ST_SEQ_INIT;
	fh = fc_frame_header_get(fp);
	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
	ep->resp = resp;
	ep->destructor = destructor;
	ep->arg = arg;
	ep->r_a_tov = lport->r_a_tov;
	ep->lp = lport;
	sp = &ep->seq;

	ep->fh_type = fh->fh_type;	/* save for possible timeout handling */
	ep->f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
	sp->cnt++;

	/* set up DDP offload for large-receive capable XIDs on FCP commands */
	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
		fsp = fr_fsp(fp);
		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
	}

	if (unlikely(lport->tt.frame_send(lport, fp)))
		goto err;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq on later frames */

	if (ep->f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return sp;
err:
	if (fsp)
		fc_fcp_ddp_done(fsp);	/* tear down DDP set up above */
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
2235
2236
2237
2238
2239
2240
2241
2242
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ for
 *
 * On send failure the recovery qualifier is kept and the exchange timer
 * is re-armed so the RRQ can be retried, unless the exchange is already
 * done or being cleaned up, in which case the rec-qual hold is dropped.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lport;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lport = ep->lp;

	fp = fc_frame_alloc(lport, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	/* if we are the responder, the RRQ goes back to the originator */
	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
			     lport->e_d_tov))
		return;

retry:
	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);	/* retry on timeout */
	spin_unlock_bh(&ep->ex_lock);
}
2288
2289
2290
2291
2292
/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
 * @fp: The RRQ frame; not freed here, see fc_seq_ls_acc() / fc_seq_ls_rjt()
 *
 * Validates the RRQ against the referenced exchange, clears its
 * recovery-qualifier state and answers with LS_ACC, or rejects with
 * LS_RJT and an appropriate explanation code.
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
	struct fc_lport *lport;
	struct fc_exch *ep = NULL;	/* request exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	lport = fr_dev(fp);
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * lookup subject exchange.
	 */
	sid = ntoh24(rp->rrq_s_id);		/* subject source */
	xid = fc_host_port_id(lport->host) == sid ?
			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_lookup(lport, xid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE)
		fc_exch_timer_cancel(ep);

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(fp);
	goto out;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
reject:
	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
	if (ep)
		fc_exch_release(ep);	/* release hold from fc_exch_lookup */
}
2356
2357
2358
2359
2360
2361void fc_exch_update_stats(struct fc_lport *lport)
2362{
2363 struct fc_host_statistics *st;
2364 struct fc_exch_mgr_anchor *ema;
2365 struct fc_exch_mgr *mp;
2366
2367 st = &lport->host_stats;
2368
2369 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2370 mp = ema->mp;
2371 st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2372 st->fc_no_free_exch_xid +=
2373 atomic_read(&mp->stats.no_free_exch_xid);
2374 st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2375 st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2376 st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2377 st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2378 }
2379}
2380EXPORT_SYMBOL(fc_exch_update_stats);
2381
2382
2383
2384
2385
2386
2387
2388struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2389 struct fc_exch_mgr *mp,
2390 bool (*match)(struct fc_frame *))
2391{
2392 struct fc_exch_mgr_anchor *ema;
2393
2394 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2395 if (!ema)
2396 return ema;
2397
2398 ema->mp = mp;
2399 ema->match = match;
2400
2401 list_add_tail(&ema->ema_list, &lport->ema_list);
2402 kref_get(&mp->kref);
2403 return ema;
2404}
2405EXPORT_SYMBOL(fc_exch_mgr_add);
2406
2407
2408
2409
2410
2411static void fc_exch_mgr_destroy(struct kref *kref)
2412{
2413 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2414
2415 mempool_destroy(mp->ep_pool);
2416 free_percpu(mp->pool);
2417 kfree(mp);
2418}
2419
2420
2421
2422
2423
2424void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2425{
2426
2427 list_del(&ema->ema_list);
2428 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2429 kfree(ema);
2430}
2431EXPORT_SYMBOL(fc_exch_mgr_del);
2432
2433
2434
2435
2436
2437
2438int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2439{
2440 struct fc_exch_mgr_anchor *ema, *tmp;
2441
2442 list_for_each_entry(ema, &src->ema_list, ema_list) {
2443 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2444 goto err;
2445 }
2446 return 0;
2447err:
2448 list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2449 fc_exch_mgr_del(ema);
2450 return -ENOMEM;
2451}
2452EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2453
2454
2455
2456
2457
2458
2459
2460
2461
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport:   The local port that the new EM will be associated with
 * @class:   The default FC class for new exchanges
 * @min_xid: The minimum XID for exchanges from the new EM
 * @max_xid: The maximum XID for exchanges from the new EM
 * @match:   The match routine for the new EM
 *
 * The XID range is divided evenly across the per-cpu pools and may be
 * shrunk (mp->max_xid lowered) so each pool fits in PCPU_MIN_UNIT_SIZE.
 * Returns the new EM (anchored on @lport) or NULL on failure.
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	/* min_xid must be aligned to the per-cpu slicing mask */
	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for the exchange manager
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	mp->lport = lport;

	mp->min_xid = min_xid;

	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
		sizeof(struct fc_exch *);
	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
			min_xid - 1;
	} else {
		mp->max_xid = max_xid;
		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
			(fc_cpu_mask + 1);
	}

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Setup per cpu exch pool with entire exchange id range equally
	 * divided across all cpus. The exch pointers array memory is
	 * allocated for exch range per pool.
	 */
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize per cpu exch pool
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		pool->next_index = 0;
		pool->left = FC_XID_UNKNOWN;
		pool->right = FC_XID_UNKNOWN;
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lport, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * Above kref_init() set mp->kref to 1, and the call to
	 * fc_exch_mgr_add() incremented mp->kref again, so adjust
	 * that extra increment.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
2552
2553
2554
2555
2556
2557void fc_exch_mgr_free(struct fc_lport *lport)
2558{
2559 struct fc_exch_mgr_anchor *ema, *next;
2560
2561 flush_workqueue(fc_exch_workqueue);
2562 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2563 fc_exch_mgr_del(ema);
2564}
2565EXPORT_SYMBOL(fc_exch_mgr_free);
2566
2567
2568
2569
2570
2571
2572
2573
2574static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2575 struct fc_lport *lport,
2576 struct fc_frame_header *fh)
2577{
2578 struct fc_exch_mgr_anchor *ema;
2579 u16 xid;
2580
2581 if (f_ctl & FC_FC_EX_CTX)
2582 xid = ntohs(fh->fh_ox_id);
2583 else {
2584 xid = ntohs(fh->fh_rx_id);
2585 if (xid == FC_XID_UNKNOWN)
2586 return list_entry(lport->ema_list.prev,
2587 typeof(*ema), ema_list);
2588 }
2589
2590 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2591 if ((xid >= ema->mp->min_xid) &&
2592 (xid <= ema->mp->max_xid))
2593 return ema;
2594 }
2595 return NULL;
2596}
2597
2598
2599
2600
2601
/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp:	   The received frame; consumed on all paths (freed here or
 *	   handed to one of the receive routines)
 */
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl;

	/* lport lock ? */
	if (!lport || lport->state == LPORT_ST_DISABLED) {
		FC_LIBFC_DBG("Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	ema = fc_find_ema(f_ctl, lport, fh);
	if (!ema) {
		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
			     "fc_ctl <0x%x>, xid <0x%x>\n",
			     f_ctl,
			     (f_ctl & FC_FC_EX_CTX) ?
			     ntohs(fh->fh_ox_id) :
			     ntohs(fh->fh_rx_id));
		fc_frame_free(fp);
		return;
	}

	/*
	 * If frame is marked invalid, just drop it.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		/* strip the fill bytes from the end of the last frame */
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		fallthrough;
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else	/* no EX_CTX and no SEQ_CTX: a new request */
			fc_exch_recv_req(lport, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);
2655
2656
2657
2658
2659
2660int fc_exch_init(struct fc_lport *lport)
2661{
2662 if (!lport->tt.exch_mgr_reset)
2663 lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2664
2665 return 0;
2666}
2667EXPORT_SYMBOL(fc_exch_init);
2668
2669
2670
2671
2672int fc_setup_exch_mgr(void)
2673{
2674 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2675 0, SLAB_HWCACHE_ALIGN, NULL);
2676 if (!fc_em_cachep)
2677 return -ENOMEM;
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2694 fc_cpu_mask = (1 << fc_cpu_order) - 1;
2695
2696 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2697 if (!fc_exch_workqueue)
2698 goto err;
2699 return 0;
2700err:
2701 kmem_cache_destroy(fc_em_cachep);
2702 return -ENOMEM;
2703}
2704
2705
2706
2707
/**
 * fc_destroy_exch_mgr() - Tear down the module-wide exchange resources
 *
 * Destroys the exchange workqueue before the slab cache, undoing
 * fc_setup_exch_mgr().
 */
void fc_destroy_exch_mgr(void)
{
	destroy_workqueue(fc_exch_workqueue);
	kmem_cache_destroy(fc_em_cachep);
}