/*
 * Fibre Channel exchange and sequence handling.
 */
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include "fc_libfc.h"

u16 fc_cpu_mask;
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order;
static struct kmem_cache *fc_em_cachep;
static struct workqueue_struct *fc_exch_workqueue;
/**
 * struct fc_exch_pool - Per cpu exchange pool
 * @lock:	  Exchange pool lock
 * @ex_list:	  List of exchanges in the pool
 * @next_index:	  Next possible free exchange index
 * @total_exches: Total allocated exchanges in the pool
 * @left:	  Cache of free slot in exch array
 * @right:	  Cache of free slot in exch array
 *
 * This structure manages per cpu exchanges in an array of exchange
 * pointers. The array is allocated immediately following the
 * struct fc_exch_pool memory for the range of exchanges assigned to
 * the per cpu pool.
 */
struct fc_exch_pool {
	spinlock_t	 lock;
	struct list_head ex_list;
	u16		 next_index;
	u16		 total_exches;

	u16		 left;
	u16		 right;
} ____cacheline_aligned_in_smp;
/**
 * struct fc_exch_mgr - The Exchange Manager (EM)
 * @pool:	    Per cpu exchange pool
 * @ep_pool:	    Reserve mempool of exchange structures
 * @class:	    Default class for new sequences
 * @kref:	    Reference counter
 * @min_xid:	    Minimum exchange ID
 * @max_xid:	    Maximum exchange ID
 * @pool_max_index: Max exchange array index in an exchange pool
 * @stats:	    Statistics structure
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t	*ep_pool;
	enum fc_class	class;
	struct kref	kref;
	u16		min_xid;
	u16		max_xid;
	u16		pool_max_index;

	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
/**
 * struct fc_exch_mgr_anchor - primary structure for list of EMs
 * @ema_list: Exchange Manager Anchor list
 * @mp:	      Exchange Manager associated with this anchor
 * @match:    Routine to determine if this anchor's EM should be used
 *
 * When walking the list of anchors the match routine will be called
 * for each anchor to determine if that EM should be used. The last
 * anchor in the list will always match to handle any exchanges not
 * handled by other EMs.
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};

static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_frame *);
static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
			  enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_frame *);
static void fc_exch_els_rrq(struct fc_frame *);
/*
 * Structure and function definitions for managing Fibre Channel
 * Exchanges and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch and
 * fc_seq.  An fc_exch holds the state for one exchange and links to its
 * currently active sequence (fc_seq).  An fc_exch_mgr owns a range of
 * exchange IDs (XIDs) and the per-CPU pools from which exchanges are
 * allocated; the low fc_cpu_order bits of an XID select the owning CPU.
 *
 * Locking notes: per-exchange state is protected by ep->ex_lock and the
 * per-CPU pools by pool->lock, both taken with spin_lock_bh() since
 * frames are also handled in soft-IRQ context.
 */

/*
 * R_CTL names, used only for debug messages.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
/**
 * fc_exch_name_lookup() - Return a name for an opcode from a lookup table
 * @op:	       The opcode to be looked up
 * @table:     The opcode/name table
 * @max_index: The size of the table
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)
		name = table[op];
	if (!name)
		name = "unknown";
	return name;
}

/**
 * fc_exch_rctl_name() - Return a string describing an R_CTL value
 * @op: The R_CTL value to be converted to a string
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}

/**
 * fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
/**
 * fc_exch_setup_hdr() - Initialize a FC header from exchange state
 * @ep:	   The exchange that the header is for
 * @fp:	   The frame whose header is to be modified
 * @f_ctl: F_CTL bits that will be used for the frame header
 *
 * Sets the SOF/EOF markers, pads the final frame of a sequence and
 * fills in the OX_ID, RX_ID, SEQ_ID and SEQ_CNT fields.
 */
271static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
272 u32 f_ctl)
273{
274 struct fc_frame_header *fh = fc_frame_header_get(fp);
275 u16 fill;
276
277 fr_sof(fp) = ep->class;
278 if (ep->seq.cnt)
279 fr_sof(fp) = fc_sof_normal(ep->class);
280
281 if (f_ctl & FC_FC_END_SEQ) {
282 fr_eof(fp) = FC_EOF_T;
283 if (fc_sof_needs_ack(ep->class))
284 fr_eof(fp) = FC_EOF_N;
		/*
		 * A frame that ends the sequence must end on a 4-byte
		 * boundary.  Pad the payload if necessary and record the
		 * number of fill bytes in the F_CTL field of the header.
		 */
294 fill = fr_len(fp) & 3;
295 if (fill) {
296 fill = 4 - fill;
297
298 skb_put(fp_skb(fp), fill);
299 hton24(fh->fh_f_ctl, f_ctl | fill);
300 }
301 } else {
302 WARN_ON(fr_len(fp) % 4 != 0);
303 fr_eof(fp) = FC_EOF_N;
304 }
305
306
307
308
309
310 fh->fh_ox_id = htons(ep->oxid);
311 fh->fh_rx_id = htons(ep->rxid);
312 fh->fh_seq_id = ep->seq.id;
313 fh->fh_seq_cnt = htons(ep->seq.cnt);
314}
/**
 * fc_exch_release() - Decrement an exchange's reference count
 * @ep: Exchange to be released
 *
 * When the reference count reaches zero the destructor, if any, is
 * called and the exchange is returned to its manager's mempool.
 */
323static void fc_exch_release(struct fc_exch *ep)
324{
325 struct fc_exch_mgr *mp;
326
327 if (atomic_dec_and_test(&ep->ex_refcnt)) {
328 mp = ep->em;
329 if (ep->destructor)
330 ep->destructor(&ep->seq, ep->arg);
331 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
332 mempool_free(ep, mp->ep_pool);
333 }
334}
335
336
337
338
339
340static inline void fc_exch_timer_cancel(struct fc_exch *ep)
341{
342 if (cancel_delayed_work(&ep->timeout_work)) {
343 FC_EXCH_DBG(ep, "Exchange timer canceled\n");
344 atomic_dec(&ep->ex_refcnt);
345 }
346}
347
348
349
350
351
352
353
354
355
356
357static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
358 unsigned int timer_msec)
359{
360 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
361 return;
362
363 FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
364
365 if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
366 msecs_to_jiffies(timer_msec)))
367 fc_exch_hold(ep);
368}
369
370
371
372
373
374
375static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
376{
377 spin_lock_bh(&ep->ex_lock);
378 fc_exch_timer_set_locked(ep, timer_msec);
379 spin_unlock_bh(&ep->ex_lock);
380}
/**
 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
 * @ep: The exchange that is complete
 *
 * Returns 0 if the caller may free the exchange, non-zero if it must be
 * kept (for example while a recovery qualifier is still held).
 */
386static int fc_exch_done_locked(struct fc_exch *ep)
387{
388 int rc = 1;
389
390
391
392
393
394
395
396 ep->resp = NULL;
397 if (ep->state & FC_EX_DONE)
398 return rc;
399 ep->esb_stat |= ESB_ST_COMPLETE;
400
401 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
402 ep->state |= FC_EX_DONE;
403 fc_exch_timer_cancel(ep);
404 rc = 0;
405 }
406 return rc;
407}
/**
 * fc_exch_ptr_get() - Return an exchange from an exchange pool
 * @pool:  Exchange pool to get the exchange from
 * @index: Index of the exchange within the pool
 *
 * The array of exchange pointers immediately follows the
 * struct fc_exch_pool in memory.
 */
418static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
419 u16 index)
420{
421 struct fc_exch **exches = (struct fc_exch **)(pool + 1);
422 return exches[index];
423}
424
425
426
427
428
429
430
431static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
432 struct fc_exch *ep)
433{
434 ((struct fc_exch **)(pool + 1))[index] = ep;
435}
/**
 * fc_exch_delete() - Delete an exchange
 * @ep: The exchange to be deleted
 *
 * Removes the exchange from its per-CPU pool and drops the pool's
 * reference to it.
 */
441static void fc_exch_delete(struct fc_exch *ep)
442{
443 struct fc_exch_pool *pool;
444 u16 index;
445
446 pool = ep->pool;
447 spin_lock_bh(&pool->lock);
448 WARN_ON(pool->total_exches <= 0);
449 pool->total_exches--;
	/* update cache of free slot */
452 index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
453 if (pool->left == FC_XID_UNKNOWN)
454 pool->left = index;
455 else if (pool->right == FC_XID_UNKNOWN)
456 pool->right = index;
457 else
458 pool->next_index = index;
459
460 fc_exch_ptr_set(pool, index, NULL);
461 list_del(&ep->ex_list);
462 spin_unlock_bh(&pool->lock);
463 fc_exch_release(ep);
464}
465
466static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
467 struct fc_frame *fp)
468{
469 struct fc_exch *ep;
470 struct fc_frame_header *fh = fc_frame_header_get(fp);
471 int error;
472 u32 f_ctl;
473 u8 fh_type = fh->fh_type;
474
475 ep = fc_seq_exch(sp);
476 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
477
478 f_ctl = ntoh24(fh->fh_f_ctl);
479 fc_exch_setup_hdr(ep, fp, f_ctl);
480 fr_encaps(fp) = ep->encaps;
481
482
483
484
485
486
487 if (fr_max_payload(fp))
488 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
489 fr_max_payload(fp));
490 else
491 sp->cnt++;
492
493
494
495
496 error = lport->tt.frame_send(lport, fp);
497
498 if (fh_type == FC_TYPE_BLS)
499 goto out;
	/*
	 * Update the exchange state only for non-BLS frames (BLS frames
	 * were skipped above): remember the F_CTL bits of the last frame
	 * sent, clear FIRST_SEQ for subsequent frames, and drop sequence
	 * initiative if it was transferred.
	 */
506 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;
507 if (f_ctl & FC_FC_SEQ_INIT)
508 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
509out:
510 return error;
511}
512
513
514
515
516
517
518
519static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
520 struct fc_frame *fp)
521{
522 struct fc_exch *ep;
523 int error;
524 ep = fc_seq_exch(sp);
525 spin_lock_bh(&ep->ex_lock);
526 error = fc_seq_send_locked(lport, sp, fp);
527 spin_unlock_bh(&ep->ex_lock);
528 return error;
529}
530
531
532
533
534
535
536
537
538
539
540static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
541{
542 struct fc_seq *sp;
543
544 sp = &ep->seq;
545 sp->ssb_stat = 0;
546 sp->cnt = 0;
547 sp->id = seq_id;
548 return sp;
549}
550
551
552
553
554
555
556static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
557{
558 struct fc_exch *ep = fc_seq_exch(sp);
559
560 sp = fc_seq_alloc(ep, ep->seq_id++);
561 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
562 ep->f_ctl, sp->id);
563 return sp;
564}
565
566
567
568
569
570
571static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
572{
573 struct fc_exch *ep = fc_seq_exch(sp);
574
575 spin_lock_bh(&ep->ex_lock);
576 sp = fc_seq_start_next_locked(sp);
577 spin_unlock_bh(&ep->ex_lock);
578
579 return sp;
580}
581
582
583
584
585static void fc_seq_set_resp(struct fc_seq *sp,
586 void (*resp)(struct fc_seq *, struct fc_frame *,
587 void *),
588 void *arg)
589{
590 struct fc_exch *ep = fc_seq_exch(sp);
591
592 spin_lock_bh(&ep->ex_lock);
593 ep->resp = resp;
594 ep->arg = arg;
595 spin_unlock_bh(&ep->ex_lock);
596}
/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep:		The exchange to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Starts a new sequence and sends an ABTS for the exchange.
 * Locking notes: called with the exchange lock held.
 *
 * Returns 0 on success, else an error code.
 */
607static int fc_exch_abort_locked(struct fc_exch *ep,
608 unsigned int timer_msec)
609{
610 struct fc_seq *sp;
611 struct fc_frame *fp;
612 int error;
613
614 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
615 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
616 return -ENXIO;
617
618
619
620
621 sp = fc_seq_start_next_locked(&ep->seq);
622 if (!sp)
623 return -ENOMEM;
624
625 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
626 if (timer_msec)
627 fc_exch_timer_set_locked(ep, timer_msec);
628
629
630
631
632
633 if (!ep->sid)
634 return 0;
635
636
637
638
639 fp = fc_frame_alloc(ep->lp, 0);
640 if (fp) {
641 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
642 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
643 error = fc_seq_send_locked(ep->lp, sp, fp);
644 } else
645 error = -ENOBUFS;
646 return error;
647}
648
649
650
651
652
653
654
655
656
657
658static int fc_seq_exch_abort(const struct fc_seq *req_sp,
659 unsigned int timer_msec)
660{
661 struct fc_exch *ep;
662 int error;
663
664 ep = fc_seq_exch(req_sp);
665 spin_lock_bh(&ep->ex_lock);
666 error = fc_exch_abort_locked(ep, timer_msec);
667 spin_unlock_bh(&ep->ex_lock);
668 return error;
669}
/**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct identifying the exchange that timed out
 */
675static void fc_exch_timeout(struct work_struct *work)
676{
677 struct fc_exch *ep = container_of(work, struct fc_exch,
678 timeout_work.work);
679 struct fc_seq *sp = &ep->seq;
680 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
681 void *arg;
682 u32 e_stat;
683 int rc = 1;
684
685 FC_EXCH_DBG(ep, "Exchange timed out\n");
686
687 spin_lock_bh(&ep->ex_lock);
688 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
689 goto unlock;
690
691 e_stat = ep->esb_stat;
692 if (e_stat & ESB_ST_COMPLETE) {
693 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
694 spin_unlock_bh(&ep->ex_lock);
695 if (e_stat & ESB_ST_REC_QUAL)
696 fc_exch_rrq(ep);
697 goto done;
698 } else {
699 resp = ep->resp;
700 arg = ep->arg;
701 ep->resp = NULL;
702 if (e_stat & ESB_ST_ABNORMAL)
703 rc = fc_exch_done_locked(ep);
704 spin_unlock_bh(&ep->ex_lock);
705 if (!rc)
706 fc_exch_delete(ep);
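		/*
		 * Notify the caller of the timeout with an error pointer
		 * rather than a frame, then abort the exchange.
		 */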
707 if (resp)
708 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
709 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
710 goto done;
711 }
712unlock:
713 spin_unlock_bh(&ep->ex_lock);
714done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
718 fc_exch_release(ep);
719}
/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM
 * @lport: The local port that the exchange is for
 * @mp:	   The exchange manager that will allocate the exchange
 *
 * Returns a pointer to the allocated fc_exch with its exchange lock held.
 */
728static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
729 struct fc_exch_mgr *mp)
730{
731 struct fc_exch *ep;
732 unsigned int cpu;
733 u16 index;
734 struct fc_exch_pool *pool;
735
736
737 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
738 if (!ep) {
739 atomic_inc(&mp->stats.no_free_exch);
740 goto out;
741 }
742 memset(ep, 0, sizeof(*ep));
743
744 cpu = get_cpu();
745 pool = per_cpu_ptr(mp->pool, cpu);
746 spin_lock_bh(&pool->lock);
747 put_cpu();
	/* peek cache of free slot */
750 if (pool->left != FC_XID_UNKNOWN) {
751 index = pool->left;
752 pool->left = FC_XID_UNKNOWN;
753 goto hit;
754 }
755 if (pool->right != FC_XID_UNKNOWN) {
756 index = pool->right;
757 pool->right = FC_XID_UNKNOWN;
758 goto hit;
759 }
760
761 index = pool->next_index;
762
763 while (fc_exch_ptr_get(pool, index)) {
764 index = index == mp->pool_max_index ? 0 : index + 1;
765 if (index == pool->next_index)
766 goto err;
767 }
768 pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
769hit:
770 fc_exch_hold(ep);
771 spin_lock_init(&ep->ex_lock);
	/*
	 * Hold the exchange lock for the caller: the exchange is returned
	 * locked so it can be fully initialized before it becomes visible
	 * to other contexts.
	 */
777 spin_lock_bh(&ep->ex_lock);
778
779 fc_exch_ptr_set(pool, index, ep);
780 list_add_tail(&ep->ex_list, &pool->ex_list);
781 fc_seq_alloc(ep, ep->seq_id++);
782 pool->total_exches++;
783 spin_unlock_bh(&pool->lock);
	/*
	 * Assemble the XID: the pool index in the high bits (shifted by
	 * fc_cpu_order), the CPU number in the low bits, offset by the
	 * manager's min_xid.
	 */
788 ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
789 ep->em = mp;
790 ep->pool = pool;
791 ep->lp = lport;
792 ep->f_ctl = FC_FC_FIRST_SEQ;
793 ep->rxid = FC_XID_UNKNOWN;
794 ep->class = mp->class;
795 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
796out:
797 return ep;
798err:
799 spin_unlock_bh(&pool->lock);
800 atomic_inc(&mp->stats.no_free_exch_xid);
801 mempool_free(ep, mp->ep_pool);
802 return NULL;
803}
/**
 * fc_exch_alloc() - Allocate an exchange from an EM on a local port's EM list
 * @lport: The local port that will own the exchange
 * @fp:	   The FC frame that the exchange will be for
 *
 * Walks the list of exchange manager anchors and uses the first EM
 * whose match routine (if any) accepts the frame.
 */
816static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
817 struct fc_frame *fp)
818{
819 struct fc_exch_mgr_anchor *ema;
820
821 list_for_each_entry(ema, &lport->ema_list, ema_list)
822 if (!ema->match || ema->match(fp))
823 return fc_exch_em_alloc(lport, ema->mp);
824 return NULL;
825}
/**
 * fc_exch_find() - Look up an exchange by its XID
 * @mp:	 The exchange manager to search
 * @xid: The XID to look up
 *
 * Returns the exchange with an additional reference held, or NULL.
 */
832static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
833{
834 struct fc_exch_pool *pool;
835 struct fc_exch *ep = NULL;
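	/* The low fc_cpu_order bits of the XID select the owning CPU pool. */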
836 u16 cpu = xid & fc_cpu_mask;
837
838 if (xid == FC_XID_UNKNOWN)
839 return NULL;
840
841 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
842 printk_ratelimited(KERN_ERR
843 "libfc: lookup request for XID = %d, "
844 "indicates invalid CPU %d\n", xid, cpu);
845 return NULL;
846 }
847
848 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
849 pool = per_cpu_ptr(mp->pool, cpu);
850 spin_lock_bh(&pool->lock);
851 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
852 if (ep && ep->xid == xid)
853 fc_exch_hold(ep);
854 spin_unlock_bh(&pool->lock);
855 }
856 return ep;
857}
858
859
860
861
862
863
864
865static void fc_exch_done(struct fc_seq *sp)
866{
867 struct fc_exch *ep = fc_seq_exch(sp);
868 int rc;
869
870 spin_lock_bh(&ep->ex_lock);
871 rc = fc_exch_done_locked(ep);
872 spin_unlock_bh(&ep->ex_lock);
873 if (!rc)
874 fc_exch_delete(ep);
875}
876
877
878
879
880
881
882
883
884
885static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
886 struct fc_exch_mgr *mp,
887 struct fc_frame *fp)
888{
889 struct fc_exch *ep;
890 struct fc_frame_header *fh;
891
892 ep = fc_exch_alloc(lport, fp);
893 if (ep) {
894 ep->class = fc_frame_class(fp);
895
896
897
898
899 ep->f_ctl |= FC_FC_EX_CTX;
900 ep->f_ctl &= ~FC_FC_FIRST_SEQ;
901 fh = fc_frame_header_get(fp);
902 ep->sid = ntoh24(fh->fh_d_id);
903 ep->did = ntoh24(fh->fh_s_id);
904 ep->oid = ep->did;
905
906
907
908
909
910
911 ep->rxid = ep->xid;
912 ep->oxid = ntohs(fh->fh_ox_id);
913 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
914 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
915 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
916
917 fc_exch_hold(ep);
918 spin_unlock_bh(&ep->ex_lock);
919 }
920 return ep;
921}
/**
 * fc_seq_lookup_recip() - Find a sequence where the local port
 *			   is the responder
 * @lport: The local port that the frame was sent to
 * @mp:	   The Exchange Manager to look up the exchange from
 * @fp:	   The frame associated with the sequence being looked up
 *
 * If the return value is FC_RJT_NONE the caller owns a hold on the
 * exchange that it must release.
 */
933static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
934 struct fc_exch_mgr *mp,
935 struct fc_frame *fp)
936{
937 struct fc_frame_header *fh = fc_frame_header_get(fp);
938 struct fc_exch *ep = NULL;
939 struct fc_seq *sp = NULL;
940 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
941 u32 f_ctl;
942 u16 xid;
943
944 f_ctl = ntoh24(fh->fh_f_ctl);
945 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
	/*
	 * Lookup or create the exchange: if EX_CTX is set the sender is
	 * the exchange responder, so we originated the exchange and look
	 * it up by OX_ID; otherwise we are the responder and use RX_ID.
	 */
950 if (f_ctl & FC_FC_EX_CTX) {
951 xid = ntohs(fh->fh_ox_id);
952 ep = fc_exch_find(mp, xid);
953 if (!ep) {
954 atomic_inc(&mp->stats.xid_not_found);
955 reject = FC_RJT_OX_ID;
956 goto out;
957 }
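		/*
		 * Learn the RX_ID from the first response; afterwards any
		 * frame carrying a different RX_ID is rejected.
		 */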
958 if (ep->rxid == FC_XID_UNKNOWN)
959 ep->rxid = ntohs(fh->fh_rx_id);
960 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
961 reject = FC_RJT_OX_ID;
962 goto rel;
963 }
964 } else {
965 xid = ntohs(fh->fh_rx_id);
		/*
		 * Special case: some initiators send an ELS TEST with an
		 * RX_ID of 0; treat that as an unassigned RX_ID.
		 */
972 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
973 fc_frame_payload_op(fp) == ELS_TEST) {
974 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
975 xid = FC_XID_UNKNOWN;
976 }
977
978
979
980
981 ep = fc_exch_find(mp, xid);
982 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
983 if (ep) {
984 atomic_inc(&mp->stats.xid_busy);
985 reject = FC_RJT_RX_ID;
986 goto rel;
987 }
988 ep = fc_exch_resp(lport, mp, fp);
989 if (!ep) {
990 reject = FC_RJT_EXCH_EST;
991 goto out;
992 }
993 xid = ep->xid;
994 } else if (!ep) {
995 atomic_inc(&mp->stats.xid_not_found);
996 reject = FC_RJT_RX_ID;
997 goto out;
998 }
999 }
1000
1001
1002
1003
1004
1005 if (fc_sof_is_init(fr_sof(fp))) {
1006 sp = &ep->seq;
1007 sp->ssb_stat |= SSB_ST_RESP;
1008 sp->id = fh->fh_seq_id;
1009 } else {
1010 sp = &ep->seq;
1011 if (sp->id != fh->fh_seq_id) {
1012 atomic_inc(&mp->stats.seq_not_found);
1013 if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * The frame is the last of a sequence we
				 * have not seen: update the sequence ID
				 * and accept it as the new active
				 * sequence for this exchange.
				 */
1028 spin_lock_bh(&ep->ex_lock);
1029 sp->ssb_stat |= SSB_ST_RESP;
1030 sp->id = fh->fh_seq_id;
1031 spin_unlock_bh(&ep->ex_lock);
1032 } else {
1033
1034 reject = FC_RJT_SEQ_ID;
1035 goto rel;
1036 }
1037 }
1038 }
1039 WARN_ON(ep != fc_seq_exch(sp));
1040
1041 if (f_ctl & FC_FC_SEQ_INIT)
1042 ep->esb_stat |= ESB_ST_SEQ_INIT;
1043
1044 fr_seq(fp) = sp;
1045out:
1046 return reject;
1047rel:
1048 fc_exch_done(&ep->seq);
1049 fc_exch_release(ep);
1050 return reject;
1051}
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1062 struct fc_frame *fp)
1063{
1064 struct fc_frame_header *fh = fc_frame_header_get(fp);
1065 struct fc_exch *ep;
1066 struct fc_seq *sp = NULL;
1067 u32 f_ctl;
1068 u16 xid;
1069
1070 f_ctl = ntoh24(fh->fh_f_ctl);
1071 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1072 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1073 ep = fc_exch_find(mp, xid);
1074 if (!ep)
1075 return NULL;
1076 if (ep->seq.id == fh->fh_seq_id) {
1077
1078
1079
1080 sp = &ep->seq;
1081 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1082 ep->rxid == FC_XID_UNKNOWN) {
1083 ep->rxid = ntohs(fh->fh_rx_id);
1084 }
1085 }
1086 fc_exch_release(ep);
1087 return sp;
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098static void fc_exch_set_addr(struct fc_exch *ep,
1099 u32 orig_id, u32 resp_id)
1100{
1101 ep->oid = orig_id;
1102 if (ep->esb_stat & ESB_ST_RESP) {
1103 ep->sid = resp_id;
1104 ep->did = orig_id;
1105 } else {
1106 ep->sid = orig_id;
1107 ep->did = resp_id;
1108 }
1109}
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1121 struct fc_seq_els_data *els_data)
1122{
1123 switch (els_cmd) {
1124 case ELS_LS_RJT:
1125 fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1126 break;
1127 case ELS_LS_ACC:
1128 fc_seq_ls_acc(fp);
1129 break;
1130 case ELS_RRQ:
1131 fc_exch_els_rrq(fp);
1132 break;
1133 case ELS_REC:
1134 fc_exch_els_rec(fp);
1135 break;
1136 default:
1137 FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1138 }
1139}
1140
1141
1142
1143
1144
1145
1146
1147
1148static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1149 enum fc_rctl rctl, enum fc_fh_type fh_type)
1150{
1151 u32 f_ctl;
1152 struct fc_exch *ep = fc_seq_exch(sp);
1153
1154 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1155 f_ctl |= ep->f_ctl;
1156 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1157 fc_seq_send_locked(ep->lp, sp, fp);
1158}
1159
1160
1161
1162
1163
1164
1165
1166
1167static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1168{
1169 struct fc_frame *fp;
1170 struct fc_frame_header *rx_fh;
1171 struct fc_frame_header *fh;
1172 struct fc_exch *ep = fc_seq_exch(sp);
1173 struct fc_lport *lport = ep->lp;
1174 unsigned int f_ctl;
	/*
	 * Send an ACK only if the class of service calls for one
	 * (class 3 does not).
	 */
1179 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1180 fp = fc_frame_alloc(lport, 0);
1181 if (!fp)
1182 return;
1183
1184 fh = fc_frame_header_get(fp);
1185 fh->fh_r_ctl = FC_RCTL_ACK_1;
1186 fh->fh_type = FC_TYPE_BLS;
		/*
		 * Copy the relevant F_CTL bits from the received frame,
		 * then flip the exchange and sequence context bits since
		 * the ACK flows in the opposite direction.
		 */
1195 rx_fh = fc_frame_header_get(rx_fp);
1196 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1197 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1198 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1199 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1200 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1201 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1202 hton24(fh->fh_f_ctl, f_ctl);
1203
1204 fc_exch_setup_hdr(ep, fp, f_ctl);
1205 fh->fh_seq_id = rx_fh->fh_seq_id;
1206 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1207 fh->fh_parm_offset = htonl(1);
1208
1209 fr_sof(fp) = fr_sof(rx_fp);
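		/* Use EOF_T when the sequence is ending, EOF_N otherwise. */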
1210 if (f_ctl & FC_FC_END_SEQ)
1211 fr_eof(fp) = FC_EOF_T;
1212 else
1213 fr_eof(fp) = FC_EOF_N;
1214
1215 lport->tt.frame_send(lport, fp);
1216 }
1217}
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1228 enum fc_ba_rjt_reason reason,
1229 enum fc_ba_rjt_explan explan)
1230{
1231 struct fc_frame *fp;
1232 struct fc_frame_header *rx_fh;
1233 struct fc_frame_header *fh;
1234 struct fc_ba_rjt *rp;
1235 struct fc_lport *lport;
1236 unsigned int f_ctl;
1237
1238 lport = fr_dev(rx_fp);
1239 fp = fc_frame_alloc(lport, sizeof(*rp));
1240 if (!fp)
1241 return;
1242 fh = fc_frame_header_get(fp);
1243 rx_fh = fc_frame_header_get(rx_fp);
1244
1245 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1246
1247 rp = fc_frame_payload_get(fp, sizeof(*rp));
1248 rp->br_reason = reason;
1249 rp->br_explan = explan;
1250
1251
1252
1253
1254 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1255 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1256 fh->fh_ox_id = rx_fh->fh_ox_id;
1257 fh->fh_rx_id = rx_fh->fh_rx_id;
1258 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1259 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1260 fh->fh_type = FC_TYPE_BLS;
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1271 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1272 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1273 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1274 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1275 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1276 f_ctl &= ~FC_FC_FIRST_SEQ;
1277 hton24(fh->fh_f_ctl, f_ctl);
1278
1279 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1280 fr_eof(fp) = FC_EOF_T;
1281 if (fc_sof_needs_ack(fr_sof(fp)))
1282 fr_eof(fp) = FC_EOF_N;
1283
1284 lport->tt.frame_send(lport, fp);
1285}
/**
 * fc_exch_recv_abts() - Handle an incoming ABTS
 * @ep:	   The exchange the ABTS is on
 * @rx_fp: The ABTS frame
 *
 * This is usually a target-mode path, but may also occur after a lost
 * FCP transfer ready, confirm or RRQ.  It is always handled as an
 * exchange-level abort.
 */
1296static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1297{
1298 struct fc_frame *fp;
1299 struct fc_ba_acc *ap;
1300 struct fc_frame_header *fh;
1301 struct fc_seq *sp;
1302
1303 if (!ep)
1304 goto reject;
1305 spin_lock_bh(&ep->ex_lock);
1306 if (ep->esb_stat & ESB_ST_COMPLETE) {
1307 spin_unlock_bh(&ep->ex_lock);
1308 goto reject;
1309 }
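	/*
	 * Hold the exchange as a Recovery Qualifier and keep it around
	 * for R_A_TOV before it can be reused.
	 */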
1310 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1311 fc_exch_hold(ep);
1312 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1313 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1314
1315 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1316 if (!fp) {
1317 spin_unlock_bh(&ep->ex_lock);
1318 goto free;
1319 }
1320 fh = fc_frame_header_get(fp);
1321 ap = fc_frame_payload_get(fp, sizeof(*ap));
1322 memset(ap, 0, sizeof(*ap));
1323 sp = &ep->seq;
1324 ap->ba_high_seq_cnt = htons(0xffff);
1325 if (sp->ssb_stat & SSB_ST_RESP) {
1326 ap->ba_seq_id = sp->id;
1327 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1328 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1329 ap->ba_low_seq_cnt = htons(sp->cnt);
1330 }
1331 sp = fc_seq_start_next_locked(sp);
1332 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1333 spin_unlock_bh(&ep->ex_lock);
1334 fc_frame_free(rx_fp);
1335 return;
1336
1337reject:
1338 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1339free:
1340 fc_frame_free(rx_fp);
1341}
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1353{
1354 struct fc_exch_mgr_anchor *ema;
1355
1356 WARN_ON(lport != fr_dev(fp));
1357 WARN_ON(fr_seq(fp));
1358 fr_seq(fp) = NULL;
1359
1360 list_for_each_entry(ema, &lport->ema_list, ema_list)
1361 if ((!ema->match || ema->match(fp)) &&
1362 fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1363 break;
1364 return fr_seq(fp);
1365}
1366
1367
1368
1369
1370
1371static void fc_seq_release(struct fc_seq *sp)
1372{
1373 fc_exch_release(fc_seq_exch(sp));
1374}
/**
 * fc_exch_recv_req() - Handler for an incoming request
 * @lport: The local port that received the request
 * @mp:	   The EM that the exchange is on
 * @fp:	   The request frame
 *
 * Used when the other end is originating the exchange or sequence.
 */
1385static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1386 struct fc_frame *fp)
1387{
1388 struct fc_frame_header *fh = fc_frame_header_get(fp);
1389 struct fc_seq *sp = NULL;
1390 struct fc_exch *ep = NULL;
1391 enum fc_pf_rjt_reason reject;
	/*
	 * With NPIV the frame may have been delivered to the wrong lport:
	 * re-resolve it from the destination ID before going any further.
	 */
1396 lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1397 if (!lport) {
1398 fc_frame_free(fp);
1399 return;
1400 }
1401 fr_dev(fp) = lport;
1402
1403 BUG_ON(fr_seq(fp));
	/*
	 * If the RX_ID is not yet assigned, don't allocate an exchange
	 * here; pass the request straight to the local port and let the
	 * upper level protocol reply as needed.
	 */
1409 if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1410 return lport->tt.lport_recv(lport, fp);
1411
1412 reject = fc_seq_lookup_recip(lport, mp, fp);
1413 if (reject == FC_RJT_NONE) {
1414 sp = fr_seq(fp);
1415 ep = fc_seq_exch(sp);
1416 fc_seq_send_ack(sp, fp);
1417 ep->encaps = fr_encaps(fp);
		/*
		 * If a response handler has been set on the exchange
		 * (via fc_seq_set_resp(), for example by a target
		 * driver), call it; otherwise pass the request to the
		 * local port for processing.
		 */
1430 if (ep->resp)
1431 ep->resp(sp, fp, ep->arg);
1432 else
1433 lport->tt.lport_recv(lport, fp);
1434 fc_exch_release(ep);
1435 } else {
1436 FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1437 reject);
1438 fc_frame_free(fp);
1439 }
1440}
/**
 * fc_exch_recv_seq_resp() - Handler for an incoming response where the
 *			     other end is the originator of the sequence
 *			     that is a response to our exchange
 * @mp: The EM that the exchange is on
 * @fp: The response frame
 */
1449static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1450{
1451 struct fc_frame_header *fh = fc_frame_header_get(fp);
1452 struct fc_seq *sp;
1453 struct fc_exch *ep;
1454 enum fc_sof sof;
1455 u32 f_ctl;
1456 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1457 void *ex_resp_arg;
1458 int rc;
1459
1460 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1461 if (!ep) {
1462 atomic_inc(&mp->stats.xid_not_found);
1463 goto out;
1464 }
1465 if (ep->esb_stat & ESB_ST_COMPLETE) {
1466 atomic_inc(&mp->stats.xid_not_found);
1467 goto rel;
1468 }
1469 if (ep->rxid == FC_XID_UNKNOWN)
1470 ep->rxid = ntohs(fh->fh_rx_id);
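	/*
	 * Sanity-check that the frame's addresses match the exchange;
	 * FLOGI responses are special since the reply's S_ID may differ
	 * from the well-known FLOGI address.
	 */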
1471 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1472 atomic_inc(&mp->stats.xid_not_found);
1473 goto rel;
1474 }
1475 if (ep->did != ntoh24(fh->fh_s_id) &&
1476 ep->did != FC_FID_FLOGI) {
1477 atomic_inc(&mp->stats.xid_not_found);
1478 goto rel;
1479 }
1480 sof = fr_sof(fp);
1481 sp = &ep->seq;
1482 if (fc_sof_is_init(sof)) {
1483 sp->ssb_stat |= SSB_ST_RESP;
1484 sp->id = fh->fh_seq_id;
1485 } else if (sp->id != fh->fh_seq_id) {
1486 atomic_inc(&mp->stats.seq_not_found);
1487 goto rel;
1488 }
1489
1490 f_ctl = ntoh24(fh->fh_f_ctl);
1491 fr_seq(fp) = sp;
1492 if (f_ctl & FC_FC_SEQ_INIT)
1493 ep->esb_stat |= ESB_ST_SEQ_INIT;
1494
1495 if (fc_sof_needs_ack(sof))
1496 fc_seq_send_ack(sp, fp);
1497 resp = ep->resp;
1498 ex_resp_arg = ep->arg;
1499
1500 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1501 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1502 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1503 spin_lock_bh(&ep->ex_lock);
1504 resp = ep->resp;
1505 rc = fc_exch_done_locked(ep);
1506 WARN_ON(fc_seq_exch(sp) != ep);
1507 spin_unlock_bh(&ep->ex_lock);
1508 if (!rc)
1509 fc_exch_delete(ep);
1510 }
	/*
	 * Call the response handler without holding the exchange lock;
	 * the handler may complete the I/O or free the frame.
	 */
1525 if (resp)
1526 resp(sp, fp, ex_resp_arg);
1527 else
1528 fc_frame_free(fp);
1529 fc_exch_release(ep);
1530 return;
1531rel:
1532 fc_exch_release(ep);
1533out:
1534 fc_frame_free(fp);
1535}
1536
1537
1538
1539
1540
1541
1542
1543static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1544{
1545 struct fc_seq *sp;
1546
1547 sp = fc_seq_lookup_orig(mp, fp);
1548
1549 if (!sp)
1550 atomic_inc(&mp->stats.xid_not_found);
1551 else
1552 atomic_inc(&mp->stats.non_bls_resp);
1553
1554 fc_frame_free(fp);
1555}
/**
 * fc_exch_abts_resp() - Handler for a response to an ABTS
 * @ep: The exchange that the ABTS was sent on
 * @fp: The response frame (BA_ACC or BA_RJT)
 */
1565static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1566{
1567 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1568 void *ex_resp_arg;
1569 struct fc_frame_header *fh;
1570 struct fc_ba_acc *ap;
1571 struct fc_seq *sp;
1572 u16 low;
1573 u16 high;
1574 int rc = 1, has_rec = 0;
1575
1576 fh = fc_frame_header_get(fp);
1577 FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1578 fc_exch_rctl_name(fh->fh_r_ctl));
1579
1580 if (cancel_delayed_work_sync(&ep->timeout_work)) {
1581 FC_EXCH_DBG(ep, "Exchange timer canceled\n");
1582 fc_exch_release(ep);
1583 }
1584
1585 spin_lock_bh(&ep->ex_lock);
1586 switch (fh->fh_r_ctl) {
1587 case FC_RCTL_BA_ACC:
1588 ap = fc_frame_payload_get(fp, sizeof(*ap));
1589 if (!ap)
1590 break;
		/*
		 * Decide whether to establish a Recovery Qualifier:
		 * do so if there is a non-empty SEQ_CNT range and the
		 * SEQ_ID matches the sequence that was aborted.
		 */
1597 low = ntohs(ap->ba_low_seq_cnt);
1598 high = ntohs(ap->ba_high_seq_cnt);
1599 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1600 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1601 ap->ba_seq_id == ep->seq_id) && low != high) {
1602 ep->esb_stat |= ESB_ST_REC_QUAL;
1603 fc_exch_hold(ep);
1604 has_rec = 1;
1605 }
1606 break;
1607 case FC_RCTL_BA_RJT:
1608 break;
1609 default:
1610 break;
1611 }
1612
1613 resp = ep->resp;
1614 ex_resp_arg = ep->arg;
1615
1616
1617
1618
1619 sp = &ep->seq;
1620
1621
1622
1623 if (ep->fh_type != FC_TYPE_FCP &&
1624 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1625 rc = fc_exch_done_locked(ep);
1626 spin_unlock_bh(&ep->ex_lock);
1627 if (!rc)
1628 fc_exch_delete(ep);
1629
1630 if (resp)
1631 resp(sp, fp, ex_resp_arg);
1632 else
1633 fc_frame_free(fp);
1634
1635 if (has_rec)
1636 fc_exch_timer_set(ep, ep->r_a_tov);
1637
1638}
/**
 * fc_exch_recv_bls() - Handler for a BLS sequence
 * @mp: The EM that the exchange is on
 * @fp: The request frame
 *
 * The BLS frame is always a sequence initiated by the remote side.
 * We may be either the originator or recipient of the exchange.
 */
1648static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1649{
1650 struct fc_frame_header *fh;
1651 struct fc_exch *ep;
1652 u32 f_ctl;
1653
1654 fh = fc_frame_header_get(fp);
1655 f_ctl = ntoh24(fh->fh_f_ctl);
1656 fr_seq(fp) = NULL;
1657
1658 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1659 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1660 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1661 spin_lock_bh(&ep->ex_lock);
1662 ep->esb_stat |= ESB_ST_SEQ_INIT;
1663 spin_unlock_bh(&ep->ex_lock);
1664 }
1665 if (f_ctl & FC_FC_SEQ_CTX) {
1666
1667
1668
1669
1670 switch (fh->fh_r_ctl) {
1671 case FC_RCTL_ACK_1:
1672 case FC_RCTL_ACK_0:
1673 break;
1674 default:
1675 if (ep)
1676 FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1677 fh->fh_r_ctl,
1678 fc_exch_rctl_name(fh->fh_r_ctl));
1679 break;
1680 }
1681 fc_frame_free(fp);
1682 } else {
1683 switch (fh->fh_r_ctl) {
1684 case FC_RCTL_BA_RJT:
1685 case FC_RCTL_BA_ACC:
1686 if (ep)
1687 fc_exch_abts_resp(ep, fp);
1688 else
1689 fc_frame_free(fp);
1690 break;
1691 case FC_RCTL_BA_ABTS:
1692 fc_exch_recv_abts(ep, fp);
1693 break;
1694 default:
1695 fc_frame_free(fp);
1696 break;
1697 }
1698 }
1699 if (ep)
1700 fc_exch_release(ep);
1701}
1702
1703
1704
1705
1706
1707
1708
1709
1710static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1711{
1712 struct fc_lport *lport;
1713 struct fc_els_ls_acc *acc;
1714 struct fc_frame *fp;
1715
1716 lport = fr_dev(rx_fp);
1717 fp = fc_frame_alloc(lport, sizeof(*acc));
1718 if (!fp)
1719 return;
1720 acc = fc_frame_payload_get(fp, sizeof(*acc));
1721 memset(acc, 0, sizeof(*acc));
1722 acc->la_cmd = ELS_LS_ACC;
1723 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1724 lport->tt.frame_send(lport, fp);
1725}
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1737 enum fc_els_rjt_explan explan)
1738{
1739 struct fc_lport *lport;
1740 struct fc_els_ls_rjt *rjt;
1741 struct fc_frame *fp;
1742
1743 lport = fr_dev(rx_fp);
1744 fp = fc_frame_alloc(lport, sizeof(*rjt));
1745 if (!fp)
1746 return;
1747 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1748 memset(rjt, 0, sizeof(*rjt));
1749 rjt->er_cmd = ELS_LS_RJT;
1750 rjt->er_reason = reason;
1751 rjt->er_explan = explan;
1752 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1753 lport->tt.frame_send(lport, fp);
1754}
/**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
 *
 * Aborts the exchange, cancels its timer and notifies the response
 * handler with -FC_EX_CLOSED.
 */
1760static void fc_exch_reset(struct fc_exch *ep)
1761{
1762 struct fc_seq *sp;
1763 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1764 void *arg;
1765 int rc = 1;
1766
1767 spin_lock_bh(&ep->ex_lock);
1768 fc_exch_abort_locked(ep, 0);
1769 ep->state |= FC_EX_RST_CLEANUP;
1770 fc_exch_timer_cancel(ep);
1771 resp = ep->resp;
1772 ep->resp = NULL;
1773 if (ep->esb_stat & ESB_ST_REC_QUAL)
1774 atomic_dec(&ep->ex_refcnt);
1775 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1776 arg = ep->arg;
1777 sp = &ep->seq;
1778 rc = fc_exch_done_locked(ep);
1779 spin_unlock_bh(&ep->ex_lock);
1780 if (!rc)
1781 fc_exch_delete(ep);
1782
1783 if (resp)
1784 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1785}
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799static void fc_exch_pool_reset(struct fc_lport *lport,
1800 struct fc_exch_pool *pool,
1801 u32 sid, u32 did)
1802{
1803 struct fc_exch *ep;
1804 struct fc_exch *next;
1805
1806 spin_lock_bh(&pool->lock);
1807restart:
1808 list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1809 if ((lport == ep->lp) &&
1810 (sid == 0 || sid == ep->sid) &&
1811 (did == 0 || did == ep->did)) {
1812 fc_exch_hold(ep);
1813 spin_unlock_bh(&pool->lock);
1814
1815 fc_exch_reset(ep);
1816
1817 fc_exch_release(ep);
1818 spin_lock_bh(&pool->lock);
1819
1820
1821
1822
1823
1824 goto restart;
1825 }
1826 }
1827 pool->next_index = 0;
1828 pool->left = FC_XID_UNKNOWN;
1829 pool->right = FC_XID_UNKNOWN;
1830 spin_unlock_bh(&pool->lock);
1831}
/**
 * fc_exch_mgr_reset() - Reset all EMs of a local port
 * @lport: The local port whose EMs are to be reset
 * @sid:   The source ID
 * @did:   The destination ID
 *
 * Resets all exchanges of all EMs associated with the local port.
 * A zero @sid or @did acts as a wildcard for that field.
 */
1844void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1845{
1846 struct fc_exch_mgr_anchor *ema;
1847 unsigned int cpu;
1848
1849 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1850 for_each_possible_cpu(cpu)
1851 fc_exch_pool_reset(lport,
1852 per_cpu_ptr(ema->mp->pool, cpu),
1853 sid, did);
1854 }
1855}
1856EXPORT_SYMBOL(fc_exch_mgr_reset);
1857
1858
1859
1860
1861
1862
1863
1864
1865static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
1866{
1867 struct fc_exch_mgr_anchor *ema;
1868
1869 list_for_each_entry(ema, &lport->ema_list, ema_list)
1870 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1871 return fc_exch_find(ema->mp, xid);
1872 return NULL;
1873}
/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC frame, not freed here
 *
 * Note that the requesting port may be different than the S_ID in the
 * request.
 */
1881static void fc_exch_els_rec(struct fc_frame *rfp)
1882{
1883 struct fc_lport *lport;
1884 struct fc_frame *fp;
1885 struct fc_exch *ep;
1886 struct fc_els_rec *rp;
1887 struct fc_els_rec_acc *acc;
1888 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1889 enum fc_els_rjt_explan explan;
1890 u32 sid;
1891 u16 rxid;
1892 u16 oxid;
1893
1894 lport = fr_dev(rfp);
1895 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1896 explan = ELS_EXPL_INV_LEN;
1897 if (!rp)
1898 goto reject;
1899 sid = ntoh24(rp->rec_s_id);
1900 rxid = ntohs(rp->rec_rx_id);
1901 oxid = ntohs(rp->rec_ox_id);
1902
1903 ep = fc_exch_lookup(lport,
1904 sid == fc_host_port_id(lport->host) ? oxid : rxid);
1905 explan = ELS_EXPL_OXID_RXID;
1906 if (!ep)
1907 goto reject;
1908 if (ep->oid != sid || oxid != ep->oxid)
1909 goto rel;
1910 if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
1911 goto rel;
1912 fp = fc_frame_alloc(lport, sizeof(*acc));
1913 if (!fp)
1914 goto out;
1915
1916 acc = fc_frame_payload_get(fp, sizeof(*acc));
1917 memset(acc, 0, sizeof(*acc));
1918 acc->reca_cmd = ELS_LS_ACC;
1919 acc->reca_ox_id = rp->rec_ox_id;
1920 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1921 acc->reca_rx_id = htons(ep->rxid);
1922 if (ep->sid == ep->oid)
1923 hton24(acc->reca_rfid, ep->did);
1924 else
1925 hton24(acc->reca_rfid, ep->sid);
1926 acc->reca_fc4value = htonl(ep->seq.rec_data);
1927 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1928 ESB_ST_SEQ_INIT |
1929 ESB_ST_COMPLETE));
1930 fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
1931 lport->tt.frame_send(lport, fp);
1932out:
1933 fc_exch_release(ep);
1934 return;
1935
1936rel:
1937 fc_exch_release(ep);
1938reject:
1939 fc_seq_ls_rjt(rfp, reason, explan);
1940}
/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp:	 The sequence that the RRQ is on
 * @fp:	 The RRQ response frame
 * @arg: The exchange that the RRQ was sent for
 */
1950static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1951{
1952 struct fc_exch *aborted_ep = arg;
1953 unsigned int op;
1954
1955 if (IS_ERR(fp)) {
1956 int err = PTR_ERR(fp);
1957
1958 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
1959 goto cleanup;
1960 FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1961 "frame error %d\n", err);
1962 return;
1963 }
1964
1965 op = fc_frame_payload_op(fp);
1966 fc_frame_free(fp);
1967
1968 switch (op) {
1969 case ELS_LS_RJT:
1970 FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
		/* fall through */
1972 case ELS_LS_ACC:
1973 goto cleanup;
1974 default:
1975 FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1976 "for RRQ", op);
1977 return;
1978 }
1979
1980cleanup:
1981 fc_exch_done(&aborted_ep->seq);
	/* drop hold for rec qual */
1983 fc_exch_release(aborted_ep);
1984}
/**
 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
 * @lport:	The local port to send the frame on
 * @fp:		The frame to be sent
 * @resp:	The response handler for this request
 * @destructor:	The destructor for the exchange
 * @arg:	The argument to be passed to the response handler
 * @timer_msec:	The timeout period for the exchange
 *
 * The caller must have filled in the following header fields before
 * calling this routine: routing control, destination ID, source ID,
 * type, frame control, and parameter or relative offset.
 */
2006static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
2007 struct fc_frame *fp,
2008 void (*resp)(struct fc_seq *,
2009 struct fc_frame *fp,
2010 void *arg),
2011 void (*destructor)(struct fc_seq *,
2012 void *),
2013 void *arg, u32 timer_msec)
2014{
2015 struct fc_exch *ep;
2016 struct fc_seq *sp = NULL;
2017 struct fc_frame_header *fh;
2018 struct fc_fcp_pkt *fsp = NULL;
2019 int rc = 1;
2020
2021 ep = fc_exch_alloc(lport, fp);
2022 if (!ep) {
2023 fc_frame_free(fp);
2024 return NULL;
2025 }
2026 ep->esb_stat |= ESB_ST_SEQ_INIT;
2027 fh = fc_frame_header_get(fp);
2028 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
2029 ep->resp = resp;
2030 ep->destructor = destructor;
2031 ep->arg = arg;
2032 ep->r_a_tov = FC_DEF_R_A_TOV;
2033 ep->lp = lport;
2034 sp = &ep->seq;
2035
2036 ep->fh_type = fh->fh_type;
2037 ep->f_ctl = ntoh24(fh->fh_f_ctl);
2038 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2039 sp->cnt++;
2040
2041 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2042 fsp = fr_fsp(fp);
2043 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2044 }
2045
2046 if (unlikely(lport->tt.frame_send(lport, fp)))
2047 goto err;
2048
2049 if (timer_msec)
2050 fc_exch_timer_set_locked(ep, timer_msec);
2051 ep->f_ctl &= ~FC_FC_FIRST_SEQ;
2052
2053 if (ep->f_ctl & FC_FC_SEQ_INIT)
2054 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2055 spin_unlock_bh(&ep->ex_lock);
2056 return sp;
2057err:
2058 if (fsp)
2059 fc_fcp_ddp_done(fsp);
2060 rc = fc_exch_done_locked(ep);
2061 spin_unlock_bh(&ep->ex_lock);
2062 if (!rc)
2063 fc_exch_delete(ep);
2064 return NULL;
2065}
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ for
 *
 * This tells the remote port to stop blocking the use of
 * the exchange and the SEQ_CNT range.
 */
2074static void fc_exch_rrq(struct fc_exch *ep)
2075{
2076 struct fc_lport *lport;
2077 struct fc_els_rrq *rrq;
2078 struct fc_frame *fp;
2079 u32 did;
2080
2081 lport = ep->lp;
2082
2083 fp = fc_frame_alloc(lport, sizeof(*rrq));
2084 if (!fp)
2085 goto retry;
2086
2087 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
2088 memset(rrq, 0, sizeof(*rrq));
2089 rrq->rrq_cmd = ELS_RRQ;
2090 hton24(rrq->rrq_s_id, ep->sid);
2091 rrq->rrq_ox_id = htons(ep->oxid);
2092 rrq->rrq_rx_id = htons(ep->rxid);
2093
2094 did = ep->did;
2095 if (ep->esb_stat & ESB_ST_RESP)
2096 did = ep->sid;
2097
2098 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
2099 lport->port_id, FC_TYPE_ELS,
2100 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
2101
2102 if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
2103 lport->e_d_tov))
2104 return;
2105
2106retry:
2107 spin_lock_bh(&ep->ex_lock);
2108 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
2109 spin_unlock_bh(&ep->ex_lock);
2110
2111 fc_exch_release(ep);
2112 return;
2113 }
2114 ep->esb_stat |= ESB_ST_REC_QUAL;
2115 fc_exch_timer_set_locked(ep, ep->r_a_tov);
2116 spin_unlock_bh(&ep->ex_lock);
2117}
/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
 * @fp: The RRQ frame, not freed here
 */
2123static void fc_exch_els_rrq(struct fc_frame *fp)
2124{
2125 struct fc_lport *lport;
2126 struct fc_exch *ep = NULL;
2127 struct fc_els_rrq *rp;
2128 u32 sid;
2129 u16 xid;
2130 enum fc_els_rjt_explan explan;
2131
2132 lport = fr_dev(fp);
2133 rp = fc_frame_payload_get(fp, sizeof(*rp));
2134 explan = ELS_EXPL_INV_LEN;
2135 if (!rp)
2136 goto reject;
2137
2138
2139
2140
2141 sid = ntoh24(rp->rrq_s_id);
2142 xid = fc_host_port_id(lport->host) == sid ?
2143 ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
2144 ep = fc_exch_lookup(lport, xid);
2145 explan = ELS_EXPL_OXID_RXID;
2146 if (!ep)
2147 goto reject;
2148 spin_lock_bh(&ep->ex_lock);
2149 if (ep->oxid != ntohs(rp->rrq_ox_id))
2150 goto unlock_reject;
2151 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
2152 ep->rxid != FC_XID_UNKNOWN)
2153 goto unlock_reject;
2154 explan = ELS_EXPL_SID;
2155 if (ep->sid != sid)
2156 goto unlock_reject;
	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
2161 if (ep->esb_stat & ESB_ST_REC_QUAL) {
2162 ep->esb_stat &= ~ESB_ST_REC_QUAL;
2163 atomic_dec(&ep->ex_refcnt);
2164 }
2165 if (ep->esb_stat & ESB_ST_COMPLETE)
2166 fc_exch_timer_cancel(ep);
2167
2168 spin_unlock_bh(&ep->ex_lock);
2169
2170
2171
2172
2173 fc_seq_ls_acc(fp);
2174 goto out;
2175
2176unlock_reject:
2177 spin_unlock_bh(&ep->ex_lock);
2178reject:
2179 fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2180out:
2181 if (ep)
2182 fc_exch_release(ep);
2183}
2184
2185
2186
2187
2188
2189void fc_exch_update_stats(struct fc_lport *lport)
2190{
2191 struct fc_host_statistics *st;
2192 struct fc_exch_mgr_anchor *ema;
2193 struct fc_exch_mgr *mp;
2194
2195 st = &lport->host_stats;
2196
2197 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2198 mp = ema->mp;
2199 st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2200 st->fc_no_free_exch_xid +=
2201 atomic_read(&mp->stats.no_free_exch_xid);
2202 st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2203 st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2204 st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2205 st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2206 }
2207}
2208EXPORT_SYMBOL(fc_exch_update_stats);
2209
2210
2211
2212
2213
2214
2215
2216struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2217 struct fc_exch_mgr *mp,
2218 bool (*match)(struct fc_frame *))
2219{
2220 struct fc_exch_mgr_anchor *ema;
2221
2222 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2223 if (!ema)
2224 return ema;
2225
2226 ema->mp = mp;
2227 ema->match = match;
2228
2229 list_add_tail(&ema->ema_list, &lport->ema_list);
2230 kref_get(&mp->kref);
2231 return ema;
2232}
2233EXPORT_SYMBOL(fc_exch_mgr_add);
2234
2235
2236
2237
2238
2239static void fc_exch_mgr_destroy(struct kref *kref)
2240{
2241 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2242
2243 mempool_destroy(mp->ep_pool);
2244 free_percpu(mp->pool);
2245 kfree(mp);
2246}
2247
2248
2249
2250
2251
2252void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2253{
2254
2255 list_del(&ema->ema_list);
2256 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2257 kfree(ema);
2258}
2259EXPORT_SYMBOL(fc_exch_mgr_del);
2260
2261
2262
2263
2264
2265
2266int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2267{
2268 struct fc_exch_mgr_anchor *ema, *tmp;
2269
2270 list_for_each_entry(ema, &src->ema_list, ema_list) {
2271 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2272 goto err;
2273 }
2274 return 0;
2275err:
2276 list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2277 fc_exch_mgr_del(ema);
2278 return -ENOMEM;
2279}
2280EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport:   The local port that the new EM will be associated with
 * @class:   The default FC class for new exchanges
 * @min_xid: The minimum XID that will be handled by the new EM
 * @max_xid: The maximum XID that will be handled by the new EM
 * @match:   The match routine for the new EM
 */
2290struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2291 enum fc_class class,
2292 u16 min_xid, u16 max_xid,
2293 bool (*match)(struct fc_frame *))
2294{
2295 struct fc_exch_mgr *mp;
2296 u16 pool_exch_range;
2297 size_t pool_size;
2298 unsigned int cpu;
2299 struct fc_exch_pool *pool;
2300
2301 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
2302 (min_xid & fc_cpu_mask) != 0) {
2303 FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
2304 min_xid, max_xid);
2305 return NULL;
2306 }
2307
2308
2309
2310
2311 mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2312 if (!mp)
2313 return NULL;
2314
2315 mp->class = class;
2316
2317 mp->min_xid = min_xid;
	/*
	 * Each per-CPU pool plus its exchange pointer array must fit
	 * into one PCPU_MIN_UNIT_SIZE percpu allocation.
	 */
2320 pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2321 sizeof(struct fc_exch *);
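	/*
	 * If the requested XID range will not fit in one per-CPU
	 * allocation unit, shrink max_xid so that it does.
	 */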
2322 if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
2323 mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2324 min_xid - 1;
2325 } else {
2326 mp->max_xid = max_xid;
2327 pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2328 (fc_cpu_mask + 1);
2329 }
2330
2331 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2332 if (!mp->ep_pool)
2333 goto free_mp;
2334
2335
2336
2337
2338
2339
2340 mp->pool_max_index = pool_exch_range - 1;
2341
2342
2343
2344
2345 pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2346 mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2347 if (!mp->pool)
2348 goto free_mempool;
2349 for_each_possible_cpu(cpu) {
2350 pool = per_cpu_ptr(mp->pool, cpu);
2351 pool->next_index = 0;
2352 pool->left = FC_XID_UNKNOWN;
2353 pool->right = FC_XID_UNKNOWN;
2354 spin_lock_init(&pool->lock);
2355 INIT_LIST_HEAD(&pool->ex_list);
2356 }
2357
2358 kref_init(&mp->kref);
2359 if (!fc_exch_mgr_add(lport, mp, match)) {
2360 free_percpu(mp->pool);
2361 goto free_mempool;
2362 }
	/*
	 * kref_init() above set the refcount to 1 and fc_exch_mgr_add()
	 * took its own reference for the anchor, so drop the initial
	 * reference here; the anchor now keeps the EM alive.
	 */
2369 kref_put(&mp->kref, fc_exch_mgr_destroy);
2370 return mp;
2371
2372free_mempool:
2373 mempool_destroy(mp->ep_pool);
2374free_mp:
2375 kfree(mp);
2376 return NULL;
2377}
2378EXPORT_SYMBOL(fc_exch_mgr_alloc);
2379
2380
2381
2382
2383
2384void fc_exch_mgr_free(struct fc_lport *lport)
2385{
2386 struct fc_exch_mgr_anchor *ema, *next;
2387
2388 flush_workqueue(fc_exch_workqueue);
2389 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2390 fc_exch_mgr_del(ema);
2391}
2392EXPORT_SYMBOL(fc_exch_mgr_free);
2393
2394
2395
2396
2397
2398
2399
2400
2401static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2402 struct fc_lport *lport,
2403 struct fc_frame_header *fh)
2404{
2405 struct fc_exch_mgr_anchor *ema;
2406 u16 xid;
2407
2408 if (f_ctl & FC_FC_EX_CTX)
2409 xid = ntohs(fh->fh_ox_id);
2410 else {
2411 xid = ntohs(fh->fh_rx_id);
2412 if (xid == FC_XID_UNKNOWN)
2413 return list_entry(lport->ema_list.prev,
2414 typeof(*ema), ema_list);
2415 }
2416
2417 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2418 if ((xid >= ema->mp->min_xid) &&
2419 (xid <= ema->mp->max_xid))
2420 return ema;
2421 }
2422 return NULL;
2423}
/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp:	   The received frame
 */
2429void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2430{
2431 struct fc_frame_header *fh = fc_frame_header_get(fp);
2432 struct fc_exch_mgr_anchor *ema;
2433 u32 f_ctl;
2434
2435
2436 if (!lport || lport->state == LPORT_ST_DISABLED) {
2437 FC_LPORT_DBG(lport, "Receiving frames for an lport that "
2438 "has not been initialized correctly\n");
2439 fc_frame_free(fp);
2440 return;
2441 }
2442
2443 f_ctl = ntoh24(fh->fh_f_ctl);
2444 ema = fc_find_ema(f_ctl, lport, fh);
2445 if (!ema) {
2446 FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
2447 "fc_ctl <0x%x>, xid <0x%x>\n",
2448 f_ctl,
2449 (f_ctl & FC_FC_EX_CTX) ?
2450 ntohs(fh->fh_ox_id) :
2451 ntohs(fh->fh_rx_id));
2452 fc_frame_free(fp);
2453 return;
2454 }
2455
2456
2457
2458
2459 switch (fr_eof(fp)) {
2460 case FC_EOF_T:
2461 if (f_ctl & FC_FC_END_SEQ)
2462 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		/* fall through */
2464 case FC_EOF_N:
2465 if (fh->fh_type == FC_TYPE_BLS)
2466 fc_exch_recv_bls(ema->mp, fp);
2467 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2468 FC_FC_EX_CTX)
2469 fc_exch_recv_seq_resp(ema->mp, fp);
2470 else if (f_ctl & FC_FC_SEQ_CTX)
2471 fc_exch_recv_resp(ema->mp, fp);
2472 else
2473 fc_exch_recv_req(lport, ema->mp, fp);
2474 break;
2475 default:
2476 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
2477 fr_eof(fp));
2478 fc_frame_free(fp);
2479 }
2480}
2481EXPORT_SYMBOL(fc_exch_recv);
2482
2483
2484
2485
2486
2487int fc_exch_init(struct fc_lport *lport)
2488{
2489 if (!lport->tt.seq_start_next)
2490 lport->tt.seq_start_next = fc_seq_start_next;
2491
2492 if (!lport->tt.seq_set_resp)
2493 lport->tt.seq_set_resp = fc_seq_set_resp;
2494
2495 if (!lport->tt.exch_seq_send)
2496 lport->tt.exch_seq_send = fc_exch_seq_send;
2497
2498 if (!lport->tt.seq_send)
2499 lport->tt.seq_send = fc_seq_send;
2500
2501 if (!lport->tt.seq_els_rsp_send)
2502 lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
2503
2504 if (!lport->tt.exch_done)
2505 lport->tt.exch_done = fc_exch_done;
2506
2507 if (!lport->tt.exch_mgr_reset)
2508 lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2509
2510 if (!lport->tt.seq_exch_abort)
2511 lport->tt.seq_exch_abort = fc_seq_exch_abort;
2512
2513 if (!lport->tt.seq_assign)
2514 lport->tt.seq_assign = fc_seq_assign;
2515
2516 if (!lport->tt.seq_release)
2517 lport->tt.seq_release = fc_seq_release;
2518
2519 return 0;
2520}
2521EXPORT_SYMBOL(fc_exch_init);
2522
2523
2524
2525
2526int fc_setup_exch_mgr(void)
2527{
2528 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2529 0, SLAB_HWCACHE_ALIGN, NULL);
2530 if (!fc_em_cachep)
2531 return -ENOMEM;
	/*
	 * Initialize fc_cpu_mask and fc_cpu_order.  The low fc_cpu_order
	 * bits of an XID identify the CPU whose pool owns the exchange,
	 * so the mask is nr_cpu_ids rounded up to a power of two, minus
	 * one.  This keeps allocation and lookup of an exchange on the
	 * same CPU without a global lock.
	 */
2547 fc_cpu_mask = 1;
2548 fc_cpu_order = 0;
2549 while (fc_cpu_mask < nr_cpu_ids) {
2550 fc_cpu_mask <<= 1;
2551 fc_cpu_order++;
2552 }
2553 fc_cpu_mask--;
2554
2555 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2556 if (!fc_exch_workqueue)
2557 goto err;
2558 return 0;
2559err:
2560 kmem_cache_destroy(fc_em_cachep);
2561 return -ENOMEM;
2562}
2563
2564
2565
2566
2567void fc_destroy_exch_mgr(void)
2568{
2569 destroy_workqueue(fc_exch_workqueue);
2570 kmem_cache_destroy(fc_em_cachep);
2571}