/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

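/*
 * fnic_handle_link() - worker that handles a link-state change event.
 *
 * Reads the new link state, link-down count and port speed from the
 * firmware, updates the FC host speed attributes, and drives the FCoE
 * controller up/down (or restarts FIP VLAN discovery) depending on the
 * DOWN->UP / UP->DOWN / UP->DOWN->UP transition that occurred.
 */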
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;
	u64 old_port_speed, new_port_speed;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	fnic->link_events = 1;	/* less work to just set every time */

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	old_port_speed = atomic64_read(
			&fnic->fnic_stats.misc_stats.current_port_speed);

	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	new_port_speed = vnic_dev_port_speed(fnic->vdev);
	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
		     new_port_speed);
	if (old_port_speed != new_port_speed)
		FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
			      "Current vnic speed set to: %llu\n",
			      new_port_speed);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN")
						);
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

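/*
 * fnic_handle_frame() - worker that passes received FC frames to libFC.
 *
 * Drains fnic->frame_queue and hands each frame to fc_exch_recv(),
 * re-queueing the frame if the fnic is still in a transitional state.
 */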
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

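/*
 * fnic_fcoe_evlist_free() - discard all pending events on fnic->evlist.
 */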
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

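/*
 * fnic_handle_event() - worker that services queued fnic events
 * (VLAN discovery, FCF discovery) while the fnic is in a stable state.
 */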
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/*
 * is_fnic_fip_flogi_reject() - check if a received FIP frame is an
 * encapsulated FLOGI reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame carries a rejected FLOGI response,
 * 0 otherwise.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be
		 * Reject, unsupported command and insufficient resource.
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "FLOGI request rejected by switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "FLOGI request accepted by switch\n");
	}
	return 0;
}

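/*
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 *
 * Resets the known VLAN list, builds a FIP VLAN request addressed to
 * ALL-FCF-MACs, hands it to the FCoE controller's send routine, and
 * arms the FIP timer to wait for a response.
 */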
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer to poll for the response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

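/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification.
 *
 * Collects every VLAN descriptor in the response into fnic->vlans,
 * selects the first entry, programs it into the hardware, and starts
 * FIP solicitation on that VLAN.
 */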
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors in response? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT;	/* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

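/*
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the first
 * discovered VLAN by bringing the FCoE controller link up.
 */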
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT;	/* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

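/*
 * fnic_fcoe_vlan_check() - mark the current VLAN as used once an FCF
 * advertisement is seen on it.
 *
 * Returns 0 if the VLAN is usable, -EINVAL otherwise.
 */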
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

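/*
 * fnic_event_enq() - allocate an event, queue it on fnic->evlist and
 * schedule the event worker. May be called in atomic context.
 */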
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

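/*
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 *
 * Handles VLAN notifications and clear-virtual-link requests itself.
 * Returns 0 when the frame was consumed here, negative on bad input;
 * a positive return means the caller should also hand the frame to
 * libfcoe.
 */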
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the VLAN as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart VLAN discovery */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

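/*
 * fnic_handle_fip_frame() - worker that services the FIP frame queue.
 *
 * Frames not consumed by fnic_fcoe_handle_fip_frame() are checked for
 * an encapsulated FLOGI reject (which triggers a link bounce and fresh
 * VLAN discovery) and then passed to libfcoe via fcoe_ctlr_recv().
 */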
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}

			/*
			 * If there are FLOGI rejects, clear all FCFs and
			 * restart VLAN discovery from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/*
 * fnic_import_rq_eth_pkt() - handle a received FCoE or FIP frame.
 * @fnic:	fnic instance
 * @skb:	Ethernet frame
 *
 * Returns 1 if the frame was consumed (queued as FIP), 0 if it was
 * converted to an FC frame for further processing, -1 if it was dropped.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/*
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance
 * @new:	newly-assigned FCoE MAC address
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/*
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port
 * @new:	newly-assigned FCoE MAC address
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/*
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port
 * @port_id:	assigned FC_ID
 * @fp:		received frame containing the FLOGI accept or NULL
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change back to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

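/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor RQ completion handler.
 *
 * Unmaps the receive buffer, decodes the completion descriptor (FCP
 * offload vs. plain Ethernet), validates CRC/encapsulation status and
 * queues good FC frames for fnic_handle_frame().
 */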
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

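/*
 * fnic_rq_cmpl_handler() - service up to rq_work_to_do completions on
 * every receive queue and replenish the consumed buffers.
 */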
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * fnic_alloc_rq_frame() - allocate and map a receive buffer for the RQ.
 *
 * This routine can be called in interrupt context.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/*
 * fnic_eth_send() - send an Ethernet frame (FIP, no VLAN encapsulation).
 * @fip:	fcoe_ctlr instance
 * @skb:	Ethernet frame to send
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			    DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * fnic_send_frame() - encapsulate an FC frame in FCoE and queue it
 * on the work queue.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send() - libfc transmit entry point.
 * @lp:	libfc local port
 * @fp:	FC frame to send
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/*
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/*
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

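/*
 * fnic_fcoe_reset_vlans() - free every entry on the discovered-VLAN list.
 */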
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all fcf's are free'd
	 * might not be required since we did this before sending vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

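/*
 * fnic_handle_fip_timer() - FIP timer expiry handler.
 *
 * Retries solicitation on the current VLAN, moves on to the next
 * discovered VLAN when the retry budget is exhausted, and restarts
 * VLAN discovery once the list is empty.
 */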
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no VLANs available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all VLANs are in failed state, restart VLAN disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this VLAN, remove it from the
			 * list and try the next one.
			 */
			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT;	/* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies
					(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}