1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/etherdevice.h>
15
16#include "vxge-traffic.h"
17#include "vxge-config.h"
18#include "vxge-main.h"
19
20
21
22
23
24
25
26
27
28
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Clears all latched error/alarm status bits of the virtual path by
 * writing all-ones to the status registers (write-1-to-clear), then
 * programs the mask registers so that only the alarm sources handled by
 * this driver remain unmasked.
 *
 * Returns: VXGE_HW_OK on success, VXGE_HW_ERR_INVALID_HANDLE if @vp is
 * NULL, or VXGE_HW_ERR_VPATH_NOT_OPEN if the vpath is not open.
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	/* Clear every latched error/alarm bit before touching the masks */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	/* Dummy read to make sure the preceding clears have reached HW */
	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts: all-ones in a *_mask register keeps the
	 * corresponding source masked */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual error sources the driver cares about
	 * (a zero bit in the mask register enables that source) */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	/* Only the first vpath listens to network (link) faults; the others
	 * keep the ASIC network error source fully masked */
	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	/* Finally open the vpath-level summary interrupt */
	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;

}
145
146
147
148
149
150
151
152
153
154
155enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
157{
158 u64 val64;
159
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 if (vp == NULL) {
164 status = VXGE_HW_ERR_INVALID_HANDLE;
165 goto exit;
166 }
167
168 vpath = vp->vpath;
169
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172 goto exit;
173 }
174 vp_reg = vpath->vp_reg;
175
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
179
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
186
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
189
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
195
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
198
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
201
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
204
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
207
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
210
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
213
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
216
217exit:
218 return status;
219}
220
221
222
223
224
225
226
227
228
229
230void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{
232
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237
238 return;
239}
240
241
242
243
244
245
246
247
248
249
250void
251vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{
253
254 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258
259 return;
260}
261
262
263
264
265
266
267
268u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
269{
270
271 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
272 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
274 (intr_mode != VXGE_HW_INTR_MODE_DEF))
275 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
276
277 hldev->config.intr_mode = intr_mode;
278 return intr_mode;
279}
280
281
282
283
284
285
286
287
288
289
290
291
/*
 * vxge_hw_device_intr_enable - Enable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Enables the per-vpath interrupts of every deployed virtual path, then
 * (in INTA mode only) clears and unmasks the TIM traffic interrupts, and
 * finally unmasks the device-level summary interrupt.
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	/* TIM tx/rx interrupts are only routed this way for INTA */
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			/* clear pending, then unmask (~val64 keeps all other
			 * sources masked) */
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	/* Dummy read to flush the writes above before unmasking */
	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);

	return;
}
335
336
337
338
339
340
341
342
343
344
345
/*
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Masks the device-level summary interrupt first (so nothing fires while
 * the rest is masked), then the TIM interrupts, then the per-vpath
 * interrupt sources of every deployed virtual path.
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	return;
}
368
369
370
371
372
373
374
375
376
377void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
378{
379 u64 val64;
380
381 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
382 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
383
384 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
385 &hldev->common_reg->titan_mask_all_int);
386
387 return;
388}
389
390
391
392
393
394
395
396
397
398void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
399{
400 u64 val64 = 0;
401
402 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
403 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
404
405 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
406 &hldev->common_reg->titan_mask_all_int);
407
408 return;
409}
410
411
412
413
414
415
416
417
418
419void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
420{
421 u32 val32;
422
423 val32 = readl(&hldev->common_reg->titan_general_int_status);
424}
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
/*
 * vxge_hw_device_begin_irq - Top-level interrupt service entry.
 * @hldev: HW device handle.
 * @skip_alarms: When non-zero, alarm processing is deferred (passed through
 *               to __vxge_hw_vpath_alarm_process).
 * @reason: Out parameter; receives the raw titan_general_int_status value
 *          (0 when the IRQ was not ours or the slot is frozen).
 *
 * Reads the device-level interrupt status and classifies the interrupt:
 * - not ours             -> VXGE_HW_ERR_WRONG_IRQ
 * - all-ones status      -> check adapter_status for a frozen PCI slot
 * - traffic interrupt    -> VXGE_HW_OK (caller polls the rings/fifos)
 * - alarm interrupt      -> process alarms on every deployed vpath and
 *                           return the most severe resulting status
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* Nothing pending: shared line fired for another device */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
		/* All-ones readback usually means the device fell off the
		 * bus; confirm via adapter_status */
		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	/* Align the deployed-vpath bitmap with the per-vpath traffic bits
	 * of titan_general_int_status */
	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			/* Keep the most severe status seen so far */
			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			/* Stop scanning on fatal conditions */
			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
520
521
522
523
524
525
526
527
528enum vxge_hw_status
529__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
530{
531
532
533
534 if (hldev->link_state == VXGE_HW_LINK_UP)
535 goto exit;
536
537 hldev->link_state = VXGE_HW_LINK_UP;
538
539
540 if (hldev->uld_callbacks.link_up)
541 hldev->uld_callbacks.link_up(hldev);
542exit:
543 return VXGE_HW_OK;
544}
545
546
547
548
549
550
551
552
553enum vxge_hw_status
554__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
555{
556
557
558
559 if (hldev->link_state == VXGE_HW_LINK_DOWN)
560 goto exit;
561
562 hldev->link_state = VXGE_HW_LINK_DOWN;
563
564
565 if (hldev->uld_callbacks.link_down)
566 hldev->uld_callbacks.link_down(hldev);
567exit:
568 return VXGE_HW_OK;
569}
570
571
572
573
574
575
576
577
578
579enum vxge_hw_status
580__vxge_hw_device_handle_error(
581 struct __vxge_hw_device *hldev,
582 u32 vp_id,
583 enum vxge_hw_event type)
584{
585 switch (type) {
586 case VXGE_HW_EVENT_UNKNOWN:
587 break;
588 case VXGE_HW_EVENT_RESET_START:
589 case VXGE_HW_EVENT_RESET_COMPLETE:
590 case VXGE_HW_EVENT_LINK_DOWN:
591 case VXGE_HW_EVENT_LINK_UP:
592 goto out;
593 case VXGE_HW_EVENT_ALARM_CLEARED:
594 goto out;
595 case VXGE_HW_EVENT_ECCERR:
596 case VXGE_HW_EVENT_MRPCIM_ECCERR:
597 goto out;
598 case VXGE_HW_EVENT_FIFO_ERR:
599 case VXGE_HW_EVENT_VPATH_ERR:
600 case VXGE_HW_EVENT_CRITICAL_ERR:
601 case VXGE_HW_EVENT_SERR:
602 break;
603 case VXGE_HW_EVENT_SRPCIM_SERR:
604 case VXGE_HW_EVENT_MRPCIM_SERR:
605 goto out;
606 case VXGE_HW_EVENT_SLOT_FREEZE:
607 break;
608 default:
609 vxge_assert(0);
610 goto out;
611 }
612
613
614 if (hldev->uld_callbacks.crit_err)
615 hldev->uld_callbacks.crit_err(
616 (struct __vxge_hw_device *)hldev,
617 type, vp_id);
618out:
619
620 return VXGE_HW_OK;
621}
622
623
624
625
626
627
628
629
630
631
632
633void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
634{
635
636 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
637 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
638 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
639 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
640 &hldev->common_reg->tim_int_status0);
641 }
642
643 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
644 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
645 __vxge_hw_pio_mem_write32_upper(
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
647 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
648 &hldev->common_reg->tim_int_status1);
649 }
650
651 return;
652}
653
654
655
656
657
658
659
660
661
662
663enum vxge_hw_status
664vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
665{
666 void **tmp_arr;
667
668 if (channel->reserve_ptr - channel->reserve_top > 0) {
669_alloc_after_swap:
670 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
671
672 return VXGE_HW_OK;
673 }
674
675
676
677
678
679
680
681 if (channel->length - channel->free_ptr > 0) {
682
683 tmp_arr = channel->reserve_arr;
684 channel->reserve_arr = channel->free_arr;
685 channel->free_arr = tmp_arr;
686 channel->reserve_ptr = channel->length;
687 channel->reserve_top = channel->free_ptr;
688 channel->free_ptr = channel->length;
689
690 channel->stats->reserve_free_swaps_cnt++;
691
692 goto _alloc_after_swap;
693 }
694
695 channel->stats->full_cnt++;
696
697 *dtrh = NULL;
698 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
699}
700
701
702
703
704
705
706
707
708
709void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
710{
711 vxge_assert(channel->work_arr[channel->post_index] == NULL);
712
713 channel->work_arr[channel->post_index++] = dtrh;
714
715
716 if (channel->post_index == channel->length)
717 channel->post_index = 0;
718}
719
720
721
722
723
724
725
726
727
728void
729vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
730{
731 vxge_assert(channel->compl_index < channel->length);
732
733 *dtrh = channel->work_arr[channel->compl_index];
734 prefetch(*dtrh);
735}
736
737
738
739
740
741
742
743
744void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
745{
746 channel->work_arr[channel->compl_index] = NULL;
747
748
749 if (++channel->compl_index == channel->length)
750 channel->compl_index = 0;
751
752 channel->stats->total_compl_cnt++;
753}
754
755
756
757
758
759
760
761
762
763void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
764{
765 channel->free_arr[--channel->free_ptr] = dtrh;
766}
767
768
769
770
771
772
773
774
775int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
776{
777 return (channel->reserve_ptr - channel->reserve_top) +
778 (channel->length - channel->free_ptr);
779}
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
796 void **rxdh)
797{
798 enum vxge_hw_status status;
799 struct __vxge_hw_channel *channel;
800
801 channel = &ring->channel;
802
803 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
804
805 if (status == VXGE_HW_OK) {
806 struct vxge_hw_ring_rxd_1 *rxdp =
807 (struct vxge_hw_ring_rxd_1 *)*rxdh;
808
809 rxdp->control_0 = rxdp->control_1 = 0;
810 }
811
812 return status;
813}
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
840{
841 struct __vxge_hw_channel *channel;
842
843 channel = &ring->channel;
844
845 vxge_hw_channel_dtr_free(channel, rxdh);
846
847}
848
849
850
851
852
853
854
855
856void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
857{
858 struct __vxge_hw_channel *channel;
859
860 channel = &ring->channel;
861
862 vxge_hw_channel_dtr_post(channel, rxdh);
863}
864
865
866
867
868
869
870
871
872void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
873{
874 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
875 struct __vxge_hw_channel *channel;
876
877 channel = &ring->channel;
878
879 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
880
881 if (ring->stats->common_stats.usage_cnt > 0)
882 ring->stats->common_stats.usage_cnt--;
883}
884
885
886
887
888
889
890
891
892
893
894
/*
 * vxge_hw_ring_rxd_post - Post an RxD and transfer it to the adapter.
 * @ring: Ring to post on.
 * @rxdh: RxD handle.
 *
 * Issues a write barrier, flips the descriptor's ownership bit to the
 * adapter, queues it in the channel work array and decrements the usage
 * counter. The wmb() must precede the ownership flip so the adapter
 * never sees the OWN bit before the descriptor contents are visible.
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	/* descriptor fields must be globally visible before OWN is set */
	wmb();
	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
910
911
912
913
914
915
916
917
/*
 * vxge_hw_ring_rxd_post_post_wmb - Barrier variant of rxd_post_post.
 * @ring: Ring the descriptor belongs to.
 * @rxdh: RxD handle.
 *
 * Issues a write barrier so descriptor contents are visible before
 * ownership is handed to the adapter, then delegates to
 * vxge_hw_ring_rxd_post_post().
 *
 * Fix: removed the set-but-unused local `channel` (compiler warning).
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
/*
 * vxge_hw_ring_rxd_next_completed - Fetch the next completed RxD.
 * @ring: Ring to poll.
 * @rxdh: Out parameter; receives the completed RxD handle, or NULL.
 * @t_code: Out parameter; receives the descriptor's transfer code.
 *
 * Peeks at the descriptor at the ring's completion index. If the adapter
 * has released it (OWN bit clear), the descriptor is consumed, its t_code
 * extracted and the usage statistics updated.
 *
 * Returns: VXGE_HW_OK when a completed descriptor was handed out, or
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS when the slot is empty or
 * the adapter still owns the descriptor.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether it is not the end: adapter clears OWN on completion */
	if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
				0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		/* track the high-water mark of in-use descriptors */
		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* adapter still owns the descriptor: nothing completed yet */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023enum vxge_hw_status vxge_hw_ring_handle_tcode(
1024 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1025{
1026 struct __vxge_hw_channel *channel;
1027 enum vxge_hw_status status = VXGE_HW_OK;
1028
1029 channel = &ring->channel;
1030
1031
1032
1033
1034
1035
1036 if (t_code == 0 || t_code == 5) {
1037 status = VXGE_HW_OK;
1038 goto exit;
1039 }
1040
1041 if (t_code > 0xF) {
1042 status = VXGE_HW_ERR_INVALID_TCODE;
1043 goto exit;
1044 }
1045
1046 ring->stats->rxd_t_code_err_cnt[t_code]++;
1047exit:
1048 return status;
1049}
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1063 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1064{
1065 struct __vxge_hw_channel *channel;
1066
1067 channel = &fifo->channel;
1068
1069 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1070 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1071 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1072 &fifo->nofl_db->control_0);
1073
1074 mmiowb();
1075
1076 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1077
1078 mmiowb();
1079}
1080
1081
1082
1083
1084
1085
/*
 * vxge_hw_fifo_free_txdl_count_get - Number of TxDLs available for reserve.
 * @fifoh: Fifo to query.
 *
 * Thin wrapper over vxge_hw_channel_dtr_count() for the fifo's channel.
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
/*
 * vxge_hw_fifo_txdl_reserve - Reserve a TxDL from the fifo.
 * @fifo: Fifo to allocate from.
 * @txdlh: Out parameter; receives the TxDL handle on success.
 * @txdl_priv: Out parameter; receives the driver-private area associated
 *             with the TxDL (taken from the TxD's host_control field).
 *
 * Allocates a descriptor list from the channel, resets its per-list
 * private bookkeeping (fragment counters, alignment buffer cursor) and
 * zeroes the control words of all max_frags TxDs in the list.
 *
 * Returns: VXGE_HW_OK, or VXGE_HW_INF_OUT_OF_DESCRIPTORS when the fifo
 * is exhausted.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private bookkeeping */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		/* clear every TxD of the list */
		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1167 void *txdlh, u32 frag_idx,
1168 dma_addr_t dma_pointer, u32 size)
1169{
1170 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1171 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1172 struct __vxge_hw_channel *channel;
1173
1174 channel = &fifo->channel;
1175
1176 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1177 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1178
1179 if (frag_idx != 0)
1180 txdp->control_0 = txdp->control_1 = 0;
1181 else {
1182 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1183 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1184 txdp->control_1 |= fifo->interrupt_type;
1185 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1186 fifo->tx_intr_num);
1187 if (txdl_priv->frags) {
1188 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1189 (txdl_priv->frags - 1);
1190 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1191 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1192 }
1193 }
1194
1195 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1196
1197 txdp->buffer_pointer = (u64)dma_pointer;
1198 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1199 fifo->stats->total_buffers++;
1200 txdl_priv->frags++;
1201}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1216{
1217 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1218 struct vxge_hw_fifo_txd *txdp_last;
1219 struct vxge_hw_fifo_txd *txdp_first;
1220 struct __vxge_hw_channel *channel;
1221
1222 channel = &fifo->channel;
1223
1224 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1225 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1226
1227 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1228 txdp_last->control_0 |=
1229 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1230 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1231
1232 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1233
1234 __vxge_hw_non_offload_db_post(fifo,
1235 (u64)(size_t)txdl_priv->dma_addr,
1236 txdl_priv->frags - 1,
1237 fifo->no_snoop_bits);
1238
1239 fifo->stats->total_posts++;
1240 fifo->stats->common_stats.usage_cnt++;
1241 if (fifo->stats->common_stats.usage_max <
1242 fifo->stats->common_stats.usage_cnt)
1243 fifo->stats->common_stats.usage_max =
1244 fifo->stats->common_stats.usage_cnt;
1245}
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
/*
 * vxge_hw_fifo_txdl_next_completed - Fetch the next completed TxDL.
 * @fifo: Fifo to poll.
 * @txdlh: Out parameter; receives the completed TxDL handle, or NULL.
 * @t_code: Out parameter; receives the TxD transfer code.
 *
 * Peeks at the TxDL at the fifo's completion index. If the adapter has
 * released it (OWN bit clear on the first TxD), the list is consumed and
 * its t_code extracted.
 *
 * Returns: VXGE_HW_OK when a completed TxDL was handed out, or
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS when the slot is empty or
 * the adapter still owns the list.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it: adapter clears OWN on completion */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1335 void *txdlh,
1336 enum vxge_hw_fifo_tcode t_code)
1337{
1338 struct __vxge_hw_channel *channel;
1339
1340 enum vxge_hw_status status = VXGE_HW_OK;
1341 channel = &fifo->channel;
1342
1343 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1344 status = VXGE_HW_ERR_INVALID_TCODE;
1345 goto exit;
1346 }
1347
1348 fifo->stats->txd_t_code_err_cnt[t_code]++;
1349exit:
1350 return status;
1351}
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1378{
1379 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1380 u32 max_frags;
1381 struct __vxge_hw_channel *channel;
1382
1383 channel = &fifo->channel;
1384
1385 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1386 (struct vxge_hw_fifo_txd *)txdlh);
1387
1388 max_frags = fifo->config->max_frags;
1389
1390 vxge_hw_channel_dtr_free(channel, txdlh);
1391}
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408enum vxge_hw_status
1409vxge_hw_vpath_mac_addr_add(
1410 struct __vxge_hw_vpath_handle *vp,
1411 u8 (macaddr)[ETH_ALEN],
1412 u8 (macaddr_mask)[ETH_ALEN],
1413 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1414{
1415 u32 i;
1416 u64 data1 = 0ULL;
1417 u64 data2 = 0ULL;
1418 enum vxge_hw_status status = VXGE_HW_OK;
1419
1420 if (vp == NULL) {
1421 status = VXGE_HW_ERR_INVALID_HANDLE;
1422 goto exit;
1423 }
1424
1425 for (i = 0; i < ETH_ALEN; i++) {
1426 data1 <<= 8;
1427 data1 |= (u8)macaddr[i];
1428
1429 data2 <<= 8;
1430 data2 |= (u8)macaddr_mask[i];
1431 }
1432
1433 switch (duplicate_mode) {
1434 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1435 i = 0;
1436 break;
1437 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1438 i = 1;
1439 break;
1440 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1441 i = 2;
1442 break;
1443 default:
1444 i = 0;
1445 break;
1446 }
1447
1448 status = __vxge_hw_vpath_rts_table_set(vp,
1449 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1450 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1451 0,
1452 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1453 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1454 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1455exit:
1456 return status;
1457}
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471enum vxge_hw_status
1472vxge_hw_vpath_mac_addr_get(
1473 struct __vxge_hw_vpath_handle *vp,
1474 u8 (macaddr)[ETH_ALEN],
1475 u8 (macaddr_mask)[ETH_ALEN])
1476{
1477 u32 i;
1478 u64 data1 = 0ULL;
1479 u64 data2 = 0ULL;
1480 enum vxge_hw_status status = VXGE_HW_OK;
1481
1482 if (vp == NULL) {
1483 status = VXGE_HW_ERR_INVALID_HANDLE;
1484 goto exit;
1485 }
1486
1487 status = __vxge_hw_vpath_rts_table_get(vp,
1488 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1489 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1490 0, &data1, &data2);
1491
1492 if (status != VXGE_HW_OK)
1493 goto exit;
1494
1495 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1496
1497 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1498
1499 for (i = ETH_ALEN; i > 0; i--) {
1500 macaddr[i-1] = (u8)(data1 & 0xFF);
1501 data1 >>= 8;
1502
1503 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1504 data2 >>= 8;
1505 }
1506exit:
1507 return status;
1508}
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523enum vxge_hw_status
1524vxge_hw_vpath_mac_addr_get_next(
1525 struct __vxge_hw_vpath_handle *vp,
1526 u8 (macaddr)[ETH_ALEN],
1527 u8 (macaddr_mask)[ETH_ALEN])
1528{
1529 u32 i;
1530 u64 data1 = 0ULL;
1531 u64 data2 = 0ULL;
1532 enum vxge_hw_status status = VXGE_HW_OK;
1533
1534 if (vp == NULL) {
1535 status = VXGE_HW_ERR_INVALID_HANDLE;
1536 goto exit;
1537 }
1538
1539 status = __vxge_hw_vpath_rts_table_get(vp,
1540 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1541 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1542 0, &data1, &data2);
1543
1544 if (status != VXGE_HW_OK)
1545 goto exit;
1546
1547 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1548
1549 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1550
1551 for (i = ETH_ALEN; i > 0; i--) {
1552 macaddr[i-1] = (u8)(data1 & 0xFF);
1553 data1 >>= 8;
1554
1555 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1556 data2 >>= 8;
1557 }
1558
1559exit:
1560 return status;
1561}
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576enum vxge_hw_status
1577vxge_hw_vpath_mac_addr_delete(
1578 struct __vxge_hw_vpath_handle *vp,
1579 u8 (macaddr)[ETH_ALEN],
1580 u8 (macaddr_mask)[ETH_ALEN])
1581{
1582 u32 i;
1583 u64 data1 = 0ULL;
1584 u64 data2 = 0ULL;
1585 enum vxge_hw_status status = VXGE_HW_OK;
1586
1587 if (vp == NULL) {
1588 status = VXGE_HW_ERR_INVALID_HANDLE;
1589 goto exit;
1590 }
1591
1592 for (i = 0; i < ETH_ALEN; i++) {
1593 data1 <<= 8;
1594 data1 |= (u8)macaddr[i];
1595
1596 data2 <<= 8;
1597 data2 |= (u8)macaddr_mask[i];
1598 }
1599
1600 status = __vxge_hw_vpath_rts_table_set(vp,
1601 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1602 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1603 0,
1604 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1605 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1606exit:
1607 return status;
1608}
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621enum vxge_hw_status
1622vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1623{
1624 enum vxge_hw_status status = VXGE_HW_OK;
1625
1626 if (vp == NULL) {
1627 status = VXGE_HW_ERR_INVALID_HANDLE;
1628 goto exit;
1629 }
1630
1631 status = __vxge_hw_vpath_rts_table_set(vp,
1632 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1633 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1634 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1635exit:
1636 return status;
1637}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649enum vxge_hw_status
1650vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1651{
1652 u64 data;
1653 enum vxge_hw_status status = VXGE_HW_OK;
1654
1655 if (vp == NULL) {
1656 status = VXGE_HW_ERR_INVALID_HANDLE;
1657 goto exit;
1658 }
1659
1660 status = __vxge_hw_vpath_rts_table_get(vp,
1661 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1662 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1663 0, vid, &data);
1664
1665 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1666exit:
1667 return status;
1668}
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680enum vxge_hw_status
1681vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1682{
1683 u64 data;
1684 enum vxge_hw_status status = VXGE_HW_OK;
1685
1686 if (vp == NULL) {
1687 status = VXGE_HW_ERR_INVALID_HANDLE;
1688 goto exit;
1689 }
1690
1691 status = __vxge_hw_vpath_rts_table_get(vp,
1692 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1693 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1694 0, vid, &data);
1695
1696 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1697exit:
1698 return status;
1699}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712enum vxge_hw_status
1713vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1714{
1715 enum vxge_hw_status status = VXGE_HW_OK;
1716
1717 if (vp == NULL) {
1718 status = VXGE_HW_ERR_INVALID_HANDLE;
1719 goto exit;
1720 }
1721
1722 status = __vxge_hw_vpath_rts_table_set(vp,
1723 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1724 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1725 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1726exit:
1727 return status;
1728}
1729
1730
1731
1732
1733
1734
1735
1736
1737
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Sets the unicast-all, multicast-all, broadcast and all-VID enable bits
 * in rxmac_vcfg0 for this vpath, unless unicast-all is already set.
 *
 * See also: vxge_hw_vpath_promisc_disable()
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Without MRPCIM access rights the register may not be writable;
	 * silently report success (note: this early return bypasses the
	 * exit label, which is harmless since status would be OK anyway). */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	/* Read-modify-write: only touch the register if promiscuous
	 * (unicast-all) reception is not already enabled */
	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1781 struct __vxge_hw_vpath_handle *vp)
1782{
1783 u64 val64;
1784 struct __vxge_hw_virtualpath *vpath;
1785 enum vxge_hw_status status = VXGE_HW_OK;
1786
1787 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1788 status = VXGE_HW_ERR_INVALID_HANDLE;
1789 goto exit;
1790 }
1791
1792 vpath = vp->vpath;
1793
1794 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1795
1796 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1797
1798 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1799 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1800 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1801
1802 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1803 }
1804exit:
1805 return status;
1806}
1807
1808
1809
1810
1811
1812
1813
1814enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1815 struct __vxge_hw_vpath_handle *vp)
1816{
1817 u64 val64;
1818 struct __vxge_hw_virtualpath *vpath;
1819 enum vxge_hw_status status = VXGE_HW_OK;
1820
1821 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1822 status = VXGE_HW_ERR_INVALID_HANDLE;
1823 goto exit;
1824 }
1825
1826 vpath = vp->vpath;
1827
1828 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1829
1830 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1831 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1832 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1833 }
1834exit:
1835 return status;
1836}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1847 struct __vxge_hw_vpath_handle *vp)
1848{
1849 u64 val64;
1850 struct __vxge_hw_virtualpath *vpath;
1851 enum vxge_hw_status status = VXGE_HW_OK;
1852
1853 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1854 status = VXGE_HW_ERR_INVALID_HANDLE;
1855 goto exit;
1856 }
1857
1858 vpath = vp->vpath;
1859
1860 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1861
1862 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1863 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1864 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1865 }
1866exit:
1867 return status;
1868}
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879enum vxge_hw_status
1880vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1881{
1882 u64 val64;
1883 struct __vxge_hw_virtualpath *vpath;
1884 enum vxge_hw_status status = VXGE_HW_OK;
1885
1886 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1887 status = VXGE_HW_ERR_INVALID_HANDLE;
1888 goto exit;
1889 }
1890
1891 vpath = vp->vpath;
1892
1893 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1894
1895 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1896 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1897 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1898 }
1899exit:
1900 return status;
1901}
1902
1903
1904
1905
1906
1907
1908
1909
1910
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: When non-zero, do not clear the latched alarm registers.
 *
 * Decodes vpath_general_int_status for this vpath, bumps the matching
 * per-vpath software error counters, notifies the upper layer of link
 * up/down and error events, and (unless @skip_alarms) writes
 * VXGE_HW_INTR_MASK_ALL back to the latched alarm registers to clear them.
 * The most severe event observed is accumulated into alarm_event via
 * VXGE_HW_SET_LEVEL and translated into the return status at the end.
 */
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath,
			u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		/* out2 skips the vpath_alarms counter: no vpath to charge */
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	/* An all-ones readback means the adapter no longer responds */
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	/* Any status bit outside the four known interrupt groups is
	 * unexpected; count it and report an unknown event */
	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	/* XMAC group: network (link) fault / recovery indications */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			/* Sustained fault: fault asserted (now or latched)
			 * without a matching OK indication */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				/* Mask further fault alarms until recovery */
				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			/* Sustained OK: link back up without a pending fault */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {

				sw_stats->error_stats.network_sustained_ok++;

				/* Mask further OK alarms until next fault */
				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			/* Clear the latched network alarm bits */
			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			/* Network alarms handled; callers that skip alarm
			 * clearing stop here without touching PIC/WRDMA */
			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	/* PIC group: PPIF general and KDFC control errors */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			/* Each error bit is counted only if not masked */
			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

	}

	/* WRDMA group: receive (PRC) alarms */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_VPATH_ERR,
						alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				 & ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_ALARM_CLEARED,
						alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	/* Map remaining events to their status codes by severity */
	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189enum vxge_hw_status vxge_hw_vpath_alarm_process(
2190 struct __vxge_hw_vpath_handle *vp,
2191 u32 skip_alarms)
2192{
2193 enum vxge_hw_status status = VXGE_HW_OK;
2194
2195 if (vp == NULL) {
2196 status = VXGE_HW_ERR_INVALID_HANDLE;
2197 goto exit;
2198 }
2199
2200 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2201exit:
2202 return status;
2203}
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217enum vxge_hw_status
2218vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2219 int alarm_msix_id)
2220{
2221 u64 val64;
2222 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2223 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2224 u32 first_vp_id = vpath->hldev->first_vp_id;
2225
2226 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2227 (first_vp_id * 4) + tim_msix_id[0]) |
2228 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2229 (first_vp_id * 4) + tim_msix_id[1]) |
2230 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2231 (first_vp_id * 4) + tim_msix_id[2]);
2232
2233 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2234 (first_vp_id * 4) + tim_msix_id[3]);
2235
2236 writeq(val64, &vp_reg->interrupt_cfg0);
2237
2238 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2239 (first_vp_id * 4) + alarm_msix_id),
2240 &vp_reg->interrupt_cfg2);
2241
2242 if (vpath->hldev->config.intr_mode ==
2243 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2244 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2245 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2246 0, 32), &vp_reg->one_shot_vect1_en);
2247 }
2248
2249 if (vpath->hldev->config.intr_mode ==
2250 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2251 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2252 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2253 0, 32), &vp_reg->one_shot_vect2_en);
2254
2255 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2256 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2257 0, 32), &vp_reg->one_shot_vect3_en);
2258 }
2259
2260 return VXGE_HW_OK;
2261}
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275void
2276vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2277{
2278 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2279 __vxge_hw_pio_mem_write32_upper(
2280 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2281 (msix_id / 4)), 0, 32),
2282 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2283
2284 return;
2285}
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299void
2300vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2301{
2302 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2303 if (hldev->config.intr_mode ==
2304 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2305 __vxge_hw_pio_mem_write32_upper(
2306 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2307 (msix_id/4)), 0, 32),
2308 &hldev->common_reg->
2309 clr_msix_one_shot_vec[msix_id%4]);
2310 } else {
2311 __vxge_hw_pio_mem_write32_upper(
2312 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2313 (msix_id/4)), 0, 32),
2314 &hldev->common_reg->
2315 clear_msix_mask_vect[msix_id%4]);
2316 }
2317
2318 return;
2319}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333void
2334vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2335{
2336 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2337 __vxge_hw_pio_mem_write32_upper(
2338 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2339 (msix_id/4)), 0, 32),
2340 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2341
2342 return;
2343}
2344
2345
2346
2347
2348
2349
2350
2351
2352void
2353vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2354{
2355
2356 __vxge_hw_pio_mem_write32_upper(
2357 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2358 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2359
2360 return;
2361}
2362
2363
2364
2365
2366
2367
2368
2369
2370
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * ORs this vpath's Tx/Rx bits into the shared tim_int_mask0 (64-bit)
 * and tim_int_mask1 (32-bit) mask registers, masking its traffic
 * interrupts in INTA mode.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* Fills in the per-vpath Tx/Rx bits for both mask registers */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	/* tim_int_mask1 is accessed as a 32-bit register (readl +
	 * upper-half write below) — presumably only the upper word is
	 * meaningful; confirm against the register layout */
	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}

	return;
}
2402
2403
2404
2405
2406
2407
2408
2409
2410
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Clears this vpath's Tx/Rx bits from the shared tim_int_mask0 and
 * tim_int_mask1 mask registers, unmasking its traffic interrupts in
 * INTA mode.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* Fills in the per-vpath Tx/Rx bits for both mask registers */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	/* NOTE(review): val64 still holds the tim_int_mask0 contents here;
	 * the mask counterpart re-reads tim_int_mask1 before this point.
	 * This asymmetry looks like a latent bug — confirm against the
	 * hardware spec before changing, since it alters register traffic. */
	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}

	return;
}
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * Polls the ring for completed receive descriptors, hands them to the
 * driver's completion callback, and rings the PRC doorbell with the
 * number of descriptor quadwords replenished once enough completions
 * (rxds_limit) have accumulated.
 *
 * Returns: the status of vxge_hw_ring_rxd_next_completed(); VXGE_HW_OK
 * when a completed descriptor was found and processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	/* The callback path updates cmpl_cnt; reset before polling */
	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/* Doorbell counts quadwords: each RxD occupies
			 * 4 qwords */
			new_count = (ring->doorbell_cnt * 4);

			/* Once a whole block of RxDs has been replenished,
			 * account 4 extra qwords (presumably the per-block
			 * overhead — confirm against the PRC spec) */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Keep only the remainder within the block */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Readback after the doorbell write; the value is
			 * discarded — presumably flushes the posted write */
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2512 struct sk_buff ***skb_ptr, int nr_skb,
2513 int *more)
2514{
2515 enum vxge_hw_fifo_tcode t_code;
2516 void *first_txdlh;
2517 enum vxge_hw_status status = VXGE_HW_OK;
2518 struct __vxge_hw_channel *channel;
2519
2520 channel = &fifo->channel;
2521
2522 status = vxge_hw_fifo_txdl_next_completed(fifo,
2523 &first_txdlh, &t_code);
2524 if (status == VXGE_HW_OK)
2525 if (fifo->callback(fifo, first_txdlh, t_code,
2526 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2527 status = VXGE_HW_COMPLETIONS_REMAIN;
2528
2529 return status;
2530}
2531