1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/etherdevice.h>
15#include <linux/io-64-nonatomic-lo-hi.h>
16#include <linux/prefetch.h>
17
18#include "vxge-traffic.h"
19#include "vxge-config.h"
20#include "vxge-main.h"
21
22
23
24
25
26
27
28
29
30
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Clears the latched per-vpath alarm registers (writing all-ones to a
 * *_reg register clears its bits), flushes the clears with a read of
 * vpath_general_int_status, then programs the *_mask registers so that
 * only the alarm sources serviced by __vxge_hw_vpath_alarm_process()
 * remain unmasked (a set mask bit disables the source).
 *
 * Returns: VXGE_HW_OK on success, VXGE_HW_ERR_INVALID_HANDLE when @vp is
 * NULL, VXGE_HW_ERR_VPATH_NOT_OPEN when the vpath is not open.
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	/* Clear all latched alarm bits before touching the masks. */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_status);

	/* Read back to flush the posted clears before reprogramming. */
	readq(&vp_reg->vpath_general_int_status);

	/* Mask off the sources this driver does not service. */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_mask);

	/* Mask the FIFO1/FIFO2 and statsb general-error sources; FIFO0
	 * stays unmasked (the alarm handler only services FIFO0 bits).
	 * NOTE(review): this is the only masked-value write done with
	 * writeq() instead of __vxge_hw_pio_mem_write32_upper() -- the
	 * (u32) value lands in the low 32 bits here; confirm against the
	 * register layout that this is intentional.
	 */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
	VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
	VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
	VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
	&vp_reg->general_errors_mask);

	/* Mask the FIFO1/FIFO2 kdfcctl sources; FIFO0 stays unmasked. */
	__vxge_hw_pio_mem_write32_upper(
	(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
	&vp_reg->kdfcctl_errors_mask);

	/* Zero mask == everything in this register is enabled. */
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	/* Only the ring-bump PRC alarm is masked. */
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	/* Only the first vpath listens to the shared network-fault
	 * alarms; all other vpaths mask them entirely.
	 */
	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
		&vp_reg->asic_ntwk_vp_err_mask);

	/* Finally open the top-level vpath interrupt gate. */
	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;

}
145
146
147
148
149
150
151
152
153
154
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Masks every per-vpath interrupt source by writing all-ones into each
 * *_mask register (a set mask bit disables the source).  The top-level
 * vpath_general_int_mask is written first so no further alarms are
 * delivered while the individual sources are being masked.
 *
 * Returns: VXGE_HW_OK on success, VXGE_HW_ERR_INVALID_HANDLE when @vp is
 * NULL, VXGE_HW_ERR_VPATH_NOT_OPEN when the vpath is not open.
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	/* Gate off the whole vpath first. */
	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	/* kdfcctl_errors_mask takes a full 64-bit write. */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}
216
217void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
218{
219 struct vxge_hw_vpath_reg __iomem *vp_reg;
220 struct vxge_hw_vp_config *config;
221 u64 val64;
222
223 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
224 return;
225
226 vp_reg = fifo->vp_reg;
227 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
228
229 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
230 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
231 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
232 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
233 fifo->tim_tti_cfg1_saved = val64;
234 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
235 }
236}
237
238void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
239{
240 u64 val64 = ring->tim_rti_cfg1_saved;
241
242 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
243 ring->tim_rti_cfg1_saved = val64;
244 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
245}
246
247void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
248{
249 u64 val64 = fifo->tim_tti_cfg3_saved;
250 u64 timer = (fifo->rtimer * 1000) / 272;
251
252 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
253 if (timer)
254 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
255 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
256
257 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
258
259
260
261}
262
263void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
264{
265 u64 val64 = ring->tim_rti_cfg3_saved;
266 u64 timer = (ring->rtimer * 1000) / 272;
267
268 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
269 if (timer)
270 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
271 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
272
273 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
274
275
276
277}
278
279
280
281
282
283
284
285
286
287
288void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
289{
290
291 __vxge_hw_pio_mem_write32_upper(
292 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
293 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
294}
295
296
297
298
299
300
301
302
303
304
305void
306vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
307{
308
309 __vxge_hw_pio_mem_write32_upper(
310 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
311 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
312}
313
314
315
316
317
318
319
320
321
322
323
324void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
325{
326 __vxge_hw_pio_mem_write32_upper(
327 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
328 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
329}
330
331
332
333
334
335
336
337u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
338{
339
340 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
341 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
342 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
343 (intr_mode != VXGE_HW_INTR_MODE_DEF))
344 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
345
346 hldev->config.intr_mode = intr_mode;
347 return intr_mode;
348}
349
350
351
352
353
354
355
356
357
358
/*
 * vxge_hw_device_intr_enable - Enable interrupts on the device.
 * @hldev: HW device handle.
 *
 * Masks everything at the top level, enables per-vpath interrupts for
 * each deployed vpath, and (in line-interrupt mode only) clears any
 * pending TIM status bits and unmasks exactly the TX/RX TIM bits this
 * device uses.  A read of titan_general_int_status flushes the posted
 * writes before the final top-level unmask.
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	/* Bring up each deployed vpath's interrupt sources. */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	/* TIM programming is only needed for INTA (line) mode. */
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			/* Clear pending status, then unmask only these
			 * bits (~val64 leaves everything else masked).
			 */
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
		}
	}

	/* Flush the posted writes before unmasking at the top. */
	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
402
403
404
405
406
407
408
409
410
411void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
412{
413 u32 i;
414
415 vxge_hw_device_mask_all(hldev);
416
417
418 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
419 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
420 &hldev->common_reg->tim_int_mask1);
421
422 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
423
424 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
425 continue;
426
427 vxge_hw_vpath_intr_disable(
428 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
429 }
430}
431
432
433
434
435
436
437
438
439
440void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
441{
442 u64 val64;
443
444 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
445 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
446
447 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
448 &hldev->common_reg->titan_mask_all_int);
449}
450
451
452
453
454
455
456
457
458
459void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
460{
461 u64 val64 = 0;
462
463 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
464 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
465
466 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
467 &hldev->common_reg->titan_mask_all_int);
468}
469
470
471
472
473
474
475
476
477
/*
 * vxge_hw_device_flush_io - Flush posted MMIO writes.
 * @hldev: HW device handle.
 *
 * Reads titan_general_int_status purely for its side effect of forcing
 * previously posted writes out to the adapter; the value is discarded.
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	readl(&hldev->common_reg->titan_general_int_status);
}
482
483
484
485
486
487
488
489
490
491static enum vxge_hw_status
492__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
493 enum vxge_hw_event type)
494{
495 switch (type) {
496 case VXGE_HW_EVENT_UNKNOWN:
497 break;
498 case VXGE_HW_EVENT_RESET_START:
499 case VXGE_HW_EVENT_RESET_COMPLETE:
500 case VXGE_HW_EVENT_LINK_DOWN:
501 case VXGE_HW_EVENT_LINK_UP:
502 goto out;
503 case VXGE_HW_EVENT_ALARM_CLEARED:
504 goto out;
505 case VXGE_HW_EVENT_ECCERR:
506 case VXGE_HW_EVENT_MRPCIM_ECCERR:
507 goto out;
508 case VXGE_HW_EVENT_FIFO_ERR:
509 case VXGE_HW_EVENT_VPATH_ERR:
510 case VXGE_HW_EVENT_CRITICAL_ERR:
511 case VXGE_HW_EVENT_SERR:
512 break;
513 case VXGE_HW_EVENT_SRPCIM_SERR:
514 case VXGE_HW_EVENT_MRPCIM_SERR:
515 goto out;
516 case VXGE_HW_EVENT_SLOT_FREEZE:
517 break;
518 default:
519 vxge_assert(0);
520 goto out;
521 }
522
523
524 if (hldev->uld_callbacks->crit_err)
525 hldev->uld_callbacks->crit_err(hldev,
526 type, vp_id);
527out:
528
529 return VXGE_HW_OK;
530}
531
532
533
534
535
536
537
538
539static enum vxge_hw_status
540__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
541{
542
543
544
545 if (hldev->link_state == VXGE_HW_LINK_DOWN)
546 goto exit;
547
548 hldev->link_state = VXGE_HW_LINK_DOWN;
549
550
551 if (hldev->uld_callbacks->link_down)
552 hldev->uld_callbacks->link_down(hldev);
553exit:
554 return VXGE_HW_OK;
555}
556
557
558
559
560
561
562
563
564static enum vxge_hw_status
565__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
566{
567
568
569
570 if (hldev->link_state == VXGE_HW_LINK_UP)
571 goto exit;
572
573 hldev->link_state = VXGE_HW_LINK_UP;
574
575
576 if (hldev->uld_callbacks->link_up)
577 hldev->uld_callbacks->link_up(hldev);
578exit:
579 return VXGE_HW_OK;
580}
581
582
583
584
585
586
587
588
589
/*
 * __vxge_hw_vpath_alarm_process - Process vpath alarms.
 * @vpath: virtual path to service (may be NULL).
 * @skip_alarms: when non-zero, latched alarm registers are NOT cleared.
 *
 * Decodes vpath_general_int_status and walks the XMAC, PIC (general /
 * kdfcctl error) and WRDMA (PRC) alarm sections, bumping the matching
 * sw_stats error counters.  Sources whose mask bit is set are ignored
 * (each test is val64 & BIT & ~mask64).  Link transitions detected in
 * the XMAC section are forwarded to the link up/down handlers.
 *
 * Returns: VXGE_HW_OK when nothing critical was found; otherwise the
 * error code matching the highest-severity event accumulated via
 * VXGE_HW_SET_LEVEL (VXGE_HW_ERR_CRITICAL / _SLOT_FREEZE / _FIFO /
 * _VPATH).
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	/* All-ones readback means the adapter fell off the bus. */
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	/* Any bit outside the four known sections is unexpected. */
	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	/* XMAC section: sustained network fault / OK transitions. */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			/* FLT without OK (latched or "occurred" form)
			 * means the fault is sustained -> link down.
			 */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				/* Mask further FLT alarms while down. */
				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			/* Mirror condition: OK without FLT -> link up. */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {

				sw_stats->error_stats.network_sustained_ok++;

				/* Mask further OK alarms while up. */
				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			/* Clear the latched XMAC alarm bits. */
			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			/* NOTE(review): this early return bypasses the
			 * vpath_alarms counter bump at "out" -- confirm
			 * intentional for the skip_alarms path.
			 */
			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	/* PIC section: general and kdfcctl (doorbell) error registers. */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			/* The statsb / illegal-access alarms are counted
			 * but never escalate alarm_event.
			 */
			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				/* Write-to-clear the latched bits. */
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

	}

	/* WRDMA section: PRC (receive ring controller) alarms. */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			/* Ring bumps are counted but not escalated. */
			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_VPATH_ERR,
						alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				 & ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_ALARM_CLEARED,
						alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	/* Cleared or unknown outcomes are not reported as errors.
	 * (This is also the only exit when vpath == NULL, so the
	 * vpath dereference below is safe.)
	 */
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
/*
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: passed through to the per-vpath alarm processor.
 * @reason: out - raw titan_general_int_status value (0 on error paths).
 *
 * Reads the top-level interrupt status and classifies the interrupt:
 * not ours (no bits set), slot freeze (all-ones from both status and
 * adapter_status), traffic (any deployed-vpath traffic bit), or alarm
 * (walk every deployed vpath through __vxge_hw_vpath_alarm_process()
 * and return the highest severity seen).
 *
 * Returns: VXGE_HW_OK, VXGE_HW_ERR_WRONG_IRQ, VXGE_HW_ERR_SLOT_FREEZE,
 * or the accumulated per-vpath alarm severity.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* No bits set: the interrupt is not ours. */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
		/* All-ones may mean the adapter left the bus; confirm
		 * with a second register before declaring slot freeze.
		 */
		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	/* Align the deployed-vpath bitmap with the traffic-int field. */
	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			/* Keep the worst severity across vpaths. */
			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			/* Stop early on unrecoverable conditions. */
			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
954
955
956
957
958
959
960
961
962
963
964
965void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
966{
967
968 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
969 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
970 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
971 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
972 &hldev->common_reg->tim_int_status0);
973 }
974
975 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
976 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
977 __vxge_hw_pio_mem_write32_upper(
978 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
979 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
980 &hldev->common_reg->tim_int_status1);
981 }
982}
983
984
985
986
987
988
989
990
991
992
993static enum vxge_hw_status
994vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
995{
996 if (channel->reserve_ptr - channel->reserve_top > 0) {
997_alloc_after_swap:
998 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
999
1000 return VXGE_HW_OK;
1001 }
1002
1003
1004
1005
1006
1007
1008
1009 if (channel->length - channel->free_ptr > 0) {
1010 swap(channel->reserve_arr, channel->free_arr);
1011 channel->reserve_ptr = channel->length;
1012 channel->reserve_top = channel->free_ptr;
1013 channel->free_ptr = channel->length;
1014
1015 channel->stats->reserve_free_swaps_cnt++;
1016
1017 goto _alloc_after_swap;
1018 }
1019
1020 channel->stats->full_cnt++;
1021
1022 *dtrh = NULL;
1023 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034static void
1035vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1036{
1037 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1038
1039 channel->work_arr[channel->post_index++] = dtrh;
1040
1041
1042 if (channel->post_index == channel->length)
1043 channel->post_index = 0;
1044}
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054void
1055vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1056{
1057 vxge_assert(channel->compl_index < channel->length);
1058
1059 *dtrh = channel->work_arr[channel->compl_index];
1060 prefetch(*dtrh);
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1071{
1072 channel->work_arr[channel->compl_index] = NULL;
1073
1074
1075 if (++channel->compl_index == channel->length)
1076 channel->compl_index = 0;
1077
1078 channel->stats->total_compl_cnt++;
1079}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1090{
1091 channel->free_arr[--channel->free_ptr] = dtrh;
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1102{
1103 return (channel->reserve_ptr - channel->reserve_top) +
1104 (channel->length - channel->free_ptr);
1105}
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1122 void **rxdh)
1123{
1124 enum vxge_hw_status status;
1125 struct __vxge_hw_channel *channel;
1126
1127 channel = &ring->channel;
1128
1129 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1130
1131 if (status == VXGE_HW_OK) {
1132 struct vxge_hw_ring_rxd_1 *rxdp =
1133 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1134
1135 rxdp->control_0 = rxdp->control_1 = 0;
1136 }
1137
1138 return status;
1139}
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1166{
1167 struct __vxge_hw_channel *channel;
1168
1169 channel = &ring->channel;
1170
1171 vxge_hw_channel_dtr_free(channel, rxdh);
1172
1173}
1174
1175
1176
1177
1178
1179
1180
1181
1182void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1183{
1184 struct __vxge_hw_channel *channel;
1185
1186 channel = &ring->channel;
1187
1188 vxge_hw_channel_dtr_post(channel, rxdh);
1189}
1190
1191
1192
1193
1194
1195
1196
1197
1198void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1199{
1200 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1201
1202 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1203
1204 if (ring->stats->common_stats.usage_cnt > 0)
1205 ring->stats->common_stats.usage_cnt--;
1206}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
/*
 * vxge_hw_ring_rxd_post - Post a receive descriptor.
 * @ring: ring handle.
 * @rxdh: descriptor obtained from vxge_hw_ring_rxd_reserve().
 *
 * Transfers descriptor ownership to the adapter and queues it on the
 * channel work array.  The wmb() guarantees all prior CPU writes to the
 * descriptor are visible before the ownership bit flips.
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	/* Descriptor contents must land before ownership flips. */
	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	/* usage_cnt is incremented on completion, decremented on post. */
	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
1233
1234
1235
1236
1237
1238
1239
1240
/*
 * vxge_hw_ring_rxd_post_post_wmb - Barrier variant of
 * vxge_hw_ring_rxd_post_post().
 * @ring: ring handle.
 * @rxdh: descriptor previously queued via vxge_hw_ring_rxd_pre_post().
 *
 * Issues a wmb() so descriptor writes are visible before the ownership
 * bit is set by vxge_hw_ring_rxd_post_post().
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
/*
 * vxge_hw_ring_rxd_next_completed - Get the next completed descriptor.
 * @ring: ring handle.
 * @rxdh: out - completed descriptor, NULL when none is ready.
 * @t_code: out - transfer code extracted from control_0.
 *
 * Peeks at the channel's completion slot.  A descriptor is considered
 * complete when the adapter has cleared the ownership bit, or when it
 * reports a dropped frame (VXGE_HW_RING_T_CODE_FRM_DROP).  On success
 * the completion index advances and the usage statistics are updated.
 *
 * Returns: VXGE_HW_OK with a descriptor in *rxdh, otherwise
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = *rxdh;
	if (rxdp == NULL) {
		/* Nothing queued at the completion index. */
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* Ownership cleared (or frame dropped) means completed. */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert((rxdp)->host_control !=
			0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		/* Track how many descriptors the driver holds. */
		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* Adapter still owns the descriptor: nothing completed. */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345enum vxge_hw_status vxge_hw_ring_handle_tcode(
1346 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1347{
1348 enum vxge_hw_status status = VXGE_HW_OK;
1349
1350
1351
1352
1353
1354
1355 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1356 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1357 status = VXGE_HW_OK;
1358 goto exit;
1359 }
1360
1361 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1362 status = VXGE_HW_ERR_INVALID_TCODE;
1363 goto exit;
1364 }
1365
1366 ring->stats->rxd_t_code_err_cnt[t_code]++;
1367exit:
1368 return status;
1369}
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
/*
 * __vxge_hw_non_offload_db_post - Ring the non-offload doorbell.
 * @fifo: fifo handle.
 * @txdl_ptr: DMA address of the TxDL being posted.
 * @num_txds: last TxD number in the list (frags - 1 at the caller).
 * @no_snoop: no-snoop PCI attribute bits.
 *
 * Writes the NODBW control word first, then the TxDL pointer.
 * NOTE(review): the ordering suggests the txdl_ptr write is what the
 * hardware latches on -- confirm against the doorbell spec before
 * reordering.
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
}
1392
1393
1394
1395
1396
1397
/*
 * vxge_hw_fifo_free_txdl_count_get - Number of TxDLs still available
 * for reservation on the fifo's channel.
 * @fifoh: fifo handle.
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
/*
 * vxge_hw_fifo_txdl_reserve - Reserve a fifo descriptor list (TxDL).
 * @fifo: fifo handle.
 * @txdlh: out - reserved TxDL handle.
 * @txdl_priv: out - the driver-private area of the TxDL, taken from
 *	the first descriptor's host_control word.
 *
 * Allocates a descriptor list from the fifo's channel, resets its
 * per-TxDL bookkeeping (alignment buffer offsets, fragment counts),
 * and zeroes the control words of all max_frags TxDs in the list.
 *
 * Returns: VXGE_HW_OK, or VXGE_HW_INF_OUT_OF_DESCRIPTORS when the
 * channel is exhausted.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* Reset the per-TxDL private state for reuse. */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		/* host_control carries the private-area pointer. */
		*txdl_priv = (void *)(size_t)txdp->host_control;

		/* Zero every TxD so no stale control bits remain. */
		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
/*
 * vxge_hw_fifo_txdl_buffer_set - Attach a transmit buffer to a TxDL.
 * @fifo: fifo handle.
 * @txdlh: TxDL handle from vxge_hw_fifo_txdl_reserve().
 * @frag_idx: index of the fragment being attached.
 * @dma_pointer: DMA address of the buffer.
 * @size: buffer size in bytes.
 *
 * Fills the TxD at position txdl_priv->frags with the buffer address
 * and size, then advances the fragment count.  For the first fragment
 * of a frame (frag_idx == 0) the FIRST gather code and the fifo's
 * interrupt type/number are also set.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		/* First fragment: mark FIRST and program interrupts. */
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		/* NOTE(review): frag_idx == 0 with frags already in the
		 * list appears to start a new frame within the TxDL --
		 * the previous tail is re-marked LAST; confirm this is
		 * the intended multi-frame behavior.
		 */
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
			(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
/*
 * vxge_hw_fifo_txdl_post - Post a descriptor list for transmission.
 * @fifo: fifo handle
 * @txdlh: descriptor list previously reserved via
 *         vxge_hw_fifo_txdl_reserve() and filled via
 *         vxge_hw_fifo_txdl_buffer_set()
 *
 * Closes the gather list on the final TxD, transfers ownership of the
 * whole list to the adapter, posts it on the channel and rings the
 * non-offload doorbell.
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	/* Mark the last fragment as the end of the gather list... */
	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	/* ...then hand ownership of the list to the adapter. */
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	/* Ring the non-offload doorbell with the highest used TxD index. */
	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	/* Track outstanding-descriptor usage statistics. */
	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1584 struct __vxge_hw_fifo *fifo, void **txdlh,
1585 enum vxge_hw_fifo_tcode *t_code)
1586{
1587 struct __vxge_hw_channel *channel;
1588 struct vxge_hw_fifo_txd *txdp;
1589 enum vxge_hw_status status = VXGE_HW_OK;
1590
1591 channel = &fifo->channel;
1592
1593 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1594
1595 txdp = *txdlh;
1596 if (txdp == NULL) {
1597 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1598 goto exit;
1599 }
1600
1601
1602 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1603
1604 vxge_assert(txdp->host_control != 0);
1605
1606 vxge_hw_channel_dtr_complete(channel);
1607
1608 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1609
1610 if (fifo->stats->common_stats.usage_cnt > 0)
1611 fifo->stats->common_stats.usage_cnt--;
1612
1613 status = VXGE_HW_OK;
1614 goto exit;
1615 }
1616
1617
1618 *txdlh = NULL;
1619 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1620exit:
1621 return status;
1622}
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1639 void *txdlh,
1640 enum vxge_hw_fifo_tcode t_code)
1641{
1642 enum vxge_hw_status status = VXGE_HW_OK;
1643
1644 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1645 status = VXGE_HW_ERR_INVALID_TCODE;
1646 goto exit;
1647 }
1648
1649 fifo->stats->txd_t_code_err_cnt[t_code]++;
1650exit:
1651 return status;
1652}
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1679{
1680 struct __vxge_hw_channel *channel;
1681
1682 channel = &fifo->channel;
1683
1684 vxge_hw_channel_dtr_free(channel, txdlh);
1685}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701enum vxge_hw_status
1702vxge_hw_vpath_mac_addr_add(
1703 struct __vxge_hw_vpath_handle *vp,
1704 u8 *macaddr,
1705 u8 *macaddr_mask,
1706 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1707{
1708 u32 i;
1709 u64 data1 = 0ULL;
1710 u64 data2 = 0ULL;
1711 enum vxge_hw_status status = VXGE_HW_OK;
1712
1713 if (vp == NULL) {
1714 status = VXGE_HW_ERR_INVALID_HANDLE;
1715 goto exit;
1716 }
1717
1718 for (i = 0; i < ETH_ALEN; i++) {
1719 data1 <<= 8;
1720 data1 |= (u8)macaddr[i];
1721
1722 data2 <<= 8;
1723 data2 |= (u8)macaddr_mask[i];
1724 }
1725
1726 switch (duplicate_mode) {
1727 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1728 i = 0;
1729 break;
1730 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1731 i = 1;
1732 break;
1733 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1734 i = 2;
1735 break;
1736 default:
1737 i = 0;
1738 break;
1739 }
1740
1741 status = __vxge_hw_vpath_rts_table_set(vp,
1742 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1743 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1744 0,
1745 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1746 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1747 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1748exit:
1749 return status;
1750}
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764enum vxge_hw_status
1765vxge_hw_vpath_mac_addr_get(
1766 struct __vxge_hw_vpath_handle *vp,
1767 u8 *macaddr,
1768 u8 *macaddr_mask)
1769{
1770 u32 i;
1771 u64 data1 = 0ULL;
1772 u64 data2 = 0ULL;
1773 enum vxge_hw_status status = VXGE_HW_OK;
1774
1775 if (vp == NULL) {
1776 status = VXGE_HW_ERR_INVALID_HANDLE;
1777 goto exit;
1778 }
1779
1780 status = __vxge_hw_vpath_rts_table_get(vp,
1781 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1782 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1783 0, &data1, &data2);
1784
1785 if (status != VXGE_HW_OK)
1786 goto exit;
1787
1788 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1789
1790 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1791
1792 for (i = ETH_ALEN; i > 0; i--) {
1793 macaddr[i-1] = (u8)(data1 & 0xFF);
1794 data1 >>= 8;
1795
1796 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1797 data2 >>= 8;
1798 }
1799exit:
1800 return status;
1801}
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815enum vxge_hw_status
1816vxge_hw_vpath_mac_addr_get_next(
1817 struct __vxge_hw_vpath_handle *vp,
1818 u8 *macaddr,
1819 u8 *macaddr_mask)
1820{
1821 u32 i;
1822 u64 data1 = 0ULL;
1823 u64 data2 = 0ULL;
1824 enum vxge_hw_status status = VXGE_HW_OK;
1825
1826 if (vp == NULL) {
1827 status = VXGE_HW_ERR_INVALID_HANDLE;
1828 goto exit;
1829 }
1830
1831 status = __vxge_hw_vpath_rts_table_get(vp,
1832 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1833 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1834 0, &data1, &data2);
1835
1836 if (status != VXGE_HW_OK)
1837 goto exit;
1838
1839 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1840
1841 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1842
1843 for (i = ETH_ALEN; i > 0; i--) {
1844 macaddr[i-1] = (u8)(data1 & 0xFF);
1845 data1 >>= 8;
1846
1847 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1848 data2 >>= 8;
1849 }
1850
1851exit:
1852 return status;
1853}
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867enum vxge_hw_status
1868vxge_hw_vpath_mac_addr_delete(
1869 struct __vxge_hw_vpath_handle *vp,
1870 u8 *macaddr,
1871 u8 *macaddr_mask)
1872{
1873 u32 i;
1874 u64 data1 = 0ULL;
1875 u64 data2 = 0ULL;
1876 enum vxge_hw_status status = VXGE_HW_OK;
1877
1878 if (vp == NULL) {
1879 status = VXGE_HW_ERR_INVALID_HANDLE;
1880 goto exit;
1881 }
1882
1883 for (i = 0; i < ETH_ALEN; i++) {
1884 data1 <<= 8;
1885 data1 |= (u8)macaddr[i];
1886
1887 data2 <<= 8;
1888 data2 |= (u8)macaddr_mask[i];
1889 }
1890
1891 status = __vxge_hw_vpath_rts_table_set(vp,
1892 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1893 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1894 0,
1895 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1896 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1897exit:
1898 return status;
1899}
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910enum vxge_hw_status
1911vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1912{
1913 enum vxge_hw_status status = VXGE_HW_OK;
1914
1915 if (vp == NULL) {
1916 status = VXGE_HW_ERR_INVALID_HANDLE;
1917 goto exit;
1918 }
1919
1920 status = __vxge_hw_vpath_rts_table_set(vp,
1921 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1922 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1923 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1924exit:
1925 return status;
1926}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938enum vxge_hw_status
1939vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1940{
1941 enum vxge_hw_status status = VXGE_HW_OK;
1942
1943 if (vp == NULL) {
1944 status = VXGE_HW_ERR_INVALID_HANDLE;
1945 goto exit;
1946 }
1947
1948 status = __vxge_hw_vpath_rts_table_set(vp,
1949 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1950 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1951 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1952exit:
1953 return status;
1954}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1965 struct __vxge_hw_vpath_handle *vp)
1966{
1967 u64 val64;
1968 struct __vxge_hw_virtualpath *vpath;
1969 enum vxge_hw_status status = VXGE_HW_OK;
1970
1971 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1972 status = VXGE_HW_ERR_INVALID_HANDLE;
1973 goto exit;
1974 }
1975
1976 vpath = vp->vpath;
1977
1978
1979 if (!(vpath->hldev->access_rights &
1980 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1981 return VXGE_HW_OK;
1982
1983 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1984
1985 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1986
1987 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1988 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1989 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1990 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1991
1992 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1993 }
1994exit:
1995 return status;
1996}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2007 struct __vxge_hw_vpath_handle *vp)
2008{
2009 u64 val64;
2010 struct __vxge_hw_virtualpath *vpath;
2011 enum vxge_hw_status status = VXGE_HW_OK;
2012
2013 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2014 status = VXGE_HW_ERR_INVALID_HANDLE;
2015 goto exit;
2016 }
2017
2018 vpath = vp->vpath;
2019
2020 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2021
2022 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2023
2024 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2025 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2026 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2027
2028 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2029 }
2030exit:
2031 return status;
2032}
2033
2034
2035
2036
2037
2038
2039
2040enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2041 struct __vxge_hw_vpath_handle *vp)
2042{
2043 u64 val64;
2044 struct __vxge_hw_virtualpath *vpath;
2045 enum vxge_hw_status status = VXGE_HW_OK;
2046
2047 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2048 status = VXGE_HW_ERR_INVALID_HANDLE;
2049 goto exit;
2050 }
2051
2052 vpath = vp->vpath;
2053
2054 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2055
2056 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2057 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2058 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2059 }
2060exit:
2061 return status;
2062}
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2073 struct __vxge_hw_vpath_handle *vp)
2074{
2075 u64 val64;
2076 struct __vxge_hw_virtualpath *vpath;
2077 enum vxge_hw_status status = VXGE_HW_OK;
2078
2079 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2080 status = VXGE_HW_ERR_INVALID_HANDLE;
2081 goto exit;
2082 }
2083
2084 vpath = vp->vpath;
2085
2086 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2087
2088 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2089 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2090 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2091 }
2092exit:
2093 return status;
2094}
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105enum vxge_hw_status
2106vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2107{
2108 u64 val64;
2109 struct __vxge_hw_virtualpath *vpath;
2110 enum vxge_hw_status status = VXGE_HW_OK;
2111
2112 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2113 status = VXGE_HW_ERR_INVALID_HANDLE;
2114 goto exit;
2115 }
2116
2117 vpath = vp->vpath;
2118
2119 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2120
2121 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2122 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2123 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2124 }
2125exit:
2126 return status;
2127}
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137enum vxge_hw_status vxge_hw_vpath_alarm_process(
2138 struct __vxge_hw_vpath_handle *vp,
2139 u32 skip_alarms)
2140{
2141 enum vxge_hw_status status = VXGE_HW_OK;
2142
2143 if (vp == NULL) {
2144 status = VXGE_HW_ERR_INVALID_HANDLE;
2145 goto exit;
2146 }
2147
2148 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2149exit:
2150 return status;
2151}
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
/*
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with the vpath's TIM
 * and alarm interrupts.
 * @vp: vpath handle
 * @tim_msix_id: per-vpath MSIX indices for the two TIM interrupts
 * @alarm_msix_id: MSIX index used for the alarm interrupt
 *
 * Programs interrupt_cfg0/cfg2 with device-wide vector numbers; the
 * "* 4" scaling reflects four vectors per vpath. In one-shot MSIX mode
 * the per-vector one-shot enables are also turned on.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
			int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	/* Map the two TIM traffic interrupts to device-wide vectors. */
	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		 (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		 (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	/* Alarm vector is based on the first vpath owned by the function. */
	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		/* One-shot mode: enable the one-shot behavior per vector. */
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
/*
 * vxge_hw_vpath_msix_mask - Mask one MSIX vector for this device.
 * @vp: vpath handle
 * @msix_id: MSIX vector number to mask
 *
 * The vector index is split into a register selector (msix_id % 4) and a
 * bit position (msix_id >> 2) within set_msix_mask_vect[].
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
/*
 * vxge_hw_vpath_msix_clear - Clear (re-arm) one MSIX vector.
 * @vp: vpath handle
 * @msix_id: MSIX vector number to clear
 *
 * In one-shot mode the vector is re-armed through clr_msix_one_shot_vec[];
 * otherwise the regular clear_msix_mask_vect[] register is used. The
 * register/bit split mirrors vxge_hw_vpath_msix_mask().
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
/*
 * vxge_hw_vpath_msix_unmask - Unmask one MSIX vector for this device.
 * @vp: vpath handle
 * @msix_id: MSIX vector number to unmask
 *
 * Counterpart of vxge_hw_vpath_msix_mask(): sets the same bit position in
 * clear_msix_mask_vect[] to re-enable the vector.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
2266
2267
2268
2269
2270
2271
2272
2273
2274
/*
 * vxge_hw_vpath_inta_mask_tx_rx - Mask this vpath's tx/rx TIM interrupts
 * in INTA mode.
 * @vp: vpath handle
 *
 * OR-sets this vpath's tx/rx bits into the shared tim_int_mask0/1
 * registers, preserving bits that belong to other vpaths.
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* Compute this vpath's bit positions within the shared masks. */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	/* tim_int_mask1 is accessed as a 32-bit register, hence readl. */
	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
2304
2305
2306
2307
2308
2309
2310
2311
2312
/*
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask this vpath's tx/rx TIM
 * interrupts in INTA mode.
 * @vp: vpath handle
 *
 * Clears this vpath's tx/rx bits from the shared tim_int_mask0/1
 * registers while preserving bits owned by other vpaths.
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* Compute this vpath's bit positions within the shared masks. */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	/* NOTE(review): unlike the mask path, tim_int_mask1 is NOT
	 * re-read here - val64 still holds the tim_int_mask0 contents
	 * when it is ANDed into the tim_int_mask1 write below. Confirm
	 * this is intentional. */
	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
/*
 * vxge_hw_vpath_poll_rx - Poll the ring for completed receive descriptors.
 * @ring: ring to poll
 *
 * Drains completions through the driver callback, then replenishes the
 * PRC doorbell once at least rxds_limit descriptors have been consumed.
 * The doorbell counts qwords: each consumed RxD contributes 4, plus 4
 * extra whenever a whole descriptor block's worth has been handed back.
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/* Each RxD is 4 qwords from the doorbell's view. */
			new_count = (ring->doorbell_cnt * 4);

			/* Add 4 qwords once per whole block consumed,
			 * carrying the remainder to the next poll. */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;

				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Read back - presumably to flush the posted
			 * doorbell write; confirm against hardware docs. */
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2410 struct sk_buff ***skb_ptr, int nr_skb,
2411 int *more)
2412{
2413 enum vxge_hw_fifo_tcode t_code;
2414 void *first_txdlh;
2415 enum vxge_hw_status status = VXGE_HW_OK;
2416 struct __vxge_hw_channel *channel;
2417
2418 channel = &fifo->channel;
2419
2420 status = vxge_hw_fifo_txdl_next_completed(fifo,
2421 &first_txdlh, &t_code);
2422 if (status == VXGE_HW_OK)
2423 if (fifo->callback(fifo, first_txdlh, t_code,
2424 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2425 status = VXGE_HW_COMPLETIONS_REMAIN;
2426
2427 return status;
2428}
2429