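/******************************************************************************
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 ******************************************************************************/
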
#include <linux/etherdevice.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
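
/**
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */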
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	/* Clear all pending alarms on this vpath */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	/* Read to flush the above posted writes */
	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
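
/**
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */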
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}
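
/**
 * vxge_hw_vpath_tti_ci_set - Set the CI bit in the tx timer configuration.
 * @fifo: Fifo for this vpath.
 *
 * Enable the CI (continuous interrupt) bit in the transmit interrupt
 * timer configuration if it is not enabled already, and remember the
 * new register value in tim_tti_cfg1_saved.
 */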
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}
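
/**
 * vxge_hw_vpath_dynamic_rti_ci_set - Set the CI bit in the rx timer
 * configuration.
 * @ring: Ring for this vpath.
 *
 * Enable the CI bit in the receive interrupt timer configuration and
 * remember the new register value in tim_rti_cfg1_saved.
 */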
void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}
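
/**
 * vxge_hw_vpath_dynamic_tti_rtimer_set - Update the tx range timer.
 * @fifo: Fifo for this vpath.
 *
 * Reprogram the TIM range-timer value for the transmit interrupt from
 * fifo->rtimer. A value of zero disables the range timer.
 */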
void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
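
/**
 * vxge_hw_vpath_dynamic_rti_rtimer_set - Update the rx range timer.
 * @ring: Ring for this vpath.
 *
 * Reprogram the TIM range-timer value for the receive interrupt from
 * ring->rtimer. A value of zero disables the range timer.
 */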
void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
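
/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */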
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}
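
/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */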
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
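
/**
 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 * if configured in MSIX oneshot mode
 */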
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}
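
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 *
 * Unsupported modes fall back to VXGE_HW_INTR_MODE_IRQLINE.
 */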
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
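
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */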
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
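
/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */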
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
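
/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */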
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->titan_mask_all_int);
}
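
/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts. Only traffic interrupts are unmasked,
 * and only in the INTA (IRQ line) mode.
 *
 * See also: vxge_hw_device_mask_all()
 */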
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->titan_mask_all_int);
}
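
/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 */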
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	/* the read flushes previously posted writes; the value is discarded */
	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
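
/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */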
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks->crit_err)
		hldev->uld_callbacks->crit_err(hldev, type, vp_id);
out:
	return VXGE_HW_OK;
}
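
/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */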
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/* If the link state is already down, there is nothing to do */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks->link_down)
		hldev->uld_callbacks->link_down(hldev);
exit:
	return VXGE_HW_OK;
}
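
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */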
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/* If the link state is already up, there is nothing to do */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks->link_up)
		hldev->uld_callbacks->link_up(hldev);
exit:
	return VXGE_HW_OK;
}
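
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */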
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		    VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
			       &vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
			    ~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
			    ~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
			    ~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
			    ~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
			    ~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
			    ~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
			    ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) &
			    ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) &
			    ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ?
		VXGE_HW_ERR_FIFO : VXGE_HW_ERR_VPATH;
}
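
/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions, It first checks whether (shared IRQ) the
 * interrupt was raised by the device. Next, it masks the device interrupts.
 *
 * Note: vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 */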
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {
			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
		     VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				     (ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
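
/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the TX and RX interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the TX and RX interrupt.
 * See also: vxge_hw_device_begin_irq()
 */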
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
		       &hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}
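
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 */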
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by having free and reserved
	 * arrays separated we basically separated irq and non-irq parts.
	 * i.e. no additional lock need to be done when we free a resource */

	if (channel->length - channel->free_ptr > 0) {
		swap(channel->reserve_arr, channel->free_arr);
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
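
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the channel's work array.
 */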
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
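
/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 */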
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}
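
/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from
 * the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 */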
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
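
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array
 */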
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
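
/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieves the number of dtrs available. This function can not be called
 * from the data path.
 */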
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
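
/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver and
 * posting on the corresponding channel via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 */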
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
					     void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}
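
/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 */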
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
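
/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts it to the channel's work array.
 */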
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}
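
/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post: hands ownership of the descriptor to the
 * adapter.
 */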
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
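
/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 */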
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
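
/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */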
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
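
/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	 Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
 * driver of new completed descriptors. After that the driver can use
 * vxge_hw_ring_rxd_next_completed to retrieve the rest of the completions.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */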
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = *rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert((rxdp)->host_control != 0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
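
/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle the descriptor's transfer code. The latter comes with each
 * completed descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */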
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), Drop it !!!
	 */
	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
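
/*
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifohandle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 */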
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
					  u64 txdl_ptr, u32 num_txds,
					  u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
	       VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
	       VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
	       &fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}
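
/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */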
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
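
/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor) for the subsequent
 * filling-in by the driver and posting via vxge_hw_fifo_txdl_post().
 * A single fifo descriptor carries up to the configured number
 * (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 */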
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private state */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
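
/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()).
 */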
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
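
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 */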
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
			fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
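
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */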
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = *txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
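
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle the descriptor's transfer code. The latter comes with each
 * completed descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */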
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	/* the masked t_code is unsigned, so only the upper bound is checked */
	if ((t_code & 0x7) > 0x4) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
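
/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 */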
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
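
/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */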
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
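
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Get the first mac address entry for this vpath from MAC address table.
 * Return: the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 */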
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
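
/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Get the next mac address entry for this vpath from MAC address table.
 * Return: the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 */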
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}
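
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */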
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
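
/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete
 */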
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
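
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add
 */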
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
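
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */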
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
	      VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
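
/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */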
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
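
/**
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */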
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
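
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 */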
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
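
/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 */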
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
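
/**
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */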
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
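
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with the TIM interrupts (can be
 *             repeated). If fifo or ring are not enabled, the MSIX vector
 *             for that should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate the given MSIX vector numbers with the TIM
 * interrupts and the alarm interrupt.
 */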
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
		(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
		&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
	    VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
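
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */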
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
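
/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 */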
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
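
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */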
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
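
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */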
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
		       &hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
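
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */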
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
		       &hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
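
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successful.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */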
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
			       &ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
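
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: pointer to skb
 * @nr_skb: number of skbs
 * @more: more is coming
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successful.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */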
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					  struct sk_buff ***skb_ptr, int nr_skb,
					  int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}