1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/etherdevice.h>
15#include <linux/prefetch.h>
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-main.h"
20
21
22
23
24
25
26
27
28
29
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Clears every pending per-vpath error/alarm register (write-to-clear)
 * and then programs the mask registers so that only the interrupt
 * sources the driver services remain unmasked.
 *
 * Returns: VXGE_HW_OK on success, VXGE_HW_ERR_INVALID_HANDLE if @vp is
 * NULL, VXGE_HW_ERR_VPATH_NOT_OPEN if the vpath is not open.
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	/* First pass: write all-ones to each error/alarm register to
	 * clear any stale events before unmasking anything. */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	/* Dummy read of the summary status; presumably flushes the
	 * clears above to the device before masks are programmed -
	 * TODO(review): confirm against the Titan errata. */
	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Second pass: program the masks.  Sources the driver never
	 * services stay fully masked (all-ones written to the mask). */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Mask only the listed general-error bits; the remaining bits
	 * (e.g. INI_SERR_DET) stay enabled.  NOTE(review): this is a
	 * 64-bit write of a 32-bit value, unlike the 32-bit "upper"
	 * writes used elsewhere - appears intentional but worth
	 * confirming against the register layout. */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
			VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
			VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
			VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
			&vp_reg->general_errors_mask);

	/* Mask FIFO1/FIFO2 doorbell errors; FIFO0 errors stay enabled. */
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	/* 0 == fully unmasked */
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	/* Ring-bump is informational only; keep it masked. */
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	/* Only the first vpath of the function listens for network
	 * fault/ok transitions; the others mask them entirely. */
	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	/* Finally open the vpath-level summary interrupt. */
	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;

}
146
147
148
149
150
151
152
153
154
155
156enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 struct __vxge_hw_vpath_handle *vp)
158{
159 u64 val64;
160
161 struct __vxge_hw_virtualpath *vpath;
162 enum vxge_hw_status status = VXGE_HW_OK;
163 struct vxge_hw_vpath_reg __iomem *vp_reg;
164 if (vp == NULL) {
165 status = VXGE_HW_ERR_INVALID_HANDLE;
166 goto exit;
167 }
168
169 vpath = vp->vpath;
170
171 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 goto exit;
174 }
175 vp_reg = vpath->vp_reg;
176
177 __vxge_hw_pio_mem_write32_upper(
178 (u32)VXGE_HW_INTR_MASK_ALL,
179 &vp_reg->vpath_general_int_mask);
180
181 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182
183 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184
185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 &vp_reg->general_errors_mask);
187
188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 &vp_reg->pci_config_errors_mask);
190
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->mrpcim_to_vpath_alarm_mask);
193
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->srpcim_to_vpath_alarm_mask);
196
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->vpath_ppif_int_mask);
199
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->srpcim_msg_to_vpath_mask);
202
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->vpath_pcipif_int_mask);
205
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->wrdma_alarm_mask);
208
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->prc_alarm_mask);
211
212 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 &vp_reg->xgmac_vp_int_mask);
214
215 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 &vp_reg->asic_ntwk_vp_err_mask);
217
218exit:
219 return status;
220}
221
/*
 * vxge_hw_vpath_tti_ci_set - Enable the TX timer continuous-interrupt bit.
 * @fifo: Fifo whose TTI (transmit timer interrupt) config is updated.
 *
 * No-op unless the fifo is enabled.  Sets TIMER_CI in the TX
 * tim_cfg1_int_num register (read-modify-write) exactly once; the
 * software copy in config->tti.timer_ci_en guards against repeating
 * the register update.  The written value is cached in
 * fifo->tim_tti_cfg1_saved for later dynamic updates.
 */
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	/* fifo->config points at the fifo member inside the vpath
	 * config; recover the enclosing vpath config to reach tti. */
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}
242
243void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244{
245 u64 val64 = ring->tim_rti_cfg1_saved;
246
247 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 ring->tim_rti_cfg1_saved = val64;
249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250}
251
252void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253{
254 u64 val64 = fifo->tim_tti_cfg3_saved;
255 u64 timer = (fifo->rtimer * 1000) / 272;
256
257 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 if (timer)
259 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
261
262 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263
264
265
266}
267
268void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269{
270 u64 val64 = ring->tim_rti_cfg3_saved;
271 u64 timer = (ring->rtimer * 1000) / 272;
272
273 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 if (timer)
275 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
277
278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279
280
281
282}
283
284
285
286
287
288
289
290
291
292
293void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294{
295
296 __vxge_hw_pio_mem_write32_upper(
297 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
299}
300
301
302
303
304
305
306
307
308
309
310void
311vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312{
313
314 __vxge_hw_pio_mem_write32_upper(
315 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317}
318
319
320
321
322
323
324
325
326
327
328
329void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330{
331 __vxge_hw_pio_mem_write32_upper(
332 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334}
335
336
337
338
339
340
341
342u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
343{
344
345 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350
351 hldev->config.intr_mode = intr_mode;
352 return intr_mode;
353}
354
355
356
357
358
359
360
361
362
363
364
365
/*
 * vxge_hw_device_intr_enable - Enable device-level interrupts.
 * @hldev: HW device handle.
 *
 * Masks everything at the top level, enables interrupts on each
 * deployed vpath, then (in INTA/IRQLINE mode only) clears and unmasks
 * the TIM traffic interrupt bits, and finally unmasks the top-level
 * interrupt again.
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	/* Keep the device quiet while per-vpath sources are set up. */
	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	/* TIM traffic interrupts are only routed this way for legacy
	 * INTA; MSI-X modes handle them per vector. */
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			/* clear pending, then unmask (~val64 keeps all
			 * other bits masked) */
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
		}
	}

	/* Dummy read - presumably flushes the writes above before the
	 * top-level unmask; result intentionally unused. */
	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
409
410
411
412
413
414
415
416
417
418
419
/*
 * vxge_hw_device_intr_disable - Disable device-level interrupts.
 * @hldev: HW device handle.
 *
 * Masks the top-level interrupt, masks all TIM traffic interrupts,
 * then disables interrupts on every deployed vpath.
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
440
441
442
443
444
445
446
447
448
449void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
450{
451 u64 val64;
452
453 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
455
456 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 &hldev->common_reg->titan_mask_all_int);
458}
459
460
461
462
463
464
465
466
467
468void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
469{
470 u64 val64 = 0;
471
472 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
474
475 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 &hldev->common_reg->titan_mask_all_int);
477}
478
479
480
481
482
483
484
485
486
487void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
488{
489 u32 val32;
490
491 val32 = readl(&hldev->common_reg->titan_general_int_status);
492}
493
494
495
496
497
498
499
500
501
/*
 * __vxge_hw_device_handle_error - Route a HW event to the upper layer.
 * @hldev: HW device handle.
 * @vp_id: Vpath the event occurred on (NULL_VPID for device-wide).
 * @type: Event being reported.
 *
 * Only genuinely critical events (fifo/vpath/critical/serr/ecc/
 * slot-freeze and unknown) reach the ULD's crit_err callback; purely
 * informational events return early.  Always returns VXGE_HW_OK.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	/* lifecycle/link notifications are not errors */
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	/* fall through to the crit_err callback */
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks->crit_err)
		hldev->uld_callbacks->crit_err(hldev,
			type, vp_id);
out:

	return VXGE_HW_OK;
}
542
543
544
545
546
547
548
549
550static enum vxge_hw_status
551__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
552{
553
554
555
556 if (hldev->link_state == VXGE_HW_LINK_DOWN)
557 goto exit;
558
559 hldev->link_state = VXGE_HW_LINK_DOWN;
560
561
562 if (hldev->uld_callbacks->link_down)
563 hldev->uld_callbacks->link_down(hldev);
564exit:
565 return VXGE_HW_OK;
566}
567
568
569
570
571
572
573
574
575static enum vxge_hw_status
576__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
577{
578
579
580
581 if (hldev->link_state == VXGE_HW_LINK_UP)
582 goto exit;
583
584 hldev->link_state = VXGE_HW_LINK_UP;
585
586
587 if (hldev->uld_callbacks->link_up)
588 hldev->uld_callbacks->link_up(hldev);
589exit:
590 return VXGE_HW_OK;
591}
592
593
594
595
596
597
598
599
600
/*
 * __vxge_hw_vpath_alarm_process - Decode and service a vpath alarm.
 * @vpath: Virtual path that raised the alarm (may be NULL).
 * @skip_alarms: When non-zero, pending alarm bits are left unclcleared
 *	(no write-to-clear), so they can be re-examined later.
 *
 * Reads vpath_general_int_status and walks the XMAC, PIC (general and
 * KDFC doorbell errors) and WRDMA sub-status hierarchies, bumping the
 * relevant software error counters and escalating alarm_event via
 * VXGE_HW_SET_LEVEL (which keeps the most severe event seen).
 *
 * Returns: VXGE_HW_OK when nothing severe happened (or only events
 * that were cleared), VXGE_HW_ERR_CRITICAL for a serious error (SERR),
 * VXGE_HW_ERR_SLOT_FREEZE when the adapter reads back all-ones,
 * VXGE_HW_ERR_FIFO / VXGE_HW_ERR_VPATH for fifo/vpath level errors.
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		/* jump past the stats bump - there is no vpath to
		 * account against; UNKNOWN returns VXGE_HW_OK below */
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	/* all-ones read == dead/removed adapter (slot freeze) */
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	/* any bit outside the four known summary bits is unexpected */
	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	/* ---- XMAC: network fault / link state transitions ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			/* sustained fault: FLT set without OK (current or
			 * occurred-since-last-read flavour) */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				/* mask further FLT alarms until link
				 * comes back */
				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			/* sustained ok: OK set without FLT */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {

				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			/* clear the error register; done even when
			 * skip_alarms is set, unlike the other groups */
			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			/* early return skips the vpath_alarms++ bump at
			 * "out" - NOTE(review): possibly intentional,
			 * but inconsistent with the other paths */
			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	/* ---- PIC: general errors and KDFC doorbell errors ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			/* each bit is only serviced when unmasked
			 * (bit & ~mask) */
			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

	}

	/* ---- WRDMA: receive-side PRC alarms ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_VPATH_ERR,
						alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				 & ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_ALARM_CLEARED,
						alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	/* UNKNOWN here means "nothing severe" (includes the NULL-vpath
	 * entry path, where vpath must not be dereferenced) */
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
/*
 * vxge_hw_device_begin_irq - First half of the INTA ISR.
 * @hldev: HW device handle.
 * @skip_alarms: Passed through to the per-vpath alarm processor.
 * @reason: Out parameter; receives the raw titan_general_int_status
 *	value (0 when the interrupt was not ours or the slot froze).
 *
 * Determines whether the interrupt belongs to this device, whether it
 * is traffic (fast path - returns VXGE_HW_OK immediately) or an alarm
 * (slow path - walks every deployed vpath's alarm state).
 *
 * Returns: VXGE_HW_OK, VXGE_HW_ERR_WRONG_IRQ (shared line, not ours),
 * VXGE_HW_ERR_SLOT_FREEZE, or the most severe per-vpath alarm status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* nothing pending - interrupt came from another device
		 * on a shared line */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
		/* all-ones: verify with a second register before
		 * declaring a slot freeze */
		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	/* deployed-vpath bits shifted down into the traffic-int field */
	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			/* keep the most severe status seen so far */
			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			/* stop scanning once the device is known dead */
			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
965
966
967
968
969
970
971
972
973
974
975
976void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
977{
978
979 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
980 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
981 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
982 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
983 &hldev->common_reg->tim_int_status0);
984 }
985
986 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
987 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
988 __vxge_hw_pio_mem_write32_upper(
989 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
990 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
991 &hldev->common_reg->tim_int_status1);
992 }
993}
994
995
996
997
998
999
1000
1001
1002
1003
1004static enum vxge_hw_status
1005vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1006{
1007 void **tmp_arr;
1008
1009 if (channel->reserve_ptr - channel->reserve_top > 0) {
1010_alloc_after_swap:
1011 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1012
1013 return VXGE_HW_OK;
1014 }
1015
1016
1017
1018
1019
1020
1021
1022 if (channel->length - channel->free_ptr > 0) {
1023
1024 tmp_arr = channel->reserve_arr;
1025 channel->reserve_arr = channel->free_arr;
1026 channel->free_arr = tmp_arr;
1027 channel->reserve_ptr = channel->length;
1028 channel->reserve_top = channel->free_ptr;
1029 channel->free_ptr = channel->length;
1030
1031 channel->stats->reserve_free_swaps_cnt++;
1032
1033 goto _alloc_after_swap;
1034 }
1035
1036 channel->stats->full_cnt++;
1037
1038 *dtrh = NULL;
1039 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1040}
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050static void
1051vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1052{
1053 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1054
1055 channel->work_arr[channel->post_index++] = dtrh;
1056
1057
1058 if (channel->post_index == channel->length)
1059 channel->post_index = 0;
1060}
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070void
1071vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1072{
1073 vxge_assert(channel->compl_index < channel->length);
1074
1075 *dtrh = channel->work_arr[channel->compl_index];
1076 prefetch(*dtrh);
1077}
1078
1079
1080
1081
1082
1083
1084
1085
1086void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1087{
1088 channel->work_arr[channel->compl_index] = NULL;
1089
1090
1091 if (++channel->compl_index == channel->length)
1092 channel->compl_index = 0;
1093
1094 channel->stats->total_compl_cnt++;
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
/*
 * vxge_hw_channel_dtr_free - Return a descriptor to the channel.
 * @channel: Channel handle.
 * @dtrh: Descriptor being returned.
 *
 * Pushes the descriptor onto the free array (free_ptr grows downward);
 * vxge_hw_channel_dtr_alloc() reclaims these via an array swap.
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
1109
1110
1111
1112
1113
1114
1115
1116
/*
 * vxge_hw_channel_dtr_count - Number of descriptors available to reserve.
 * @channel: Channel handle.
 *
 * Sum of what is left in the reserve array plus what has been returned
 * to the free array (counted from the top, since free_ptr grows down).
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1138 void **rxdh)
1139{
1140 enum vxge_hw_status status;
1141 struct __vxge_hw_channel *channel;
1142
1143 channel = &ring->channel;
1144
1145 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1146
1147 if (status == VXGE_HW_OK) {
1148 struct vxge_hw_ring_rxd_1 *rxdp =
1149 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1150
1151 rxdp->control_0 = rxdp->control_1 = 0;
1152 }
1153
1154 return status;
1155}
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1182{
1183 struct __vxge_hw_channel *channel;
1184
1185 channel = &ring->channel;
1186
1187 vxge_hw_channel_dtr_free(channel, rxdh);
1188
1189}
1190
1191
1192
1193
1194
1195
1196
1197
1198void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1199{
1200 struct __vxge_hw_channel *channel;
1201
1202 channel = &ring->channel;
1203
1204 vxge_hw_channel_dtr_post(channel, rxdh);
1205}
1206
1207
1208
1209
1210
1211
1212
1213
1214void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1215{
1216 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1217 struct __vxge_hw_channel *channel;
1218
1219 channel = &ring->channel;
1220
1221 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1222
1223 if (ring->stats->common_stats.usage_cnt > 0)
1224 ring->stats->common_stats.usage_cnt--;
1225}
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
/*
 * vxge_hw_ring_rxd_post - Post an RxD to the adapter in one step.
 * @ring: Ring handle.
 * @rxdh: Descriptor to post.
 *
 * Combines pre_post and post_post: issues a write barrier so all
 * descriptor field writes are visible before the ownership bit is set,
 * enqueues the descriptor in the work array, and adjusts the usage
 * counter.
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	/* barrier BEFORE the ownership handoff - the adapter must not
	 * see OWN set while earlier field writes are still pending */
	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
1252
1253
1254
1255
1256
1257
1258
1259
/*
 * vxge_hw_ring_rxd_post_post_wmb - Barrier variant of rxd_post_post.
 * @ring: Ring handle.
 * @rxdh: Descriptor to hand to the adapter.
 *
 * Issues a write barrier before setting the ownership bit, so earlier
 * descriptor writes are ordered ahead of the handoff.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
/*
 * vxge_hw_ring_rxd_next_completed - Fetch the next completed RxD.
 * @ring: Ring handle.
 * @rxdh: Out parameter; the completed descriptor, or NULL.
 * @t_code: Out parameter; transfer code extracted from control_0.
 *
 * A descriptor is complete when the adapter has released ownership
 * (OWN bit clear) or reported a dropped frame.  On completion the
 * channel's compl_index is advanced and usage statistics updated.
 *
 * Returns: VXGE_HW_OK when a completed descriptor was returned, else
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = *rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* adapter done with it, or frame dropped while still owned */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert((rxdp)->host_control !=
			0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		/* track the high-water mark of in-use descriptors */
		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
			ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* Slot not yet completed - report nothing available. */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364enum vxge_hw_status vxge_hw_ring_handle_tcode(
1365 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1366{
1367 struct __vxge_hw_channel *channel;
1368 enum vxge_hw_status status = VXGE_HW_OK;
1369
1370 channel = &ring->channel;
1371
1372
1373
1374
1375
1376
1377 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1378 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1379 status = VXGE_HW_OK;
1380 goto exit;
1381 }
1382
1383 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1384 status = VXGE_HW_ERR_INVALID_TCODE;
1385 goto exit;
1386 }
1387
1388 ring->stats->rxd_t_code_err_cnt[t_code]++;
1389exit:
1390 return status;
1391}
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1405 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1406{
1407 struct __vxge_hw_channel *channel;
1408
1409 channel = &fifo->channel;
1410
1411 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1412 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1413 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1414 &fifo->nofl_db->control_0);
1415
1416 mmiowb();
1417
1418 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1419
1420 mmiowb();
1421}
1422
1423
1424
1425
1426
1427
/*
 * vxge_hw_fifo_free_txdl_count_get - Number of free TxDLs in the fifo.
 * @fifoh: Fifo handle.
 *
 * Delegates to vxge_hw_channel_dtr_count() on the fifo's channel.
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
/*
 * vxge_hw_fifo_txdl_reserve - Reserve a TxDL (transmit descriptor list).
 * @fifo: Fifo handle.
 * @txdlh: Out parameter; receives the TxDL handle.
 * @txdl_priv: Out parameter; receives the driver-private area stored in
 *	the first TxD's host_control field.
 *
 * Reserves a descriptor list from the channel, resets its per-list
 * private bookkeeping (alignment buffer cursor, fragment counts), and
 * zeroes the control words of all max_frags TxDs in the list.
 *
 * Returns: VXGE_HW_OK or VXGE_HW_INF_OUT_OF_DESCRIPTORS.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private state */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		/* host_control carries the pointer to the private area */
		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1509 void *txdlh, u32 frag_idx,
1510 dma_addr_t dma_pointer, u32 size)
1511{
1512 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1513 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1514 struct __vxge_hw_channel *channel;
1515
1516 channel = &fifo->channel;
1517
1518 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1519 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1520
1521 if (frag_idx != 0)
1522 txdp->control_0 = txdp->control_1 = 0;
1523 else {
1524 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1525 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1526 txdp->control_1 |= fifo->interrupt_type;
1527 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1528 fifo->tx_intr_num);
1529 if (txdl_priv->frags) {
1530 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1531 (txdl_priv->frags - 1);
1532 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1533 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1534 }
1535 }
1536
1537 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1538
1539 txdp->buffer_pointer = (u64)dma_pointer;
1540 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1541 fifo->stats->total_buffers++;
1542 txdl_priv->frags++;
1543}
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
/*
 * vxge_hw_fifo_txdl_post - Post a filled TxDL to the adapter.
 * @fifo: fifo handle
 * @txdlh: TxDL handle previously reserved and populated
 *
 * Marks the last fragment GATHER_CODE_LAST, hands ownership of the first
 * TxD to the adapter, posts the descriptor on the channel and rings the
 * non-offload doorbell with the list's DMA address.  Fifo usage stats
 * are updated last.
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	/* Close the gather list before transferring ownership. */
	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	/* The doorbell takes frags - 1, matching the LAST_TXD_NUMBER
	 * field (last TxD index, not the fragment count). */
	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
/*
 * vxge_hw_fifo_txdl_next_completed - Retrieve the next completed TxDL.
 * @fifo: fifo handle
 * @txdlh: out - handle of the completed TxDL, NULL if none
 * @t_code: out - transfer code reported by the adapter
 *
 * Peeks at the channel's next descriptor; a TxDL counts as completed
 * only when the adapter has cleared the LIST_OWN_ADAPTER bit.  On
 * completion the descriptor is consumed from the channel and the usage
 * counter is decremented.
 *
 * Returns VXGE_HW_OK when a completed TxDL is handed back, otherwise
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = *txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* Adapter clears the OWN bit when the list is done. */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* Descriptor still owned by the adapter - nothing completed. */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1677 void *txdlh,
1678 enum vxge_hw_fifo_tcode t_code)
1679{
1680 struct __vxge_hw_channel *channel;
1681
1682 enum vxge_hw_status status = VXGE_HW_OK;
1683 channel = &fifo->channel;
1684
1685 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1686 status = VXGE_HW_ERR_INVALID_TCODE;
1687 goto exit;
1688 }
1689
1690 fifo->stats->txd_t_code_err_cnt[t_code]++;
1691exit:
1692 return status;
1693}
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1720{
1721 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1722 u32 max_frags;
1723 struct __vxge_hw_channel *channel;
1724
1725 channel = &fifo->channel;
1726
1727 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1728 (struct vxge_hw_fifo_txd *)txdlh);
1729
1730 max_frags = fifo->config->max_frags;
1731
1732 vxge_hw_channel_dtr_free(channel, txdlh);
1733}
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750enum vxge_hw_status
1751vxge_hw_vpath_mac_addr_add(
1752 struct __vxge_hw_vpath_handle *vp,
1753 u8 (macaddr)[ETH_ALEN],
1754 u8 (macaddr_mask)[ETH_ALEN],
1755 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1756{
1757 u32 i;
1758 u64 data1 = 0ULL;
1759 u64 data2 = 0ULL;
1760 enum vxge_hw_status status = VXGE_HW_OK;
1761
1762 if (vp == NULL) {
1763 status = VXGE_HW_ERR_INVALID_HANDLE;
1764 goto exit;
1765 }
1766
1767 for (i = 0; i < ETH_ALEN; i++) {
1768 data1 <<= 8;
1769 data1 |= (u8)macaddr[i];
1770
1771 data2 <<= 8;
1772 data2 |= (u8)macaddr_mask[i];
1773 }
1774
1775 switch (duplicate_mode) {
1776 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1777 i = 0;
1778 break;
1779 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1780 i = 1;
1781 break;
1782 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1783 i = 2;
1784 break;
1785 default:
1786 i = 0;
1787 break;
1788 }
1789
1790 status = __vxge_hw_vpath_rts_table_set(vp,
1791 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1792 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1793 0,
1794 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1795 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1796 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1797exit:
1798 return status;
1799}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813enum vxge_hw_status
1814vxge_hw_vpath_mac_addr_get(
1815 struct __vxge_hw_vpath_handle *vp,
1816 u8 (macaddr)[ETH_ALEN],
1817 u8 (macaddr_mask)[ETH_ALEN])
1818{
1819 u32 i;
1820 u64 data1 = 0ULL;
1821 u64 data2 = 0ULL;
1822 enum vxge_hw_status status = VXGE_HW_OK;
1823
1824 if (vp == NULL) {
1825 status = VXGE_HW_ERR_INVALID_HANDLE;
1826 goto exit;
1827 }
1828
1829 status = __vxge_hw_vpath_rts_table_get(vp,
1830 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1831 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1832 0, &data1, &data2);
1833
1834 if (status != VXGE_HW_OK)
1835 goto exit;
1836
1837 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1838
1839 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1840
1841 for (i = ETH_ALEN; i > 0; i--) {
1842 macaddr[i-1] = (u8)(data1 & 0xFF);
1843 data1 >>= 8;
1844
1845 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1846 data2 >>= 8;
1847 }
1848exit:
1849 return status;
1850}
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865enum vxge_hw_status
1866vxge_hw_vpath_mac_addr_get_next(
1867 struct __vxge_hw_vpath_handle *vp,
1868 u8 (macaddr)[ETH_ALEN],
1869 u8 (macaddr_mask)[ETH_ALEN])
1870{
1871 u32 i;
1872 u64 data1 = 0ULL;
1873 u64 data2 = 0ULL;
1874 enum vxge_hw_status status = VXGE_HW_OK;
1875
1876 if (vp == NULL) {
1877 status = VXGE_HW_ERR_INVALID_HANDLE;
1878 goto exit;
1879 }
1880
1881 status = __vxge_hw_vpath_rts_table_get(vp,
1882 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1883 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1884 0, &data1, &data2);
1885
1886 if (status != VXGE_HW_OK)
1887 goto exit;
1888
1889 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1890
1891 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1892
1893 for (i = ETH_ALEN; i > 0; i--) {
1894 macaddr[i-1] = (u8)(data1 & 0xFF);
1895 data1 >>= 8;
1896
1897 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1898 data2 >>= 8;
1899 }
1900
1901exit:
1902 return status;
1903}
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918enum vxge_hw_status
1919vxge_hw_vpath_mac_addr_delete(
1920 struct __vxge_hw_vpath_handle *vp,
1921 u8 (macaddr)[ETH_ALEN],
1922 u8 (macaddr_mask)[ETH_ALEN])
1923{
1924 u32 i;
1925 u64 data1 = 0ULL;
1926 u64 data2 = 0ULL;
1927 enum vxge_hw_status status = VXGE_HW_OK;
1928
1929 if (vp == NULL) {
1930 status = VXGE_HW_ERR_INVALID_HANDLE;
1931 goto exit;
1932 }
1933
1934 for (i = 0; i < ETH_ALEN; i++) {
1935 data1 <<= 8;
1936 data1 |= (u8)macaddr[i];
1937
1938 data2 <<= 8;
1939 data2 |= (u8)macaddr_mask[i];
1940 }
1941
1942 status = __vxge_hw_vpath_rts_table_set(vp,
1943 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1945 0,
1946 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1947 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1948exit:
1949 return status;
1950}
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963enum vxge_hw_status
1964vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1965{
1966 enum vxge_hw_status status = VXGE_HW_OK;
1967
1968 if (vp == NULL) {
1969 status = VXGE_HW_ERR_INVALID_HANDLE;
1970 goto exit;
1971 }
1972
1973 status = __vxge_hw_vpath_rts_table_set(vp,
1974 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1975 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1976 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1977exit:
1978 return status;
1979}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991enum vxge_hw_status
1992vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1993{
1994 u64 data;
1995 enum vxge_hw_status status = VXGE_HW_OK;
1996
1997 if (vp == NULL) {
1998 status = VXGE_HW_ERR_INVALID_HANDLE;
1999 goto exit;
2000 }
2001
2002 status = __vxge_hw_vpath_rts_table_get(vp,
2003 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
2004 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2005 0, vid, &data);
2006
2007 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
2008exit:
2009 return status;
2010}
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023enum vxge_hw_status
2024vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
2025{
2026 enum vxge_hw_status status = VXGE_HW_OK;
2027
2028 if (vp == NULL) {
2029 status = VXGE_HW_ERR_INVALID_HANDLE;
2030 goto exit;
2031 }
2032
2033 status = __vxge_hw_vpath_rts_table_set(vp,
2034 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
2035 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2036 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
2037exit:
2038 return status;
2039}
2040
2041
2042
2043
2044
2045
2046
2047
2048
/*
 * vxge_hw_vpath_promisc_enable - Enable promiscuous reception.
 * @vp: vpath handle (must have an open ring)
 *
 * Sets the unicast-all/multicast-all/broadcast/all-VID enable bits in
 * rxmac_vcfg0, but only if unicast-all is not already set.  Functions
 * without MRPCIM access rights silently succeed without touching the
 * register - promiscuity can only be changed by the privileged function.
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
	struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Non-privileged functions cannot change promiscuity. */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
2082
2083
2084
2085
2086
2087
2088
2089
2090
/*
 * vxge_hw_vpath_promisc_disable - Disable promiscuous reception.
 * @vp: vpath handle (must have an open ring)
 *
 * Clears the unicast-all/multicast-all/all-VID enable bits in
 * rxmac_vcfg0 if unicast-all is currently set.  BCAST_EN is
 * deliberately left alone, so broadcast reception survives leaving
 * promiscuous mode.
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
	struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
2118
2119
2120
2121
2122
2123
2124
2125enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2126 struct __vxge_hw_vpath_handle *vp)
2127{
2128 u64 val64;
2129 struct __vxge_hw_virtualpath *vpath;
2130 enum vxge_hw_status status = VXGE_HW_OK;
2131
2132 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2133 status = VXGE_HW_ERR_INVALID_HANDLE;
2134 goto exit;
2135 }
2136
2137 vpath = vp->vpath;
2138
2139 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2140
2141 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2142 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2143 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2144 }
2145exit:
2146 return status;
2147}
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2158 struct __vxge_hw_vpath_handle *vp)
2159{
2160 u64 val64;
2161 struct __vxge_hw_virtualpath *vpath;
2162 enum vxge_hw_status status = VXGE_HW_OK;
2163
2164 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2165 status = VXGE_HW_ERR_INVALID_HANDLE;
2166 goto exit;
2167 }
2168
2169 vpath = vp->vpath;
2170
2171 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2172
2173 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2174 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2175 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2176 }
2177exit:
2178 return status;
2179}
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190enum vxge_hw_status
2191vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2192{
2193 u64 val64;
2194 struct __vxge_hw_virtualpath *vpath;
2195 enum vxge_hw_status status = VXGE_HW_OK;
2196
2197 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2198 status = VXGE_HW_ERR_INVALID_HANDLE;
2199 goto exit;
2200 }
2201
2202 vpath = vp->vpath;
2203
2204 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2205
2206 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2207 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2208 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2209 }
2210exit:
2211 return status;
2212}
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222enum vxge_hw_status vxge_hw_vpath_alarm_process(
2223 struct __vxge_hw_vpath_handle *vp,
2224 u32 skip_alarms)
2225{
2226 enum vxge_hw_status status = VXGE_HW_OK;
2227
2228 if (vp == NULL) {
2229 status = VXGE_HW_ERR_INVALID_HANDLE;
2230 goto exit;
2231 }
2232
2233 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2234exit:
2235 return status;
2236}
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
/*
 * vxge_hw_vpath_msix_set - Map TIM/alarm interrupts to MSI-X vectors.
 * @vp: vpath handle
 * @tim_msix_id: per-vpath TIM vector offsets (entries 0 and 1 are used)
 * @alarm_msix_id: vector offset for alarm interrupts
 *
 * Programs interrupt_cfg0 with the two TXTI group vectors (each vpath
 * owns a block of 4 vectors, hence vp_id * 4 + offset) and
 * interrupt_cfg2 with the alarm vector, based at the device's first
 * vpath id.  In one-shot MSI-X mode the three one-shot vector enables
 * are also set.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
	int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		(vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		(vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	/* Alarm vector is based at the first vpath owned by this device. */
	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
		(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
		&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
			VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
			0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
			VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
			0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
			VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
			0, 32), &vp_reg->one_shot_vect2_en);
	}
}
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
/*
 * vxge_hw_vpath_msix_mask - Mask one MSI-X vector.
 * @vp: vpath handle
 * @msix_id: MSI-X vector id to mask
 *
 * Writes the vector's bit (msix_id >> 2) into the set_msix_mask_vect
 * register selected by msix_id % 4 - vectors are striped across the
 * four mask registers.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2318{
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320
2321 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322 __vxge_hw_pio_mem_write32_upper(
2323 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325 else
2326 __vxge_hw_pio_mem_write32_upper(
2327 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329}
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
/*
 * vxge_hw_vpath_msix_unmask - Unmask one MSI-X vector.
 * @vp: vpath handle
 * @msix_id: MSI-X vector id to unmask
 *
 * Mirror of vxge_hw_vpath_msix_mask(): clears the vector's bit in the
 * clear_msix_mask_vect register selected by msix_id % 4.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
2351
2352
2353
2354
2355
2356
2357
2358
2359
/*
 * vxge_hw_vpath_inta_mask_tx_rx - Mask this vpath's TX/RX TIM interrupts.
 * @vp: vpath handle
 *
 * Computes the vpath's bits in the shared tim_int_mask0/1 registers and
 * ORs them into the current register values (read-modify-write), so
 * other vpaths' mask bits are preserved.
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	/* NOTE(review): tim_int_mask1 is 32-bit (readl / write32_upper)
	 * while val64 is u64 - looks intentional but worth confirming
	 * against the register map. */
	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
2389
2390
2391
2392
2393
2394
2395
2396
2397
/*
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask this vpath's TX/RX TIM ints.
 * @vp: vpath handle
 *
 * Clears the vpath's bits from the shared tim_int_mask0/1 registers
 * while leaving other vpaths' mask bits intact.
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	/* NOTE(review): unlike the mask path, tim_int_mask1 is not
	 * re-read here; val64 still holds the tim_int_mask0 readback
	 * when it is ANDed into the mask1 write - confirm this is the
	 * intended behavior. */
	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
/*
 * vxge_hw_vpath_poll_rx - Poll one completed RxD and replenish doorbell.
 * @ring: ring to poll
 *
 * Hands the next completed RxD (if any) to the ring's callback, then,
 * once enough descriptors have been consumed (rxds_limit), posts the
 * replenished descriptor count to the PRC doorbell.
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/* Doorbell counts qwords; each RxD presumably
			 * occupies 4 qwords (hence the * 4) - TODO
			 * confirm against the Titan PRC spec. */
			new_count = (ring->doorbell_cnt * 4);

			/* Once a whole RxD block has been replenished,
			 * account 4 extra qwords and wrap the counter. */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset the per-block count. */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Read back to flush the posted doorbell write. */
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
/*
 * vxge_hw_vpath_poll_tx - Poll one completed TxDL on the fifo.
 * @fifo: fifo to poll
 * @skb_ptr: out - list of skbs completed by the callback
 * @nr_skb: capacity of the skb list
 * @more: out - set by the callback if more completions remain
 *
 * Fetches the next completed TxDL (if any) and hands it to the fifo's
 * callback.  Returns VXGE_HW_COMPLETIONS_REMAIN when the callback
 * reports it could not consume everything, otherwise the completion
 * status.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
	struct sk_buff ***skb_ptr, int nr_skb,
	int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
2514