#include "fm10k_common.h"

/**
 * fm10k_get_bus_info_generic - Generic set of bus info
 * @hw: pointer to hardware structure
 *
 * Reads the PCIe link and device capability/control/status words and
 * records the bus width, speed and payload sizes in the fm10k_hw structure.
 **/
s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
{
	u16 link_cap, link_status, device_cap, device_control;

	/* Get the maximum link width and speed from PCIe config space */
	link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP);

	switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
	case FM10K_PCIE_LINK_WIDTH_1:
		hw->bus_caps.width = fm10k_bus_width_pcie_x1;
		break;
	case FM10K_PCIE_LINK_WIDTH_2:
		hw->bus_caps.width = fm10k_bus_width_pcie_x2;
		break;
	case FM10K_PCIE_LINK_WIDTH_4:
		hw->bus_caps.width = fm10k_bus_width_pcie_x4;
		break;
	case FM10K_PCIE_LINK_WIDTH_8:
		hw->bus_caps.width = fm10k_bus_width_pcie_x8;
		break;
	default:
		hw->bus_caps.width = fm10k_bus_width_unknown;
		break;
	}

	switch (link_cap & FM10K_PCIE_LINK_SPEED) {
	case FM10K_PCIE_LINK_SPEED_2500:
		hw->bus_caps.speed = fm10k_bus_speed_2500;
		break;
	case FM10K_PCIE_LINK_SPEED_5000:
		hw->bus_caps.speed = fm10k_bus_speed_5000;
		break;
	case FM10K_PCIE_LINK_SPEED_8000:
		hw->bus_caps.speed = fm10k_bus_speed_8000;
		break;
	default:
		hw->bus_caps.speed = fm10k_bus_speed_unknown;
		break;
	}

	/* Get the maximum PCIe payload size supported by the device */
	device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP);

	switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
	case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
		hw->bus_caps.payload = fm10k_bus_payload_128;
		break;
	case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
		hw->bus_caps.payload = fm10k_bus_payload_256;
		break;
	case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
		hw->bus_caps.payload = fm10k_bus_payload_512;
		break;
	default:
		hw->bus_caps.payload = fm10k_bus_payload_unknown;
		break;
	}

	/* Get the negotiated link width and speed from PCIe config space */
	link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS);

	switch (link_status & FM10K_PCIE_LINK_WIDTH) {
	case FM10K_PCIE_LINK_WIDTH_1:
		hw->bus.width = fm10k_bus_width_pcie_x1;
		break;
	case FM10K_PCIE_LINK_WIDTH_2:
		hw->bus.width = fm10k_bus_width_pcie_x2;
		break;
	case FM10K_PCIE_LINK_WIDTH_4:
		hw->bus.width = fm10k_bus_width_pcie_x4;
		break;
	case FM10K_PCIE_LINK_WIDTH_8:
		hw->bus.width = fm10k_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = fm10k_bus_width_unknown;
		break;
	}

	switch (link_status & FM10K_PCIE_LINK_SPEED) {
	case FM10K_PCIE_LINK_SPEED_2500:
		hw->bus.speed = fm10k_bus_speed_2500;
		break;
	case FM10K_PCIE_LINK_SPEED_5000:
		hw->bus.speed = fm10k_bus_speed_5000;
		break;
	case FM10K_PCIE_LINK_SPEED_8000:
		hw->bus.speed = fm10k_bus_speed_8000;
		break;
	default:
		hw->bus.speed = fm10k_bus_speed_unknown;
		break;
	}

	/* Get the negotiated PCIe payload size for the PCIe function */
	device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL);

	switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
		hw->bus.payload = fm10k_bus_payload_128;
		break;
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
		hw->bus.payload = fm10k_bus_payload_256;
		break;
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
		hw->bus.payload = fm10k_bus_payload_512;
		break;
	default:
		hw->bus.payload = fm10k_bus_payload_unknown;
		break;
	}

	return 0;
}
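
/**
 * fm10k_get_pcie_msix_count_generic - Get MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Reads the MSI-X message control word from PCIe config space and returns
 * the supported vector count, capped at FM10K_MAX_MSIX_VECTORS.
 **/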
static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
{
	u16 msix_count;

	/* read in value from MSI-X capability register */
	msix_count = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL);
	msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;

	/* MSI-X count is zero-based, so we must add one */
	msix_count++;

	if (msix_count > FM10K_MAX_MSIX_VECTORS)
		msix_count = FM10K_MAX_MSIX_VECTORS;

	return msix_count;
}

/**
 * fm10k_get_invariants_generic - Initialize generic device invariants
 * @hw: pointer to hardware structure
 *
 * Clears the DGLORT map and records the maximum number of MSI-X vectors.
 **/
s32 fm10k_get_invariants_generic(struct fm10k_hw *hw)
{
	struct fm10k_mac_info *mac = &hw->mac;

	/* initialize GLORT state to avoid any false hits */
	mac->dglort_map = FM10K_DGLORTMAP_NONE;

	/* record maximum number of MSI-X vectors */
	mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 * fm10k_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Sets the Tx ready flag to indicate that the transmit path may be used.
 **/
s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
{
	/* set flag indicating we are beginning Tx */
	hw->mac.tx_ready = true;

	return 0;
}

/**
 * fm10k_disable_queues_generic - Stop Tx/Rx queues
 * @hw: pointer to hardware structure
 * @q_cnt: number of queues to be disabled
 *
 * Disables each of the first @q_cnt Tx/Rx queue pairs and polls until the
 * hardware reports them stopped, or the timeout expires.
 **/
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
{
	u32 reg;
	u16 i, time;

	/* clear tx_ready to prevent any false hits for reset */
	hw->mac.tx_ready = false;

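	/* if the device has been removed (register mapping no longer valid)
	 * there is nothing left to disable, so report success without
	 * touching the registers
	 */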
	if (FM10K_REMOVED(hw->hw_addr))
		return 0;

	/* clear the enable bit for all rings */
	for (i = 0; i < q_cnt; i++) {
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		fm10k_write_reg(hw, FM10K_TXDCTL(i),
				reg & ~FM10K_TXDCTL_ENABLE);
		reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
		fm10k_write_reg(hw, FM10K_RXQCTL(i),
				reg & ~FM10K_RXQCTL_ENABLE);
	}

	fm10k_write_flush(hw);
	udelay(1);

	/* loop through all queues to verify that they are all disabled */
	for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
		/* if we are at end of queues all queues are disabled */
		if (i == q_cnt)
			return 0;

		/* if queue enables cleared, then move to next queue */
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
			reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
			if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
				i++;
				continue;
			}
		}

		/* decrement time and wait 1 usec */
		time--;
		if (time)
			udelay(1);
	}

	return FM10K_ERR_REQUESTS_PENDING;
}

/**
 * fm10k_stop_hw_generic - Stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Disables all of the device's queues.
 **/
s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
{
	return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
}

/**
 * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
 * @hw: pointer to the hardware structure
 * @addr: address of register containing a 32-bit value
 * @stat: pointer to structure holding hw stat information
 *
 * Reads the content of the register and returns the delta between the
 * current value and the cached base value.
 **/
u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
			    struct fm10k_hw_stat *stat)
{
	u32 delta = fm10k_read_reg(hw, addr) - stat->base_l;

	if (FM10K_REMOVED(hw->hw_addr))
		stat->base_h = 0;

	return delta;
}

/**
 * fm10k_read_hw_stats_48b - Reads value of 48-bit registers
 * @hw: pointer to the hardware structure
 * @addr: address of register containing the lower 32-bit value
 * @stat: pointer to structure holding hw stat information
 *
 * Reads the content of 2 registers, combined to represent a 48-bit
 * statistical value, and returns the delta between the current value and
 * the cached base value. Extra processing is required to handle a rollover
 * of the lower 32 bits between the two reads.
 **/
static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
				   struct fm10k_hw_stat *stat)
{
	u32 count_l;
	u32 count_h;
	u32 count_tmp;
	u64 delta;

	count_h = fm10k_read_reg(hw, addr + 1);

	/* re-read the high half until it is stable so that a rollover of the
	 * low half between the two reads cannot be misinterpreted
	 */
	do {
		count_tmp = count_h;
		count_l = fm10k_read_reg(hw, addr);
		count_h = fm10k_read_reg(hw, addr + 1);
	} while (count_h != count_tmp);

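	/* compute the delta against the cached 48-bit base (base_h:base_l)
	 * and trim the result back down to 48 bits
	 */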
	delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
	delta -= stat->base_l;

	return delta & FM10K_48_BIT_MASK;
}

/**
 * fm10k_update_hw_base_48b - Updates 48-bit statistic base value
 * @stat: pointer to the hardware statistic structure
 * @delta: value to be added to the statistic base
 *
 * Folds a previously read delta back into the cached 48-bit base value
 * stored in @stat.
 **/
static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
{
	if (!delta)
		return;

	/* update lower 32 bits */
	delta += stat->base_l;
	stat->base_l = (u32)delta;

	/* update upper 32 bits */
	stat->base_h += (u32)(delta >> 32);
}

/**
 * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 *
 * Updates the TX queue statistics counters (packets and bytes) for the
 * queue at @idx, verifying that the queue owner did not change while the
 * counters were being read.
 **/
static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_tx, id_tx_prev, tx_packets;
	u64 tx_bytes = 0;

	/* Retrieve TX Owner Data */
	id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));

	/* Process TX Ring */
	do {
		tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
						     &q->tx_packets);

		if (tx_packets)
			tx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBTC_L(idx),
							   &q->tx_bytes);

		/* Re-Check Owner Data in case the queue moved mid-read */
		id_tx_prev = id_tx;
		id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));
	} while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_tx &= FM10K_TXQCTL_ID_MASK;
	id_tx |= FM10K_STAT_VALID;

	/* only accumulate the counts if the queue owner has not changed */
	if (q->tx_stats_idx == id_tx) {
		q->tx_packets.count += tx_packets;
		q->tx_bytes.count += tx_bytes;
	}

	/* update bases and record the current owner ID */
	fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
	fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);

	q->tx_stats_idx = id_tx;
}

/**
 * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 *
 * Updates the RX queue statistics counters (packets, bytes and drops) for
 * the queue at @idx, verifying that the queue owner did not change while
 * the counters were being read.
 **/
static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_rx, id_rx_prev, rx_packets, rx_drops;
	u64 rx_bytes = 0;

	/* Retrieve RX Owner Data */
	id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));

	/* Process RX Ring */
	do {
		rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
						   &q->rx_drops);

		rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
						     &q->rx_packets);

		if (rx_packets)
			rx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBRC_L(idx),
							   &q->rx_bytes);

		/* Re-Check Owner Data in case the queue moved mid-read */
		id_rx_prev = id_rx;
		id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
	} while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_rx &= FM10K_RXQCTL_ID_MASK;
	id_rx |= FM10K_STAT_VALID;

	/* only accumulate the counts if the queue owner has not changed */
	if (q->rx_stats_idx == id_rx) {
		q->rx_drops.count += rx_drops;
		q->rx_packets.count += rx_packets;
		q->rx_bytes.count += rx_bytes;
	}

	/* update bases and record the current owner ID */
	fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
	fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
	fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);

	q->rx_stats_idx = id_rx;
}

/**
 * fm10k_update_hw_stats_q - Updates queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 * @count: number of queues to iterate over
 *
 * Updates the Tx and Rx queue statistics counters for @count queues
 * starting at @idx.
 **/
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
			     u32 idx, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++, idx++, q++) {
		fm10k_update_hw_stats_tx_q(hw, q, idx);
		fm10k_update_hw_stats_rx_q(hw, q, idx);
	}
}

/**
 * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 * @count: number of queues to iterate over
 *
 * Invalidates the stats index values for @count queues starting at @idx so
 * that any updates which may have happened are ignored and the bases for
 * the queue stats are effectively reset on the next update.
 **/
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
	u32 i;

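	/* a zeroed stats index can never match a live owner ID (the owner ID
	 * always has FM10K_STAT_VALID set), so the next update re-bases the
	 * counters instead of accumulating stale deltas
	 */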
	for (i = 0; i < count; i++, idx++, q++) {
		q->rx_stats_idx = 0;
		q->tx_stats_idx = 0;
	}
}

/**
 * fm10k_get_host_state_generic - Returns the state of the host
 * @hw: pointer to hardware structure
 * @host_ready: pointer to boolean value that will record host state
 *
 * Checks the health of the mailbox and Tx queue 0 to determine whether the
 * host is ready, and requests a reset when a fatal condition is detected.
 **/
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0));

	/* process upstream mailbox in case interrupts were disabled */
	mbx->ops.process(hw, mbx);

	/* if Tx queue 0 is no longer enabled we need to re-check host state */
	if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
		mac->get_host_state = true;

	/* exit now if we are not meant to check host state, or the register
	 * read back as all ones (device likely removed)
	 */
	if (!mac->get_host_state || !(~txdctl))
		goto out;

	/* if we somehow dropped the Tx enable we should request a reset */
	if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* if the mailbox timed out we should request a reset */
	if (!mbx->timeout) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* verify the mailbox is still open */
	if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
		goto out;

	/* interface cannot receive traffic without logical ports */
	if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
		if (mac->ops.request_lport_map)
			ret_val = mac->ops.request_lport_map(hw);

		goto out;
	}

	/* if we passed all the tests above then the host is ready and we no
	 * longer need to keep checking its state
	 */
	mac->get_host_state = false;

out:
	*host_ready = !mac->get_host_state;
	return ret_val;
}