1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include "fm10k_common.h"
23
24
25
26
27
28
29
30
31s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
32{
33 u16 link_cap, link_status, device_cap, device_control;
34
35
36 link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP);
37
38 switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
39 case FM10K_PCIE_LINK_WIDTH_1:
40 hw->bus_caps.width = fm10k_bus_width_pcie_x1;
41 break;
42 case FM10K_PCIE_LINK_WIDTH_2:
43 hw->bus_caps.width = fm10k_bus_width_pcie_x2;
44 break;
45 case FM10K_PCIE_LINK_WIDTH_4:
46 hw->bus_caps.width = fm10k_bus_width_pcie_x4;
47 break;
48 case FM10K_PCIE_LINK_WIDTH_8:
49 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
50 break;
51 default:
52 hw->bus_caps.width = fm10k_bus_width_unknown;
53 break;
54 }
55
56 switch (link_cap & FM10K_PCIE_LINK_SPEED) {
57 case FM10K_PCIE_LINK_SPEED_2500:
58 hw->bus_caps.speed = fm10k_bus_speed_2500;
59 break;
60 case FM10K_PCIE_LINK_SPEED_5000:
61 hw->bus_caps.speed = fm10k_bus_speed_5000;
62 break;
63 case FM10K_PCIE_LINK_SPEED_8000:
64 hw->bus_caps.speed = fm10k_bus_speed_8000;
65 break;
66 default:
67 hw->bus_caps.speed = fm10k_bus_speed_unknown;
68 break;
69 }
70
71
72 device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP);
73
74 switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
75 case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
76 hw->bus_caps.payload = fm10k_bus_payload_128;
77 break;
78 case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
79 hw->bus_caps.payload = fm10k_bus_payload_256;
80 break;
81 case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
82 hw->bus_caps.payload = fm10k_bus_payload_512;
83 break;
84 default:
85 hw->bus_caps.payload = fm10k_bus_payload_unknown;
86 break;
87 }
88
89
90 link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS);
91
92 switch (link_status & FM10K_PCIE_LINK_WIDTH) {
93 case FM10K_PCIE_LINK_WIDTH_1:
94 hw->bus.width = fm10k_bus_width_pcie_x1;
95 break;
96 case FM10K_PCIE_LINK_WIDTH_2:
97 hw->bus.width = fm10k_bus_width_pcie_x2;
98 break;
99 case FM10K_PCIE_LINK_WIDTH_4:
100 hw->bus.width = fm10k_bus_width_pcie_x4;
101 break;
102 case FM10K_PCIE_LINK_WIDTH_8:
103 hw->bus.width = fm10k_bus_width_pcie_x8;
104 break;
105 default:
106 hw->bus.width = fm10k_bus_width_unknown;
107 break;
108 }
109
110 switch (link_status & FM10K_PCIE_LINK_SPEED) {
111 case FM10K_PCIE_LINK_SPEED_2500:
112 hw->bus.speed = fm10k_bus_speed_2500;
113 break;
114 case FM10K_PCIE_LINK_SPEED_5000:
115 hw->bus.speed = fm10k_bus_speed_5000;
116 break;
117 case FM10K_PCIE_LINK_SPEED_8000:
118 hw->bus.speed = fm10k_bus_speed_8000;
119 break;
120 default:
121 hw->bus.speed = fm10k_bus_speed_unknown;
122 break;
123 }
124
125
126 device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL);
127
128 switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
129 case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
130 hw->bus.payload = fm10k_bus_payload_128;
131 break;
132 case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
133 hw->bus.payload = fm10k_bus_payload_256;
134 break;
135 case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
136 hw->bus.payload = fm10k_bus_payload_512;
137 break;
138 default:
139 hw->bus.payload = fm10k_bus_payload_unknown;
140 break;
141 }
142
143 return 0;
144}
145
146static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
147{
148 u16 msix_count;
149
150
151 msix_count = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL);
152 msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;
153
154
155 msix_count++;
156
157 if (msix_count > FM10K_MAX_MSIX_VECTORS)
158 msix_count = FM10K_MAX_MSIX_VECTORS;
159
160 return msix_count;
161}
162
163
164
165
166
167
168
169s32 fm10k_get_invariants_generic(struct fm10k_hw *hw)
170{
171 struct fm10k_mac_info *mac = &hw->mac;
172
173
174 mac->dglort_map = FM10K_DGLORTMAP_NONE;
175
176
177 mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
178
179 return 0;
180}
181
182
183
184
185
186
187
188
/**
 *  fm10k_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Marks the MAC as ready to transmit.  Always returns 0.
 **/
s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
{
	/* set flag indicating we are beginning Tx */
	hw->mac.tx_ready = true;

	return 0;
}
196
197
198
199
200
201
202
/**
 *  fm10k_disable_queues_generic - Stop processing of queues
 *  @hw: pointer to hardware structure
 *  @q_cnt: number of queues to be disabled
 *
 *  Clears the enable bit on the first @q_cnt Tx and Rx queues, then polls
 *  until every queue reports disabled or the timeout expires.  Returns 0 on
 *  success, FM10K_ERR_REQUESTS_PENDING if queues are still enabled when the
 *  timeout is exhausted.
 **/
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
{
	u32 reg;
	u16 i, time;

	/* clear tx_ready to prevent any false hits for reset */
	hw->mac.tx_ready = false;

	/* nothing to do if the device has been surprise-removed */
	if (FM10K_REMOVED(hw->hw_addr))
		return 0;

	/* clear the enable bit for all rings */
	for (i = 0; i < q_cnt; i++) {
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		fm10k_write_reg(hw, FM10K_TXDCTL(i),
				reg & ~FM10K_TXDCTL_ENABLE);
		reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
		fm10k_write_reg(hw, FM10K_RXQCTL(i),
				reg & ~FM10K_RXQCTL_ENABLE);
	}

	/* flush all of the queue disable writes before waiting */
	fm10k_write_flush(hw);
	udelay(1);

	/* loop through all queues to verify that they are all disabled */
	for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
		/* if we are at end of queues all queues are disabled */
		if (i == q_cnt)
			return 0;

		/* read TX queue control and verify it is disabled; a read of
		 * all 1s (!~reg) means the device was removed, treat the
		 * queue as disabled in that case
		 */
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
			/* read RX queue control and verify it is disabled */
			reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
			if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
				i++;
				continue;
			}
		}

		/* queue still enabled: consume timeout and wait briefly */
		time--;
		if (time)
			udelay(1);
	}

	return FM10K_ERR_REQUESTS_PENDING;
}
251
252
253
254
255
256
257s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
258{
259 return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
260}
261
262
263
264
265
266
267
268
269
270
271u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
272 struct fm10k_hw_stat *stat)
273{
274 u32 delta = fm10k_read_reg(hw, addr) - stat->base_l;
275
276 if (FM10K_REMOVED(hw->hw_addr))
277 stat->base_h = 0;
278
279 return delta;
280}
281
282
283
284
285
286
287
288
289
290
291
292
/**
 *  fm10k_read_hw_stats_48b - Reads 48-bit statistic
 *  @hw: pointer to the hardware structure
 *  @addr: address of the lower 32-bit half of the counter register pair
 *  @stat: pointer to structure holding hw stat information
 *
 *  Reads a 48-bit counter split across two registers (low word at @addr,
 *  high word at @addr + 1) and returns the delta against the stat's
 *  recorded base, masked to 48 bits.
 **/
static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
				   struct fm10k_hw_stat *stat)
{
	u32 count_l;
	u32 count_h;
	u32 count_tmp;
	u64 delta;

	count_h = fm10k_read_reg(hw, addr + 1);

	/* re-read until the high word is stable so that the low word is
	 * known to belong to the same 48-bit sample (guards against the
	 * low word rolling over between the two reads)
	 */
	do {
		count_tmp = count_h;
		count_l = fm10k_read_reg(hw, addr);
		count_h = fm10k_read_reg(hw, addr + 1);
	} while (count_h != count_tmp);

	/* combine high/low deltas into a single 64-bit delta */
	delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
	delta -= stat->base_l;

	return delta & FM10K_48_BIT_MASK;
}
315
316
317
318
319
320
321
322
323
324static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
325{
326 if (!delta)
327 return;
328
329
330 delta += stat->base_l;
331 stat->base_l = (u32)delta;
332
333
334 stat->base_h += (u32)(delta >> 32);
335}
336
337
338
339
340
341
342
343
344
345
/**
 *  fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
 *  @hw: pointer to the hardware structure
 *  @q: pointer to the ring of hardware statistics queue
 *  @idx: index pointing to the start of the ring iteration
 *
 *  Reads the Tx packet and byte counters for queue @idx, verifying via the
 *  TXQCTL owner ID that the counters were not reassigned to a different
 *  function mid-read, and folds the deltas into the software counters.
 **/
static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_tx, id_tx_prev, tx_packets;
	u64 tx_bytes = 0;

	/* Retrieve TX Owner Data */
	id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));

	/* Process TX Ring */
	do {
		tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
						     &q->tx_packets);

		/* only bother with byte count if packets moved */
		if (tx_packets)
			tx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBTC_L(idx),
							   &q->tx_bytes);

		/* Re-Check Owner Data; retry if the queue changed hands */
		id_tx_prev = id_tx;
		id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));
	} while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_tx &= FM10K_TXQCTL_ID_MASK;
	id_tx |= FM10K_STAT_VALID;

	/* only accumulate if the owner matched the last recorded owner */
	if (q->tx_stats_idx == id_tx) {
		q->tx_packets.count += tx_packets;
		q->tx_bytes.count += tx_bytes;
	}

	/* advance bases and record current owner */
	fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
	fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);

	q->tx_stats_idx = id_tx;
}
387
388
389
390
391
392
393
394
395
396
/**
 *  fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
 *  @hw: pointer to the hardware structure
 *  @q: pointer to the ring of hardware statistics queue
 *  @idx: index pointing to the start of the ring iteration
 *
 *  Reads the Rx drop, packet, and byte counters for queue @idx, verifying
 *  via the RXQCTL owner ID that the counters were not reassigned to a
 *  different function mid-read, and folds the deltas into the software
 *  counters.
 **/
static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_rx, id_rx_prev, rx_packets, rx_drops;
	u64 rx_bytes = 0;

	/* Retrieve RX Owner Data */
	id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));

	/* Process RX Ring */
	do {
		rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
						   &q->rx_drops);

		rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
						     &q->rx_packets);

		/* only bother with byte count if packets moved */
		if (rx_packets)
			rx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBRC_L(idx),
							   &q->rx_bytes);

		/* Re-Check Owner Data; retry if the queue changed hands */
		id_rx_prev = id_rx;
		id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
	} while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_rx &= FM10K_RXQCTL_ID_MASK;
	id_rx |= FM10K_STAT_VALID;

	/* only accumulate if the owner matched the last recorded owner */
	if (q->rx_stats_idx == id_rx) {
		q->rx_drops.count += rx_drops;
		q->rx_packets.count += rx_packets;
		q->rx_bytes.count += rx_bytes;
	}

	/* advance bases and record current owner */
	fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
	fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
	fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);

	q->rx_stats_idx = id_rx;
}
443
444
445
446
447
448
449
450
451
452
453
454void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
455 u32 idx, u32 count)
456{
457 u32 i;
458
459 for (i = 0; i < count; i++, idx++, q++) {
460 fm10k_update_hw_stats_tx_q(hw, q, idx);
461 fm10k_update_hw_stats_rx_q(hw, q, idx);
462 }
463}
464
465
466
467
468
469
470
471
472
473
474void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
475{
476 u32 i;
477
478 for (i = 0; i < count; i++, idx++, q++) {
479 q->rx_stats_idx = 0;
480 q->tx_stats_idx = 0;
481 }
482}
483
484
485
486
487
488
489
490
491
/**
 *  fm10k_get_host_state_generic - Returns the state of the host
 *  @hw: pointer to hardware structure
 *  @host_ready: pointer to boolean value that will record host state
 *
 *  Checks the health of the mailbox and of Tx queue 0 to decide whether the
 *  host/switch is ready.  Returns 0 normally, or
 *  FM10K_ERR_RESET_REQUESTED when a reset is needed (Tx enable dropped or
 *  mailbox timed out).  *host_ready is set on every exit path.
 **/
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0));

	/* process upstream mailbox in case interrupts were disabled */
	mbx->ops.process(hw, mbx);

	/* if Tx is no longer enabled we need to re-check host state; an
	 * all-ones read (!~txdctl) indicates the device was removed
	 */
	if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
		mac->get_host_state = true;

	/* exit if not checking for link, or link cannot be changed */
	if (!mac->get_host_state || !(~txdctl))
		goto out;

	/* if we somehow dropped the Tx enable we should reset */
	if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* if Mailbox timed out we should request reset */
	if (!mbx->timeout) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* verify Mailbox is still open */
	if (mbx->state != FM10K_STATE_OPEN)
		goto out;

	/* interface cannot receive traffic without logical ports; request a
	 * map from the switch manager if we don't have one yet
	 */
	if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
		if (mac->ops.request_lport_map)
			ret_val = mac->ops.request_lport_map(hw);

		goto out;
	}

	/* all checks passed: the switch is ready and we no longer need to
	 * re-verify host state on subsequent calls
	 */
	mac->get_host_state = false;

out:
	*host_ready = !mac->get_host_state;
	return ret_val;
}
543