#include "fm10k_vf.h"
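
/**
 *  fm10k_stop_hw_vf - Stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Disables the queues owned by the VF and stashes the permanent MAC
 *  address and ITR scale in the descriptor base address and length
 *  registers so they survive the upcoming function reset.
 **/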
static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
{
	u8 *perm_addr = hw->mac.perm_addr;
	u32 bal = 0, bah = 0, tdlen;
	s32 err;
	u16 i;

	/* we need to disable the queues before taking further steps */
	err = fm10k_stop_hw_generic(hw);
	if (err && err != FM10K_ERR_REQUESTS_PENDING)
		return err;

	/* if the permanent address is set then we need to restore it */
	if (is_valid_ether_addr(perm_addr)) {
		bal = (((u32)perm_addr[3]) << 24) |
		      (((u32)perm_addr[4]) << 16) |
		      (((u32)perm_addr[5]) << 8);
		bah = (((u32)0xFF) << 24) |
		      (((u32)perm_addr[0]) << 16) |
		      (((u32)perm_addr[1]) << 8) |
		      ((u32)perm_addr[2]);
	}

	/* restore default itr_scale for the next VF initialization */
	tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT;

	/* The queues have already been disabled so we just need to
	 * update their base address registers
	 */
	for (i = 0; i < hw->mac.max_queues; i++) {
		fm10k_write_reg(hw, FM10K_TDBAL(i), bal);
		fm10k_write_reg(hw, FM10K_TDBAH(i), bah);
		fm10k_write_reg(hw, FM10K_RDBAL(i), bal);
		fm10k_write_reg(hw, FM10K_RDBAH(i), bah);
		/* Restore ITR scale in the software-defined mechanism in
		 * TDLEN for the next VF initialization. See the definition
		 * of FM10K_TDLEN_ITR_SCALE_SHIFT for details on how TDLEN
		 * is used here.
		 */
		fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen);
	}

	return err;
}
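
/**
 *  fm10k_reset_hw_vf - VF hardware reset
 *  @hw: pointer to hardware structure
 *
 *  This function should return the hardware to a state similar to the
 *  one it is in after it has just been initialized.
 **/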
static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
{
	s32 err;

	/* shut down queues we own and reset DMA configuration */
	err = fm10k_stop_hw_vf(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		hw->mac.reset_while_pending++;
	else if (err)
		return err;

	/* initiate the data path reset */
	fm10k_write_reg(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST);

	/* flush the write and wait for the reset to complete */
	fm10k_write_flush(hw);
	udelay(FM10K_RESET_TIMEOUT);

	/* clear the reset bit and verify it was cleared */
	fm10k_write_reg(hw, FM10K_VFCTRL, 0);
	if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
		return FM10K_ERR_RESET_FAILED;

	return 0;
}
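
/**
 *  fm10k_init_hw_vf - VF hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Determines how many queues the PF has assigned to this VF, disables
 *  them, and records the default VLAN ID and ITR scale handed down by
 *  the PF.
 **/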
static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
{
	u32 tqdloc, tqdloc0 = ~fm10k_read_reg(hw, FM10K_TQDLOC(0));
	s32 err;
	u16 i;

	/* verify we have at least 1 queue */
	if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
	    !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
		err = FM10K_ERR_NO_RESOURCES;
		goto reset_max_queues;
	}

	/* determine how many queues we have */
	for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
		/* verify the descriptor cache offsets are increasing */
		tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
		if (!tqdloc || (tqdloc == tqdloc0))
			break;

		/* check to verify the PF doesn't own any of our queues */
		if (!~fm10k_read_reg(hw, FM10K_TXQCTL(i)) ||
		    !~fm10k_read_reg(hw, FM10K_RXQCTL(i)))
			break;
	}

	/* shut down queues we own and reset DMA configuration */
	err = fm10k_disable_queues_generic(hw, i);
	if (err)
		goto reset_max_queues;

	/* record maximum queue count */
	hw->mac.max_queues = i;

	/* fetch default VLAN */
	hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
			       FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;

	/* Read the ITR scale from TDLEN. See the definition of
	 * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how
	 * TDLEN is used here.
	 */
	hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
			     FM10K_TDLEN_ITR_SCALE_MASK) >>
			    FM10K_TDLEN_ITR_SCALE_SHIFT;

	return 0;

reset_max_queues:
	hw->mac.max_queues = 0;

	return err;
}
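
/* This structure defines the attributes to be parsed below */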
const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
	FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
	FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET),
	FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC),
	FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC),
	FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST),
	FM10K_TLV_ATTR_LAST
};
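
/**
 *  fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vid: VLAN ID to add to table
 *  @vsi: reserved, should always be 0
 *  @set: indicates if this is a set or clear operation
 *
 *  This function adds or removes the corresponding VLAN ID from the VLAN
 *  filter table for this VF by sending a request to the PF over the mailbox.
 **/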
static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[4];

	/* verify the index is not set */
	if (vsi)
		return FM10K_ERR_PARAM;

	/* verify that the reserved upper bits of both the VLAN ID (lower
	 * 16 bits) and the VLAN count (upper 16 bits) are clear
	 */
	if ((vid << 16 | vid) >> 28)
		return FM10K_ERR_PARAM;

	/* encode the clear bit into the VLAN ID */
	if (!set)
		vid |= FM10K_VLAN_CLEAR;

	/* generate VLAN request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
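
/**
 *  fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message
 *  @hw: pointer to the HW structure
 *  @results: attributes parsed from the message
 *  @mbx: unused mailbox data
 *
 *  This function records the MAC address, default VLAN, and VLAN override
 *  flag that the PF assigned to this VF.
 **/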
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
			  struct fm10k_mbx_info *mbx)
{
	u8 perm_addr[ETH_ALEN];
	u16 vid;
	s32 err;

	/* record the MAC address handed down by the PF */
	err = fm10k_tlv_attr_get_mac_vlan(
					results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC],
					perm_addr, &vid);
	if (err)
		return err;

	ether_addr_copy(hw->mac.perm_addr, perm_addr);
	hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
	hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);

	return 0;
}
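
/**
 *  fm10k_read_mac_addr_vf - Read device MAC address
 *  @hw: pointer to the HW structure
 *
 *  This function determines the MAC address for the VF by reading it back
 *  out of the queue 0 descriptor base address registers.
 **/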
static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
{
	u8 perm_addr[ETH_ALEN];
	u32 base_addr;

	base_addr = fm10k_read_reg(hw, FM10K_TDBAL(0));

	/* last byte should be 0 */
	if (base_addr << 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[3] = (u8)(base_addr >> 24);
	perm_addr[4] = (u8)(base_addr >> 16);
	perm_addr[5] = (u8)(base_addr >> 8);

	base_addr = fm10k_read_reg(hw, FM10K_TDBAH(0));

	/* first byte should be all 1's */
	if ((~base_addr) >> 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[0] = (u8)(base_addr >> 16);
	perm_addr[1] = (u8)(base_addr >> 8);
	perm_addr[2] = (u8)(base_addr);

	ether_addr_copy(hw->mac.perm_addr, perm_addr);
	ether_addr_copy(hw->mac.addr, perm_addr);

	return 0;
}
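
/**
 *  fm10k_update_uc_addr_vf - Update device unicast addresses
 *  @hw: pointer to the HW structure
 *  @glort: unused
 *  @mac: MAC address to add/remove from table
 *  @vid: VLAN ID to add/remove from table
 *  @add: indicates if this is an add or remove operation
 *  @flags: flags field to indicate add and secure - unused
 *
 *  This function is used to add or remove unicast addresses for the VF.
 **/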
static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[7];

	/* verify VLAN ID is valid */
	if (vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* verify MAC address is valid */
	if (!is_valid_ether_addr(mac))
		return FM10K_ERR_PARAM;

	/* the VF may only program the MAC address assigned to it by the PF */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, mac))
		return FM10K_ERR_PARAM;

	/* add a bit to indicate whether this is a set or clear operation */
	if (!add)
		vid |= FM10K_VLAN_CLEAR;

	/* generate MAC/VLAN request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
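
/**
 *  fm10k_update_mc_addr_vf - Update device multicast addresses
 *  @hw: pointer to the HW structure
 *  @glort: unused
 *  @mac: MAC address to add/remove from table
 *  @vid: VLAN ID to add/remove from table
 *  @add: indicates if this is an add or remove operation
 *
 *  This function is used to add or remove multicast addresses for the VF.
 **/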
static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[7];

	/* verify VLAN ID is valid */
	if (vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* verify multicast address is valid */
	if (!is_multicast_ether_addr(mac))
		return FM10K_ERR_PARAM;

	/* add a bit to indicate whether this is a set or clear operation */
	if (!add)
		vid |= FM10K_VLAN_CLEAR;

	/* generate MAC/VLAN request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
				    mac, vid);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
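
/**
 *  fm10k_update_int_moderator_vf - Request update of interrupt moderator list
 *  @hw: pointer to hardware structure
 *
 *  This function issues a request to the PF to rescan our MSI-X table
 *  and to update the interrupt moderator linked list.
 **/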
static void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[1];

	/* generate MSI-X request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);

	/* load onto outgoing mailbox */
	mbx->ops.enqueue_tx(hw, mbx, msg);
}
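
/* This structure defines the attributes to be parsed below */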
const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
	FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
	FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
	FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
	FM10K_TLV_ATTR_LAST
};
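
/**
 *  fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
 *  @hw: pointer to hardware structure
 *  @results: pointer array containing parsed data
 *  @mbx: pointer to mailbox information structure
 *
 *  This handler captures the indication from the PF that we are ready to
 *  bring up the interface by populating the dglort map.
 **/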
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info *mbx)
{
	hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
			     FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;

	return 0;
}
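
/**
 *  fm10k_update_lport_state_vf - Update device state in lower device
 *  @hw: pointer to the HW structure
 *  @glort: unused
 *  @count: number of logical ports to enable - unused (always 1)
 *  @enable: boolean value indicating if this is an enable or disable request
 *
 *  Notifies the PF of the requested logical port state change over the
 *  mailbox.
 **/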
static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
				       u16 count, bool enable)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[2];

	/* clear the dglort map; it is repopulated when the PF reports ready */
	hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

	/* generate port state request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
	if (!enable)
		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
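
/**
 *  fm10k_update_xcast_mode_vf - Request update of multicast mode
 *  @hw: pointer to hardware structure
 *  @glort: unused
 *  @mode: integer value indicating mode being requested
 *
 *  This function will attempt to request a higher mode for the port so
 *  that it can enable either multicast, multicast promiscuous, or
 *  promiscuous mode of operation.
 **/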
static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3];

	if (mode > FM10K_XCAST_MODE_NONE)
		return FM10K_ERR_PARAM;

	/* generate message requesting the change of xcast mode */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
	fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
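
/**
 *  fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
 *  @hw: pointer to hardware structure
 *  @stats: pointer to the stats structure to update
 *
 *  This function collects and aggregates per queue hardware statistics.
 **/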
static void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}
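
/**
 *  fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
 *  @hw: pointer to hardware structure
 *  @stats: pointer to the stats structure to update
 *
 *  This function resets the base for queue hardware statistics.
 **/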
static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	/* unbind queue statistics */
	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);

	/* reinitialize bases for all stats */
	fm10k_update_hw_stats_vf(hw, stats);
}
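
/**
 *  fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
 *  @hw: pointer to hardware structure
 *  @dglort: pointer to dglort configuration structure
 *
 *  The VF does not program the GLORT mapping itself, so this function only
 *  validates the configuration pointer that is passed in.
 **/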
static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
					 struct fm10k_dglort_cfg *dglort)
{
	/* verify the dglort pointer */
	if (!dglort)
		return FM10K_ERR_PARAM;

	/* no register programming is required for the VF */

	return 0;
}
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static const struct fm10k_mac_ops mac_ops_vf = {
	.get_bus_info = fm10k_get_bus_info_generic,
	.reset_hw = fm10k_reset_hw_vf,
	.init_hw = fm10k_init_hw_vf,
	.start_hw = fm10k_start_hw_generic,
	.stop_hw = fm10k_stop_hw_vf,
	.update_vlan = fm10k_update_vlan_vf,
	.read_mac_addr = fm10k_read_mac_addr_vf,
	.update_uc_addr = fm10k_update_uc_addr_vf,
	.update_mc_addr = fm10k_update_mc_addr_vf,
	.update_xcast_mode = fm10k_update_xcast_mode_vf,
	.update_int_moderator = fm10k_update_int_moderator_vf,
	.update_lport_state = fm10k_update_lport_state_vf,
	.update_hw_stats = fm10k_update_hw_stats_vf,
	.rebind_hw_stats = fm10k_rebind_hw_stats_vf,
	.configure_dglort_map = fm10k_configure_dglort_map_vf,
	.get_host_state = fm10k_get_host_state_generic,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
{
	fm10k_get_invariants_generic(hw);

	return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
}
const struct fm10k_info fm10k_vf_info = {
	.mac = fm10k_mac_vf,
	.get_invariants = fm10k_get_invariants_vf,
	.mac_ops = &mac_ops_vf,
};