1
2
3
4#include "ice.h"
5#include "ice_base.h"
6#include "ice_lib.h"
7#include "ice_fltr.h"
8#include "ice_flow.h"
9#include "ice_virtchnl_allowlist.h"
10
/* Convert a virtchnl proto_hdr field enum into its selector bit: the low
 * bits of the enum (masked by PROTO_HDR_FIELD_MASK) give the bit position.
 */
#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
13
/* Maps a virtchnl protocol header type to the equivalent ice flow segment
 * header flag(s) used when programming RSS flow profiles.
 */
struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
18
/* virtchnl-to-ice header translation table for the "os" configuration —
 * presumably the OS-default DDP package, which supports only basic
 * IPv4/IPv6/TCP/UDP/SCTP RSS; TODO confirm against package docs.
 */
static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
};
29
/* virtchnl-to-ice header translation table for the "comms" configuration —
 * presumably the comms DDP package, which adds tunneled/telecom protocol
 * headers (PPPoE, GTP-U, L2TPv3, ESP/AH, PFCP); TODO confirm.
 */
static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};
54
/* Maps a (virtchnl header, virtchnl hash-field selector set) pair to the
 * equivalent ice hash-field bitmask used when configuring RSS.
 */
struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};
66
/* Hash-field translation table for the "os" configuration. Entries with
 * multiple selector bits ORed together must match the VF's requested field
 * set exactly; the lookup is selector-set -> ice hash-field mask.
 */
static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
};
142
/* Hash-field translation table for the "comms" configuration. Superset of
 * the "os" table: adds ETH/VLAN/PPPoE/GTP-U/L2TPv3/ESP/AH/PFCP fields.
 */
static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
249
250
251
252
253
254static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
255{
256 return vf->pf->vsi[vf->lan_vsi_idx];
257}
258
259
260
261
262
263
264static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
265{
266
267 if (vf_id >= pf->num_alloc_vfs) {
268 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
269 return -EINVAL;
270 }
271 return 0;
272}
273
274
275
276
277
278
279static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
280{
281 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
282 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
283 vf->vf_id);
284 return -EBUSY;
285 }
286 return 0;
287}
288
289
290
291
292
/**
 * ice_err_to_virt_err - translate an ice_status error into a virtchnl status
 * @ice_err: driver-internal error code
 *
 * Groups related driver errors into the coarser virtchnl status codes that
 * are reported back to the VF; anything unrecognized maps to NOT_SUPPORTED.
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
319
320
321
322
323
324
325
326
327
/**
 * ice_vc_vf_broadcast - send a message to all VFs on this PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code to send
 * @v_retval: virtchnl status code to send
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}
350
351
352
353
354
355
356
357
/**
 * ice_set_pfe_link - fill link status fields of a virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to fill in
 * @ice_link_speed: link speed (ICE_AQ_LINK_SPEED_* value)
 * @link_up: whether the link is up or down
 *
 * VFs that negotiated ADV_LINK_SPEED get the raw Mbps value; legacy VFs get
 * the speed mapped to the virtchnl_link_speed enum instead.
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy enum values for the VF's virtchnl.h */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}
375
376
377
378
379
380
381
382
383static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
384{
385 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
386 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
387}
388
389
390
391
392
/**
 * ice_is_vf_link_up - determine the link status to report to a VF
 * @vf: VF whose link status to check
 *
 * Link is reported down while the VF is uninitialized or has no queues
 * enabled; a forced link state overrides the physical port state.
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}
408
409
410
411
412
413
414
/**
 * ice_vc_notify_vf_link_state - inform a VF of its current link state
 * @vf: pointer to the VF structure
 *
 * Sends a VIRTCHNL_OP_EVENT link-change message over the mailbox with the
 * current port speed, or UNKNOWN speed when the link is reported down.
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}
433
434
435
436
437
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}
443
444
445
446
447
/**
 * ice_vf_vsi_release - release the VF's LAN VSI and invalidate its indexes
 * @vf: VF whose VSI is being released
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}
453
454
455
456
457
/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
462
463
464
465
466
/**
 * ice_vf_ctrl_vsi_release - release the VF's control VSI and invalidate it
 * @vf: VF that control VSI is being released on
 */
static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}
472
473
474
475
476
/**
 * ice_free_vf_res - free a VF's VSIs, interrupts and tracked state
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
512
513
514
515
516
/**
 * ice_dis_vf_mappings - disable a VF's MSI-X and queue mapping registers
 * @vf: VF whose hardware mappings are being torn down
 *
 * Clears the VPINT allocation registers, returns the VF's MSI-X vectors to
 * the PF via GLINT_VECT2FUNC, and zeroes the Tx/Rx queue base registers.
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		/* point each vector back at the PF */
		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
554
555
556
557
558
559
560
561
562
563
564static int ice_sriov_free_msix_res(struct ice_pf *pf)
565{
566 struct ice_res_tracker *res;
567
568 if (!pf)
569 return -EINVAL;
570
571 res = pf->irq_tracker;
572 if (!res)
573 return -EINVAL;
574
575
576 WARN_ON(pf->sriov_base_vector < res->num_entries);
577
578 pf->sriov_base_vector = 0;
579
580 return 0;
581}
582
583
584
585
586
/**
 * ice_set_vf_state_qs_dis - set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
594
595
596
597
598
/**
 * ice_dis_vf_qs - stop a VF's Tx/Rx rings and mark its queues disabled
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}
607
608
609
610
611
612void ice_free_vfs(struct ice_pf *pf)
613{
614 struct device *dev = ice_pf_to_dev(pf);
615 struct ice_hw *hw = &pf->hw;
616 unsigned int tmp, i;
617
618 set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
619
620 if (!pf->vf)
621 return;
622
623 while (test_and_set_bit(ICE_VF_DIS, pf->state))
624 usleep_range(1000, 2000);
625
626
627
628
629
630 if (!pci_vfs_assigned(pf->pdev))
631 pci_disable_sriov(pf->pdev);
632 else
633 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
634
635
636 ice_for_each_vf(pf, i)
637 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
638 ice_dis_vf_qs(&pf->vf[i]);
639
640 tmp = pf->num_alloc_vfs;
641 pf->num_qps_per_vf = 0;
642 pf->num_alloc_vfs = 0;
643 for (i = 0; i < tmp; i++) {
644 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
645
646 ice_dis_vf_mappings(&pf->vf[i]);
647 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
648 ice_free_vf_res(&pf->vf[i]);
649 }
650 }
651
652 if (ice_sriov_free_msix_res(pf))
653 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
654
655 devm_kfree(dev, pf->vf);
656 pf->vf = NULL;
657
658
659
660
661
662 if (!pci_vfs_assigned(pf->pdev)) {
663 unsigned int vf_id;
664
665
666
667
668 for (vf_id = 0; vf_id < tmp; vf_id++) {
669 u32 reg_idx, bit_idx;
670
671 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
672 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
673 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
674 }
675 }
676
677
678 for (i = 0; i < tmp; i++)
679 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
680 ICE_MAX_VF_COUNT, i))
681 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
682 i);
683
684 clear_bit(ICE_VF_DIS, pf->state);
685 clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
686 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
687}
688
689
690
691
692
693
694
695
696
697
698
/**
 * ice_trigger_vf_reset - start a hardware reset for a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if the reset is due to VFLR
 * @is_pfr: true if the reset was triggered by a PF reset
 *
 * Kicks off the reset in hardware; the caller is expected to wait the
 * appropriate amount of time for hardware to finish before restoring VF
 * functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform the VF that it is no longer active */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset; re-enabled once it is
	 * safe to access the VF's VSI again.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* Mailbox queue length registers are cleared by a PFR anyway, so only
	 * clear them here for a VFR/VFLR. NOTE(review): presumably clearing
	 * them during a PFR could race with VF driver cleanup — confirm
	 * against hardware spec.
	 */
	if (!is_pfr) {
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
	}

	/* On VFLR the hardware has already reset the VF, so skip the
	 * software-triggered reset via VPGEN_VFRTRIG.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	/* poll until no PCI transactions are pending for this VF */
	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}
756
757
758
759
760
761
762
/**
 * ice_vsi_manage_pvid - enable or disable a port VLAN on a VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS to program into the VSI context PVID field
 * @enable: true to enable the port VLAN, false to disable it
 *
 * Updates the VSI context via AQ and, on success, mirrors the new VLAN
 * flags/PVID into the cached vsi->info. Returns 0 or -ENOMEM/-EIO.
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		/* strip the VLAN on Rx, insert the PVID on Tx, and prune */
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		/* leave tags untouched and stop pruning */
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	/* keep the cached copy in sync with what hardware accepted */
	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}
808
809
810
811
812
813static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
814{
815 return vf->pf->hw.port_info;
816}
817
818
819
820
821
822
823
824
825static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
826{
827 struct ice_port_info *pi = ice_vf_get_port_info(vf);
828 struct ice_pf *pf = vf->pf;
829 struct ice_vsi *vsi;
830
831 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
832
833 if (!vsi) {
834 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
835 ice_vf_invalidate_vsi(vf);
836 return NULL;
837 }
838
839 vf->lan_vsi_idx = vsi->idx;
840 vf->lan_vsi_num = vsi->vsi_num;
841
842 return vsi;
843}
844
845
846
847
848
849
850
851
/**
 * ice_vf_ctrl_vsi_setup - create a control VSI for a VF
 * @vf: VF to create the control VSI for
 *
 * Returns the new VSI, or NULL on failure (after invalidating the VF's
 * control VSI index).
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}
866
867
868
869
870
871
872
873
874
875
876
877
878
879static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
880{
881 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
882}
883
884
885
886
887
888
889
890
/**
 * ice_vf_rebuild_host_vlan_cfg - re-add a VLAN 0 or port VLAN filter
 * @vf: VF whose VSI was just rebuilt
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. Re-applies
 * the port VLAN (PVID) if one is configured, then adds the matching VLAN
 * filter (VLAN 0 when no port VLAN is set). Returns 0 or a negative errno.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u16 vlan_id = 0;
	int err;

	if (vf->port_vlan_info) {
		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
	}

	/* vlan_id will either be 0 or the port VLAN number */
	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
			err);
		return err;
	}

	return 0;
}
920
921
922
923
924
925
926
927
/**
 * ice_vf_rebuild_host_mac_cfg - re-add the VF's broadcast and unicast MACs
 * @vf: VF whose VSI was just rebuilt
 *
 * Adds a broadcast filter and, when a valid hardware MAC is set, the VF's
 * default unicast filter, bumping num_mac for each. Returns 0 or a negative
 * errno.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	enum ice_status status;
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
			vf->vf_id, ice_stat_str(status));
		return ice_status_to_errno(status);
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
				&vf->hw_lan_addr.addr[0], vf->vf_id,
				ice_stat_str(status));
			return ice_status_to_errno(status);
		}
		vf->num_mac++;

		/* the device address is in sync with the hardware address */
		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}

	return 0;
}
961
962
963
964
965
/**
 * ice_vf_set_host_trust_cfg - sync the privilege capability with vf->trusted
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
973
974
975
976
977
978
979
980
981
/**
 * ice_ena_vf_msix_mappings - program the VF's MSI-X mappings in hardware
 * @vf: VF to enable MSI-X mappings for
 *
 * Some registers are indexed/configured with device-global values (offset by
 * msix_vector_first_id / vf_base_id), others with 0-based PF-relative values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* program the VF's vector allocation range (device-based indexes) */
	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		 & VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
1025
1026
1027
1028
1029
1030
1031
/**
 * ice_ena_vf_q_mappings - enable the VF's queue mappings in hardware
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 *
 * Only contiguous queue mapping is implemented; scattered mode logs an
 * error.
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range: VFFIRSTQ is the first queue
		 * and VFNUMQ is the (queue count - 1)
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range: VFFIRSTQ is the first queue
		 * and VFNUMQ is the (queue count - 1)
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}
1075
1076
1077
1078
1079
/**
 * ice_ena_vf_mappings - enable a VF's MSI-X and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
/**
 * ice_determine_res - find a per-VF resource count that fits what's available
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Starting at @max_res, repeatedly halve (rounding up) the per-VF amount
 * until num_alloc_vfs * amount fits in @avail_res, stopping once @min_res
 * itself has been tried. Returns the workable per-VF amount, or 0 if even
 * the minimum cannot be satisfied.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}
1126
1127
1128
1129
1130
1131
1132int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
1133{
1134 struct ice_pf *pf;
1135
1136 if (!vf || !q_vector)
1137 return -EINVAL;
1138
1139 pf = vf->pf;
1140
1141
1142 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
1143 q_vector->v_idx + 1;
1144}
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
/**
 * ice_get_max_valid_res_idx - highest in-use index in a resource tracker
 * @res: pointer to the resource tracker to search
 *
 * Scans res->list from the end and returns the index of the last entry with
 * ICE_RES_VALID_BIT set. Note: returns 0 (not an error) when no entry is
 * valid — callers treat 0 as the floor. Returns -EINVAL if @res is NULL.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
/**
 * ice_sriov_set_msix_res - reserve MSI-X vectors for SR-IOV
 * @pf: pointer to the PF structure
 * @num_msix_needed: number of MSI-X vectors needed for all SR-IOV VFs
 *
 * SR-IOV vectors are carved from the end of the PF's HW MSI-X space so the
 * irq_tracker is unaffected; only pf->sriov_base_vector needs to be set.
 * Returns -EINVAL when the requested amount would overlap the vectors the
 * irq_tracker already uses.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
/**
 * ice_set_per_vf_res - determine per-VF MSI-X vector and queue counts
 * @pf: pointer to the PF structure
 *
 * Splits the remaining MSI-X vectors evenly between the allocated VFs,
 * rounding down to the nearest supported tier (MED/SMALL/MULTIQ_MIN/MIN),
 * then sizes the per-VF queue count to the smaller of what Tx/Rx queues and
 * data vectors allow. On success stores num_qps_per_vf/num_msix_per_vf in
 * @pf and reserves the SR-IOV vector region. Returns 0, or -EINVAL/-EIO if
 * there are not enough resources.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF; reserve ICE_NONQ_VECS_VF for
	 * non-queue (e.g. mailbox) interrupts
	 */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}
1286
1287
1288
1289
1290
1291static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
1292{
1293 struct ice_hw *hw = &vf->pf->hw;
1294 u32 reg;
1295
1296 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
1297 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
1298 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
1299 ice_flush(hw);
1300}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312static enum ice_status
1313ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1314 bool rm_promisc)
1315{
1316 struct ice_pf *pf = vf->pf;
1317 enum ice_status status = 0;
1318 struct ice_hw *hw;
1319
1320 hw = &pf->hw;
1321 if (vsi->num_vlan) {
1322 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1323 rm_promisc);
1324 } else if (vf->port_vlan_info) {
1325 if (rm_promisc)
1326 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1327 vf->port_vlan_info);
1328 else
1329 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1330 vf->port_vlan_info);
1331 } else {
1332 if (rm_promisc)
1333 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1334 0);
1335 else
1336 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1337 0);
1338 }
1339
1340 return status;
1341}
1342
1343static void ice_vf_clear_counters(struct ice_vf *vf)
1344{
1345 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1346
1347 vf->num_mac = 0;
1348 vsi->num_vlan = 0;
1349 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1350 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1351}
1352
1353
1354
1355
1356
1357
1358
1359
/**
 * ice_vf_pre_vsi_rebuild - tasks to run before the VF VSI is rebuilt
 * @vf: VF being reset
 *
 * Clears counters and deasserts the VF reset trigger so the rebuild
 * starts from a clean state.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_clear_counters(vf);
	ice_clear_vf_reset_trigger(vf);
}
1365
1366
1367
1368
1369
1370
1371
1372
/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: VF VSI to move back into its aggregator node
 *
 * Best effort: failures are logged at debug level only, and the VSI is
 * simply left outside the aggregator node.
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;

	/* no aggregator node was ever assigned; nothing to do */
	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}
1398
1399
1400
1401
1402
/**
 * ice_vf_rebuild_host_cfg - host-side VF configuration after reset/rebuild
 * @vf: VF to configure
 *
 * Reapplies trust, default MAC, VLAN, and aggregator-node settings that
 * the host administrator configured for this VF. Individual failures are
 * logged but do not abort the remaining steps.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
1420
1421
1422
1423
1424
1425
1426
1427
1428static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1429{
1430 ice_vf_vsi_release(vf);
1431 if (!ice_vf_vsi_setup(vf))
1432 return -ENOMEM;
1433
1434 return 0;
1435}
1436
1437
1438
1439
1440
1441
1442
1443
/**
 * ice_vf_rebuild_vsi - rebuild the VF's existing VSI in place
 * @vf: VF whose VSI is rebuilt
 *
 * Returns 0 on success, -EIO if the VSI rebuild failed.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (ice_vsi_rebuild(vsi, true)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}

	/* vsi->idx is unchanged across the rebuild, but the HW VSI number
	 * may differ, so refresh the cached values
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}
1462
1463
1464
1465
1466
1467
1468
1469
/**
 * ice_vf_set_initialized - move the VF into the initialized state
 * @vf: VF to update
 *
 * Marks queues disabled, clears promiscuous/disabled states, and sets
 * ICE_VF_STATE_INIT so the VF can start negotiating over virtchnl again.
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
}
1478
1479
1480
1481
1482
1483static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1484{
1485 struct ice_pf *pf = vf->pf;
1486 struct ice_hw *hw;
1487
1488 hw = &pf->hw;
1489
1490 ice_vf_rebuild_host_cfg(vf);
1491
1492 ice_vf_set_initialized(vf);
1493 ice_ena_vf_mappings(vf);
1494 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1495}
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if the reset was triggered by VFLR, false otherwise
 *
 * Triggers hardware reset on every VF at once, waits for the resets to
 * complete, and then rebuilds each VF's VSI and configuration.
 *
 * Returns true on success, false when there are no VFs or resets are
 * already in progress (ICE_VF_DIS set).
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* nothing to reset if no VFs are allocated */
	if (!pf->num_alloc_vfs)
		return false;

	/* clear malicious-VF tracking state since all VFs restart clean */
	ice_for_each_vf(pf, i)
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);

	/* If VFs are already disabled, a reset is already in progress */
	if (test_and_set_bit(ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* HW needs time to flush each VF's FIFO after reset. Poll
	 * VPGEN_VFRSTAT for each VF in sequence; @v only advances past a VF
	 * once its reset-done bit is observed, and the outer loop bounds the
	 * total number of polling rounds to 10.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* this VF has not finished yet; wait a bit
				 * and retry from this VF on the next round
				 */
				usleep_range(10, 20);
				break;
			}

			/* this VF is done resetting — move on to the next
			 * one in sequence
			 */
			v++;
		}
	}

	/* warn if at least one VF did not reset in time, but carry on with
	 * the rebuild regardless
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* rebuild each VF's resources now that hardware reset is complete */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		/* the control VSI is only set up once the VF creates its
		 * first FDIR rule, so just invalidate it here
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	return true;
}
1589
1590
1591
1592
1593
1594
1595
1596static bool ice_is_vf_disabled(struct ice_vf *vf)
1597{
1598 struct ice_pf *pf = vf->pf;
1599
1600
1601
1602
1603
1604
1605 return (test_bit(ICE_VF_DIS, pf->state) ||
1606 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1607}
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1618{
1619 struct ice_pf *pf = vf->pf;
1620 struct ice_vsi *vsi;
1621 struct device *dev;
1622 struct ice_hw *hw;
1623 bool rsd = false;
1624 u8 promisc_m;
1625 u32 reg;
1626 int i;
1627
1628 dev = ice_pf_to_dev(pf);
1629
1630 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1631 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1632 vf->vf_id);
1633 return true;
1634 }
1635
1636 if (ice_is_vf_disabled(vf)) {
1637 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1638 vf->vf_id);
1639 return true;
1640 }
1641
1642
1643 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1644 ice_trigger_vf_reset(vf, is_vflr, false);
1645
1646 vsi = ice_get_vf_vsi(vf);
1647
1648 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1649 ice_dis_vf_qs(vf);
1650
1651
1652
1653
1654 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1655 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1656
1657 hw = &pf->hw;
1658
1659
1660
1661 for (i = 0; i < 10; i++) {
1662
1663
1664
1665
1666 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1667 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1668 rsd = true;
1669 break;
1670 }
1671
1672
1673 usleep_range(10, 20);
1674 }
1675
1676 vf->driver_caps = 0;
1677 ice_vc_set_default_allowlist(vf);
1678
1679
1680
1681
1682 if (!rsd)
1683 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1684
1685
1686
1687
1688 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1689 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1690 if (vf->port_vlan_info || vsi->num_vlan)
1691 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1692 else
1693 promisc_m = ICE_UCAST_PROMISC_BITS;
1694
1695 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1696 dev_err(dev, "disabling promiscuous mode failed\n");
1697 }
1698
1699 ice_vf_fdir_exit(vf);
1700
1701
1702
1703 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1704 ice_vf_ctrl_vsi_release(vf);
1705
1706 ice_vf_pre_vsi_rebuild(vf);
1707
1708 if (ice_vf_rebuild_vsi_with_release(vf)) {
1709 dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1710 return false;
1711 }
1712
1713 ice_vf_post_vsi_rebuild(vf);
1714
1715
1716 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1717 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1718
1719 return true;
1720}
1721
1722
1723
1724
1725
1726void ice_vc_notify_link_state(struct ice_pf *pf)
1727{
1728 int i;
1729
1730 ice_for_each_vf(pf, i)
1731 ice_vc_notify_vf_link_state(&pf->vf[i]);
1732}
1733
1734
1735
1736
1737
1738
1739
1740void ice_vc_notify_reset(struct ice_pf *pf)
1741{
1742 struct virtchnl_pf_event pfe;
1743
1744 if (!pf->num_alloc_vfs)
1745 return;
1746
1747 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1748 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1749 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1750 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1751}
1752
1753
1754
1755
1756
1757static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1758{
1759 struct virtchnl_pf_event pfe;
1760 struct ice_pf *pf;
1761
1762 if (!vf)
1763 return;
1764
1765 pf = vf->pf;
1766 if (ice_validate_vf_id(pf, vf->vf_id))
1767 return;
1768
1769
1770
1771
1772 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1773 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1774 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1775 return;
1776
1777 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1778 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1779 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1780 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1781 NULL);
1782}
1783
1784
1785
1786
1787
1788
1789
1790
/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize
 *
 * Creates the VF's VSI and installs the default VLAN 0 and broadcast MAC
 * filters. On any failure the VSI is released again.
 *
 * Returns 0 on success, -ENOMEM if the VSI could not be created, or a
 * negative errno from the filter installation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	enum ice_status status;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	/* VLAN 0 filter so untagged traffic is forwarded to the VF */
	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
			vf->vf_id, ice_stat_str(status));
		err = ice_status_to_errno(status);
		goto release_vsi;
	}

	/* count the broadcast filter just installed */
	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}
1831
1832
1833
1834
1835
/**
 * ice_start_vfs - bring up all allocated VFs
 * @pf: PF whose VFs are started
 *
 * For each VF: clear the reset trigger, set up VSI resources, enable
 * interrupt/queue mappings, and signal VFR completion via VFGEN_RSTAT.
 * On failure, tears down every VF started so far (in reverse order) and
 * returns the error.
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int retval, i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
	}

	ice_flush(hw);
	return 0;

teardown:
	/* unwind only the VFs that were fully initialized (i itself failed
	 * before its VSI existed, so start at i - 1)
	 */
	for (i = i - 1; i >= 0; i--) {
		struct ice_vf *vf = &pf->vf[i];

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
	}

	return retval;
}
1871
1872
1873
1874
1875
1876static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1877{
1878 int i;
1879
1880 ice_for_each_vf(pf, i) {
1881 struct ice_vf *vf = &pf->vf[i];
1882
1883 vf->pf = pf;
1884 vf->vf_id = i;
1885 vf->vf_sw_id = pf->first_sw;
1886
1887 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1888 vf->spoofchk = true;
1889 vf->num_vf_qs = pf->num_qps_per_vf;
1890 ice_vc_set_default_allowlist(vf);
1891
1892
1893
1894
1895 ice_vf_ctrl_invalidate_vsi(vf);
1896 ice_vf_fdir_init(vf);
1897 }
1898}
1899
1900
1901
1902
1903
1904
1905static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1906{
1907 struct ice_vf *vfs;
1908
1909 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1910 GFP_KERNEL);
1911 if (!vfs)
1912 return -ENOMEM;
1913
1914 pf->vf = vfs;
1915 pf->num_alloc_vfs = num_vfs;
1916
1917 return 0;
1918}
1919
1920
1921
1922
1923
1924
/**
 * ice_ena_vfs - enable SR-IOV and start the requested number of VFs
 * @pf: PF enabling SR-IOV
 * @num_vfs: number of VFs to enable
 *
 * Returns 0 on success; on failure, unwinds whatever was set up (VF
 * array, PCI SR-IOV, interrupt state) and returns a negative errno.
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR
	 * that is raised while VFs are being enabled
	 */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}

	ret = ice_alloc_vfs(pf, num_vfs);
	if (ret)
		goto err_pci_disable_sriov;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
			num_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	ice_set_dflt_settings_vfs(pf);

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_sriov;
	}

	clear_bit(ICE_VF_DIS, pf->state);
	return 0;

err_unroll_sriov:
	/* free the VF array, then fall through to undo PCI SR-IOV and the
	 * interrupt disable (labels unwind in reverse setup order)
	 */
	devm_kfree(dev, pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}
1977
1978
1979
1980
1981
1982
1983
1984
1985static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1986{
1987 int pre_existing_vfs = pci_num_vf(pf->pdev);
1988 struct device *dev = ice_pf_to_dev(pf);
1989 int err;
1990
1991 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1992 ice_free_vfs(pf);
1993 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1994 return 0;
1995
1996 if (num_vfs > pf->num_vfs_supported) {
1997 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1998 num_vfs, pf->num_vfs_supported);
1999 return -EOPNOTSUPP;
2000 }
2001
2002 dev_info(dev, "Enabling %d VFs\n", num_vfs);
2003 err = ice_ena_vfs(pf, num_vfs);
2004 if (err) {
2005 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
2006 return err;
2007 }
2008
2009 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
2010 return 0;
2011}
2012
2013
2014
2015
2016
2017static int ice_check_sriov_allowed(struct ice_pf *pf)
2018{
2019 struct device *dev = ice_pf_to_dev(pf);
2020
2021 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2022 dev_err(dev, "This device is not capable of SR-IOV\n");
2023 return -EOPNOTSUPP;
2024 }
2025
2026 if (ice_is_safe_mode(pf)) {
2027 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2028 return -EOPNOTSUPP;
2029 }
2030
2031 if (!ice_pf_state_is_nominal(pf)) {
2032 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2033 return -EBUSY;
2034 }
2035
2036 return 0;
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
/**
 * ice_sriov_configure - sysfs/PCI callback to enable or disable VFs
 * @pdev: PCI device being configured
 * @num_vfs: requested VF count; 0 means disable SR-IOV
 *
 * Returns the number of VFs enabled, 0 on successful disable, or a
 * negative errno (-EBUSY when VFs are still assigned to VMs).
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_status status;
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		/* disable path: only allowed when no VF is attached to a VM */
		if (!pci_vfs_assigned(pdev)) {
			ice_mbx_deinit_snapshot(&pf->hw);
			ice_free_vfs(pf);
			/* LAG was disabled while SR-IOV was active */
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	/* mailbox snapshot tracks per-VF message rates (malicious VF
	 * detection); it must exist before VFs are enabled
	 */
	status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (status)
		return ice_status_to_errno(status);

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	/* LAG and SR-IOV are mutually exclusive */
	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}
2086
2087
2088
2089
2090
2091
2092
2093
2094void ice_process_vflr_event(struct ice_pf *pf)
2095{
2096 struct ice_hw *hw = &pf->hw;
2097 unsigned int vf_id;
2098 u32 reg;
2099
2100 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2101 !pf->num_alloc_vfs)
2102 return;
2103
2104 ice_for_each_vf(pf, vf_id) {
2105 struct ice_vf *vf = &pf->vf[vf_id];
2106 u32 reg_idx, bit_idx;
2107
2108 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2109 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2110
2111 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2112 if (reg & BIT(bit_idx))
2113
2114 ice_reset_vf(vf, true);
2115 }
2116}
2117
2118
2119
2120
2121
/**
 * ice_vc_reset_vf - notify a VF of an impending reset, then reset it
 * @vf: VF to reset
 */
static void ice_vc_reset_vf(struct ice_vf *vf)
{
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2137{
2138 unsigned int vf_id;
2139
2140 ice_for_each_vf(pf, vf_id) {
2141 struct ice_vf *vf = &pf->vf[vf_id];
2142 struct ice_vsi *vsi;
2143 u16 rxq_idx;
2144
2145 vsi = ice_get_vf_vsi(vf);
2146
2147 ice_for_each_rxq(vsi, rxq_idx)
2148 if (vsi->rxq_map[rxq_idx] == pfq)
2149 return vf;
2150 }
2151
2152 return NULL;
2153}
2154
2155
2156
2157
2158
2159
2160static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2161{
2162 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
2163}
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
/**
 * ice_vf_lan_overflow_event - handle a LAN queue overflow event
 * @pf: PF that received the event
 * @event: admin-receive-queue event carrying the overflowing queue id
 *
 * Decodes the global Rx queue from the event, maps it back to the owning
 * VF, and resets that VF. Silently ignores queues not owned by any VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns the device-global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_vc_reset_vf(vf);
}
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
/**
 * ice_vc_send_msg_to_vf - send a virtchnl response/message to a VF
 * @vf: destination VF
 * @v_opcode: virtchnl opcode being answered
 * @v_retval: status code to report to the VF
 * @msg: payload buffer (may be NULL)
 * @msglen: payload length in bytes
 *
 * Tracks valid/invalid message counts per VF; a VF that exceeds
 * ICE_DFLT_NUM_INVAL_MSGS_ALLOWED consecutive failures is disabled.
 *
 * Returns 0 on success or -EINVAL/-EIO on failure.
 */
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	enum ice_status aq_ret;
	struct device *dev;
	struct ice_pf *pf;

	if (!vf)
		return -EINVAL;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_inval_msgs++;
		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
			 v_opcode, v_retval);
		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(dev, "Use PF Control I/F to enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			return -EIO;
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received */
		vf->num_inval_msgs = 0;
	}

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	/* ENOSYS from the mailbox means the VF driver is not present; not
	 * worth logging
	 */
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
			 vf->vf_id, ice_stat_str(aq_ret),
			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
		return -EIO;
	}

	return 0;
}
2250
2251
2252
2253
2254
2255
2256
2257
2258static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2259{
2260 struct virtchnl_version_info info = {
2261 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2262 };
2263
2264 vf->vf_ver = *(struct virtchnl_version_info *)msg;
2265
2266 if (VF_IS_V10(&vf->vf_ver))
2267 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2268
2269 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2270 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
2271 sizeof(struct virtchnl_version_info));
2272}
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2284{
2285 struct ice_port_info *pi = ice_vf_get_port_info(vf);
2286 u16 max_frame_size;
2287
2288 max_frame_size = pi->phy.link_info.max_frame_size;
2289
2290 if (vf->port_vlan_info)
2291 max_frame_size -= VLAN_HLEN;
2292
2293 return max_frame_size;
2294}
2295
2296
2297
2298
2299
2300
2301
2302
/**
 * ice_vc_get_vf_res_msg - respond to VIRTCHNL_OP_GET_VF_RESOURCES
 * @vf: requesting VF
 * @msg: message buffer; for 1.1+ VFs it carries the VF's capability flags
 *
 * Negotiates the capability set (intersection of what the VF requested
 * and what the PF supports), fills in queue/vector/RSS sizing and the
 * VSI description, and marks the VF active.
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	/* 1.1+ VFs send their requested capabilities; 1.0 VFs get a fixed
	 * legacy set
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* VLAN offload is only offered when no port VLAN (pvid) is set */
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	/* RSS: prefer PF-managed; otherwise fall back to AQ or register
	 * based RSS depending on what the VF asked for
	 */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	/* remaining capabilities are granted iff the VF requested them */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are always allocated in pairs */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_msix_per_vf;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = ice_vc_get_max_frame_size(vf);

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->hw_lan_addr.addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	ice_vc_set_caps_allowlist(vf);
	ice_vc_set_working_allowlist(vf);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
2411
2412
2413
2414
2415
2416
2417
2418
2419
/**
 * ice_vc_reset_vf_msg - handle VIRTCHNL_OP_RESET_VF from the VF
 * @vf: requesting VF
 *
 * Only resets VFs that have been through init; no response is sent for
 * this opcode.
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		ice_reset_vf(vf, false);
}
2425
2426
2427
2428
2429
2430
2431
2432
2433static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2434{
2435 int i;
2436
2437 ice_for_each_vsi(pf, i)
2438 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2439 return pf->vsi[i];
2440
2441 return NULL;
2442}
2443
2444
2445
2446
2447
2448
2449
2450
2451bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2452{
2453 struct ice_pf *pf = vf->pf;
2454 struct ice_vsi *vsi;
2455
2456 vsi = ice_find_vsi_from_id(pf, vsi_id);
2457
2458 return (vsi && (vsi->vf_id == vf->vf_id));
2459}
2460
2461
2462
2463
2464
2465
2466
2467
2468
/**
 * ice_vc_isvalid_q_id - check that a queue id is within the VSI's range
 * @vf: VF providing the queue id
 * @vsi_id: VSI the queue should belong to
 * @qid: queue id to validate
 *
 * NOTE(review): only alloc_txq is checked; presumably Tx and Rx queue
 * counts are equal for VF VSIs — confirm before reusing for Rx-only
 * validation.
 */
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should be always equal for VF VSI */
	return (vsi && (qid < vsi->alloc_txq));
}
2475
2476
2477
2478
2479
2480
2481
2482
2483static bool ice_vc_isvalid_ring_len(u16 ring_len)
2484{
2485 return ring_len == 0 ||
2486 (ring_len >= ICE_MIN_NUM_DESC &&
2487 ring_len <= ICE_MAX_NUM_DESC &&
2488 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2489}
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
/**
 * ice_vc_parse_rss_cfg - translate a virtchnl RSS config to ice flow terms
 * @hw: HW struct (used to pick the OS vs COMMS package tables)
 * @rss_cfg: virtchnl RSS configuration from the VF
 * @addl_hdrs: out; ORed ICE_FLOW_SEG_HDR_* bits for each matched header
 * @hash_flds: out; ORed ice hash-field bits for each matched selector
 *
 * Returns true when every protocol header in the request is recognized;
 * false otherwise. Unmatched hash-field selectors are silently skipped
 * (only headers are mandatory).
 */
static bool
ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
		     u32 *addl_hdrs, u64 *hash_flds)
{
	const struct ice_vc_hash_field_match_type *hf_list;
	const struct ice_vc_hdr_match_type *hdr_list;
	int i, hf_list_len, hdr_list_len;

	/* the COMMS DDP package supports a wider header/hash-field set */
	if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
		     sizeof(hw->active_pkg_name))) {
		hf_list = ice_vc_hash_field_list_comms;
		hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
		hdr_list = ice_vc_hdr_list_comms;
		hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
	} else {
		hf_list = ice_vc_hash_field_list_os;
		hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
		hdr_list = ice_vc_hdr_list_os;
		hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
	}

	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
		struct virtchnl_proto_hdr *proto_hdr =
			&rss_cfg->proto_hdrs.proto_hdr[i];
		bool hdr_found = false;
		int j;

		/* find matched ice headers according to virtchnl headers */
		for (j = 0; j < hdr_list_len; j++) {
			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];

			if (proto_hdr->type == hdr_map.vc_hdr) {
				*addl_hdrs |= hdr_map.ice_hdr;
				hdr_found = true;
			}
		}

		/* every requested header must be supported */
		if (!hdr_found)
			return false;

		/* find matched ice hash fields according to virtchnl hash
		 * fields; first exact (type, selector) match wins
		 */
		for (j = 0; j < hf_list_len; j++) {
			struct ice_vc_hash_field_match_type hf_map = hf_list[j];

			if (proto_hdr->type == hf_map.vc_hdr &&
			    proto_hdr->field_selector == hf_map.vc_hash_field) {
				*hash_flds |= hf_map.ice_hash_field;
				break;
			}
		}
	}

	return true;
}
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571static bool ice_vf_adv_rss_offload_ena(u32 caps)
2572{
2573 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2574}
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2585{
2586 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2587 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2588 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2589 struct device *dev = ice_pf_to_dev(vf->pf);
2590 struct ice_hw *hw = &vf->pf->hw;
2591 struct ice_vsi *vsi;
2592
2593 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2594 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2595 vf->vf_id);
2596 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2597 goto error_param;
2598 }
2599
2600 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2601 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2602 vf->vf_id);
2603 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2604 goto error_param;
2605 }
2606
2607 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2608 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2609 goto error_param;
2610 }
2611
2612 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2613 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2614 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2615 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2616 vf->vf_id);
2617 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2618 goto error_param;
2619 }
2620
2621 vsi = ice_get_vf_vsi(vf);
2622 if (!vsi) {
2623 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2624 goto error_param;
2625 }
2626
2627 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2628 struct ice_vsi_ctx *ctx;
2629 enum ice_status status;
2630 u8 lut_type, hash_type;
2631
2632 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2633 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2634 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2635
2636 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2637 if (!ctx) {
2638 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2639 goto error_param;
2640 }
2641
2642 ctx->info.q_opt_rss = ((lut_type <<
2643 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2644 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2645 (hash_type &
2646 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2647
2648
2649 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2650 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2651 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2652 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2653
2654 ctx->info.valid_sections =
2655 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2656
2657 status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2658 if (status) {
2659 dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
2660 ice_stat_str(status),
2661 ice_aq_str(hw->adminq.sq_last_status));
2662 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2663 } else {
2664 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2665 }
2666
2667 kfree(ctx);
2668 } else {
2669 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2670 u64 hash_flds = ICE_HASH_INVALID;
2671
2672 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2673 &hash_flds)) {
2674 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675 goto error_param;
2676 }
2677
2678 if (add) {
2679 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2680 addl_hdrs)) {
2681 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2682 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2683 vsi->vsi_num, v_ret);
2684 }
2685 } else {
2686 enum ice_status status;
2687
2688 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2689 addl_hdrs);
2690
2691
2692
2693
2694
2695 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2696 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2697 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
2698 vf->vf_id, ice_stat_str(status));
2699 }
2700 }
2701 }
2702
2703error_param:
2704 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2705}
2706
2707
2708
2709
2710
2711
2712
2713
2714static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2715{
2716 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2717 struct virtchnl_rss_key *vrk =
2718 (struct virtchnl_rss_key *)msg;
2719 struct ice_vsi *vsi;
2720
2721 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2722 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723 goto error_param;
2724 }
2725
2726 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2727 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2728 goto error_param;
2729 }
2730
2731 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2732 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733 goto error_param;
2734 }
2735
2736 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2737 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2738 goto error_param;
2739 }
2740
2741 vsi = ice_get_vf_vsi(vf);
2742 if (!vsi) {
2743 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2744 goto error_param;
2745 }
2746
2747 if (ice_set_rss_key(vsi, vrk->key))
2748 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2749error_param:
2750 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2751 NULL, 0);
2752}
2753
2754
2755
2756
2757
2758
2759
2760
2761static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2762{
2763 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2764 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2765 struct ice_vsi *vsi;
2766
2767 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2768 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2769 goto error_param;
2770 }
2771
2772 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2773 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2774 goto error_param;
2775 }
2776
2777 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2778 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2779 goto error_param;
2780 }
2781
2782 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2783 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2784 goto error_param;
2785 }
2786
2787 vsi = ice_get_vf_vsi(vf);
2788 if (!vsi) {
2789 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2790 goto error_param;
2791 }
2792
2793 if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2794 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2795error_param:
2796 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2797 NULL, 0);
2798}
2799
2800
2801
2802
2803
2804
2805
2806
2807static void ice_wait_on_vf_reset(struct ice_vf *vf)
2808{
2809 int i;
2810
2811 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2812 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2813 break;
2814 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2815 }
2816}
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2827{
2828 struct ice_pf *pf;
2829
2830 ice_wait_on_vf_reset(vf);
2831
2832 if (ice_is_vf_disabled(vf))
2833 return -EINVAL;
2834
2835 pf = vf->pf;
2836 if (ice_check_vf_init(pf, vf))
2837 return -EBUSY;
2838
2839 return 0;
2840}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2851{
2852 struct ice_netdev_priv *np = netdev_priv(netdev);
2853 struct ice_pf *pf = np->vsi->back;
2854 struct ice_vsi_ctx *ctx;
2855 struct ice_vsi *vf_vsi;
2856 enum ice_status status;
2857 struct device *dev;
2858 struct ice_vf *vf;
2859 int ret;
2860
2861 dev = ice_pf_to_dev(pf);
2862 if (ice_validate_vf_id(pf, vf_id))
2863 return -EINVAL;
2864
2865 vf = &pf->vf[vf_id];
2866 ret = ice_check_vf_ready_for_cfg(vf);
2867 if (ret)
2868 return ret;
2869
2870 vf_vsi = ice_get_vf_vsi(vf);
2871 if (!vf_vsi) {
2872 netdev_err(netdev, "VSI %d for VF %d is null\n",
2873 vf->lan_vsi_idx, vf->vf_id);
2874 return -EINVAL;
2875 }
2876
2877 if (vf_vsi->type != ICE_VSI_VF) {
2878 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
2879 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2880 return -ENODEV;
2881 }
2882
2883 if (ena == vf->spoofchk) {
2884 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2885 return 0;
2886 }
2887
2888 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2889 if (!ctx)
2890 return -ENOMEM;
2891
2892 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2893 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2894 if (ena) {
2895 ctx->info.sec_flags |=
2896 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2897 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2898 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2899 } else {
2900 ctx->info.sec_flags &=
2901 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2902 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2903 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2904 }
2905
2906 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2907 if (status) {
2908 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
2909 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2910 ice_stat_str(status));
2911 ret = -EIO;
2912 goto out;
2913 }
2914
2915
2916 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2917 vf->spoofchk = ena;
2918
2919out:
2920 kfree(ctx);
2921 return ret;
2922}
2923
2924
2925
2926
2927
2928
2929
2930
2931bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2932{
2933 int vf_idx;
2934
2935 ice_for_each_vf(pf, vf_idx) {
2936 struct ice_vf *vf = &pf->vf[vf_idx];
2937
2938
2939 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2940 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2941 return true;
2942 }
2943
2944 return false;
2945}
2946
2947
2948
2949
2950
2951
2952
2953
/**
 * ice_vc_cfg_promiscuous_mode_msg - configure VF promiscuous mode
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_promisc_info
 *
 * Enable or disable unicast/multicast promiscuous mode on the VF's VSI,
 * either via the switch default-VSI mechanism (when true promisc support
 * is off) or via real promiscuous filter rules. Always replies to the VF.
 */
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	bool rm_promisc, alluni = false, allmulti = false;
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int ret = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* NOTE(review): v_ret is deliberately left SUCCESS here, so an
		 * untrusted VF is told the request worked even though it was
		 * ignored — presumably to avoid leaking policy; confirm intent
		 */
		goto error_param;
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;

	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	/* removing promiscuous mode only when both flags are cleared */
	rm_promisc = !allmulti && !alluni;

	if (vsi->num_vlan || vf->port_vlan_info) {
		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
		struct net_device *pf_netdev;

		if (!pf_vsi) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		pf_netdev = pf_vsi->netdev;

		/* re-enable spoof checking when leaving promiscuous mode;
		 * note a failure here records ERR_PARAM but intentionally
		 * does not abort — VLAN pruning is still reconfigured below
		 */
		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
		if (ret) {
			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
				rm_promisc ? "ON" : "OFF", vf->vf_id,
				vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		}

		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
		if (ret) {
			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
	}

	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		bool set_dflt_vsi = alluni || allmulti;

		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
			/* only attempt to become the default VSI if no other
			 * VSI currently holds that role
			 */
			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
		else if (!set_dflt_vsi &&
			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
			/* only relinquish the default-VSI role if this VSI
			 * actually holds it
			 */
			ret = ice_clear_dflt_vsi(pf->first_sw);

		if (ret) {
			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto error_param;
		}
	} else {
		enum ice_status status;
		u8 promisc_m;

		/* pick a VLAN-aware promisc mask when the VSI has VLANs */
		if (alluni) {
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_UCAST_PROMISC_BITS;
		} else if (allmulti) {
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;
		} else {
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_UCAST_PROMISC_BITS;
		}

		/* set or clear (rm_promisc) the chosen promiscuous filters
		 * on the VF's VSI
		 */
		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
		if (status) {
			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
				rm_promisc ? "dis" : "en", vf->vf_id,
				ice_stat_str(status));
			v_ret = ice_err_to_virt_err(status);
			goto error_param;
		} else {
			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
				rm_promisc ? "dis" : "en", vf->vf_id);
		}
	}

	/* track the promiscuous state bits; only log on actual transitions */
	if (allmulti &&
	    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
		dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
	else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
		dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);

	if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
		dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
	else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
		dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				     v_ret, NULL, 0);
}
3097
3098
3099
3100
3101
3102
3103
3104
3105static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3106{
3107 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3108 struct virtchnl_queue_select *vqs =
3109 (struct virtchnl_queue_select *)msg;
3110 struct ice_eth_stats stats = { 0 };
3111 struct ice_vsi *vsi;
3112
3113 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3114 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3115 goto error_param;
3116 }
3117
3118 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3119 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3120 goto error_param;
3121 }
3122
3123 vsi = ice_get_vf_vsi(vf);
3124 if (!vsi) {
3125 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3126 goto error_param;
3127 }
3128
3129 ice_update_eth_stats(vsi);
3130
3131 stats = vsi->eth_stats;
3132
3133error_param:
3134
3135 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
3136 (u8 *)&stats, sizeof(stats));
3137}
3138
3139
3140
3141
3142
3143
3144
3145static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3146{
3147 if ((!vqs->rx_queues && !vqs->tx_queues) ||
3148 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3149 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
3150 return false;
3151
3152 return true;
3153}
3154
3155
3156
3157
3158
3159
3160static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3161{
3162 struct ice_hw *hw = &vsi->back->hw;
3163 u32 pfq = vsi->txq_map[q_idx];
3164 u32 reg;
3165
3166 reg = rd32(hw, QINT_TQCTL(pfq));
3167
3168
3169
3170
3171
3172 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3173 return;
3174
3175 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3176}
3177
3178
3179
3180
3181
3182
3183static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3184{
3185 struct ice_hw *hw = &vsi->back->hw;
3186 u32 pfq = vsi->rxq_map[q_idx];
3187 u32 reg;
3188
3189 reg = rd32(hw, QINT_RQCTL(pfq));
3190
3191
3192
3193
3194
3195 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3196 return;
3197
3198 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3199}
3200
3201
3202
3203
3204
3205
3206
3207
/**
 * ice_vc_ena_qs_msg - enable the VF's selected Rx/Tx queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_queue_select
 *
 * Enable each queue set in the Rx/Tx bitmaps. Rx rings are started in
 * hardware; Tx rings only have their interrupt cause re-armed (Tx queues
 * are started when configured). Always replies to the VF.
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Enable Rx queues selected in the bitmap; each queue id is
	 * validated against the VSI before being touched
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* skip queues that are already enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
			continue;

		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->rxq_ena);
	}

	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* skip queues that are already enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
			continue;

		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->txq_ena);
	}

	/* mark the VF as having enabled queues only on full success */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3298{
3299 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3300 struct virtchnl_queue_select *vqs =
3301 (struct virtchnl_queue_select *)msg;
3302 struct ice_vsi *vsi;
3303 unsigned long q_map;
3304 u16 vf_q_id;
3305
3306 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
3307 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
3308 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3309 goto error_param;
3310 }
3311
3312 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3313 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3314 goto error_param;
3315 }
3316
3317 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3318 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3319 goto error_param;
3320 }
3321
3322 vsi = ice_get_vf_vsi(vf);
3323 if (!vsi) {
3324 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3325 goto error_param;
3326 }
3327
3328 if (vqs->tx_queues) {
3329 q_map = vqs->tx_queues;
3330
3331 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3332 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
3333 struct ice_txq_meta txq_meta = { 0 };
3334
3335 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3336 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3337 goto error_param;
3338 }
3339
3340
3341 if (!test_bit(vf_q_id, vf->txq_ena))
3342 continue;
3343
3344 ice_fill_txq_meta(vsi, ring, &txq_meta);
3345
3346 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3347 ring, &txq_meta)) {
3348 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
3349 vf_q_id, vsi->vsi_num);
3350 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3351 goto error_param;
3352 }
3353
3354
3355 clear_bit(vf_q_id, vf->txq_ena);
3356 }
3357 }
3358
3359 q_map = vqs->rx_queues;
3360
3361 if (q_map &&
3362 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
3363 if (ice_vsi_stop_all_rx_rings(vsi)) {
3364 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3365 vsi->vsi_num);
3366 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 goto error_param;
3368 }
3369
3370 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
3371 } else if (q_map) {
3372 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3373 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3374 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3375 goto error_param;
3376 }
3377
3378
3379 if (!test_bit(vf_q_id, vf->rxq_ena))
3380 continue;
3381
3382 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3383 true)) {
3384 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
3385 vf_q_id, vsi->vsi_num);
3386 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3387 goto error_param;
3388 }
3389
3390
3391 clear_bit(vf_q_id, vf->rxq_ena);
3392 }
3393 }
3394
3395
3396 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
3397 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3398
3399error_param:
3400
3401 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
3402 NULL, 0);
3403}
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414static int
3415ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3416 struct virtchnl_vector_map *map,
3417 struct ice_q_vector *q_vector)
3418{
3419 u16 vsi_q_id, vsi_q_id_idx;
3420 unsigned long qmap;
3421
3422 q_vector->num_ring_rx = 0;
3423 q_vector->num_ring_tx = 0;
3424
3425 qmap = map->rxq_map;
3426 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3427 vsi_q_id = vsi_q_id_idx;
3428
3429 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3430 return VIRTCHNL_STATUS_ERR_PARAM;
3431
3432 q_vector->num_ring_rx++;
3433 q_vector->rx.itr_idx = map->rxitr_idx;
3434 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3435 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3436 q_vector->rx.itr_idx);
3437 }
3438
3439 qmap = map->txq_map;
3440 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3441 vsi_q_id = vsi_q_id_idx;
3442
3443 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3444 return VIRTCHNL_STATUS_ERR_PARAM;
3445
3446 q_vector->num_ring_tx++;
3447 q_vector->tx.itr_idx = map->txitr_idx;
3448 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3449 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3450 q_vector->tx.itr_idx);
3451 }
3452
3453 return VIRTCHNL_STATUS_SUCCESS;
3454}
3455
3456
3457
3458
3459
3460
3461
3462
/**
 * ice_vc_cfg_irq_map_msg - configure the VF's IRQ-to-queue mapping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_irq_map_info
 *
 * Validate each vector map sent by the VF and program the queue/vector
 * associations. Vector 0 is the non-queue (mailbox) vector and may not
 * carry any queues. Always replies to the VF.
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and there is actually
	 * at least one vector being mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->num_msix_per_vf < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;

		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF;
		 * vector 0 must not map any queues
		 */
		if (!(vector_id < pf->num_msix_per_vf) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* vector 0 (OICR/mailbox) carries no queues; nothing to do */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		v_ret = (enum virtchnl_status_code)
			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
3535
3536
3537
3538
3539
3540
3541
3542
3543static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3544{
3545 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3546 struct virtchnl_vsi_queue_config_info *qci =
3547 (struct virtchnl_vsi_queue_config_info *)msg;
3548 struct virtchnl_queue_pair_info *qpi;
3549 struct ice_pf *pf = vf->pf;
3550 struct ice_vsi *vsi;
3551 int i, q_idx;
3552
3553 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3554 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3555 goto error_param;
3556 }
3557
3558 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
3559 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3560 goto error_param;
3561 }
3562
3563 vsi = ice_get_vf_vsi(vf);
3564 if (!vsi) {
3565 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3566 goto error_param;
3567 }
3568
3569 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
3570 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
3571 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
3572 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3573 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3574 goto error_param;
3575 }
3576
3577 for (i = 0; i < qci->num_queue_pairs; i++) {
3578 qpi = &qci->qpair[i];
3579 if (qpi->txq.vsi_id != qci->vsi_id ||
3580 qpi->rxq.vsi_id != qci->vsi_id ||
3581 qpi->rxq.queue_id != qpi->txq.queue_id ||
3582 qpi->txq.headwb_enabled ||
3583 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3584 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
3585 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
3586 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3587 goto error_param;
3588 }
3589
3590 q_idx = qpi->rxq.queue_id;
3591
3592
3593
3594
3595 if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
3596 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3597 goto error_param;
3598 }
3599
3600
3601 if (qpi->txq.ring_len > 0) {
3602 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3603 vsi->tx_rings[i]->count = qpi->txq.ring_len;
3604 if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
3605 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3606 goto error_param;
3607 }
3608 }
3609
3610
3611 if (qpi->rxq.ring_len > 0) {
3612 u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3613
3614 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3615 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3616
3617 if (qpi->rxq.databuffer_size != 0 &&
3618 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3619 qpi->rxq.databuffer_size < 1024)) {
3620 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3621 goto error_param;
3622 }
3623 vsi->rx_buf_len = qpi->rxq.databuffer_size;
3624 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
3625 if (qpi->rxq.max_pkt_size > max_frame_size ||
3626 qpi->rxq.max_pkt_size < 64) {
3627 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3628 goto error_param;
3629 }
3630
3631 vsi->max_frame = qpi->rxq.max_pkt_size;
3632
3633
3634
3635 if (vf->port_vlan_info)
3636 vsi->max_frame += VLAN_HLEN;
3637
3638 if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
3639 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3640 goto error_param;
3641 }
3642 }
3643 }
3644
3645error_param:
3646
3647 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3648 NULL, 0);
3649}
3650
3651
3652
3653
3654
3655static bool ice_is_vf_trusted(struct ice_vf *vf)
3656{
3657 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3658}
3659
3660
3661
3662
3663
3664
3665
3666static bool ice_can_vf_change_mac(struct ice_vf *vf)
3667{
3668
3669
3670
3671
3672 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3673 return false;
3674
3675 return true;
3676}
3677
3678
3679
3680
3681
3682static u8
3683ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
3684{
3685 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
3686}
3687
3688
3689
3690
3691
3692static bool
3693ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
3694{
3695 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3696
3697 return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
3698}
3699
3700
3701
3702
3703
3704
3705
3706
3707static bool
3708ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
3709{
3710 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3711
3712 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
3713}
3714
3715
3716
3717
3718
3719
3720static void
3721ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3722{
3723 u8 *mac_addr = vc_ether_addr->addr;
3724
3725 if (!is_valid_ether_addr(mac_addr))
3726 return;
3727
3728
3729
3730
3731
3732 if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
3733 is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
3734 ice_is_vc_addr_primary(vc_ether_addr)) {
3735 ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
3736 ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
3737 }
3738
3739
3740
3741
3742
3743
3744
3745 if (ice_is_vc_addr_legacy(vc_ether_addr)) {
3746 ether_addr_copy(vf->legacy_last_added_umac.addr,
3747 mac_addr);
3748 vf->legacy_last_added_umac.time_modified = jiffies;
3749 }
3750}
3751
3752
3753
3754
3755
3756
3757
3758static int
3759ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3760 struct virtchnl_ether_addr *vc_ether_addr)
3761{
3762 struct device *dev = ice_pf_to_dev(vf->pf);
3763 u8 *mac_addr = vc_ether_addr->addr;
3764 enum ice_status status;
3765
3766
3767 if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
3768 return 0;
3769
3770 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3771 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3772 return -EPERM;
3773 }
3774
3775 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3776 if (status == ICE_ERR_ALREADY_EXISTS) {
3777 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3778 vf->vf_id);
3779 return -EEXIST;
3780 } else if (status) {
3781 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3782 mac_addr, vf->vf_id, ice_stat_str(status));
3783 return -EIO;
3784 }
3785
3786 ice_vfhw_mac_add(vf, vc_ether_addr);
3787
3788 vf->num_mac++;
3789
3790 return 0;
3791}
3792
3793
3794
3795
3796
/**
 * ice_is_legacy_umac_expired - check if the legacy MAC fallback has expired
 * @last_added_umac: structure used to check expiration
 *
 * A legacy unicast MAC remembered on add is only eligible for restore
 * (on delete) within this window of its time_modified stamp.
 */
static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
{
	/* 3 second expiration window for the remembered legacy MAC */
#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
	return time_is_before_jiffies(last_added_umac->time_modified +
				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
}
3803
3804
3805
3806
3807
3808
/**
 * ice_vfhw_mac_del - update the VF's cached hardware MAC on a MAC delete
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
 */
static void
ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	/* only react when the VF is deleting its cached device address */
	if (!is_valid_ether_addr(mac_addr) ||
	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
		return;

	/* allow the device address to be overwritten by a subsequent add;
	 * hw_lan_addr is deliberately kept so the address survives a VF
	 * reset until the new one is latched
	 */
	eth_zero_addr(vf->dev_lan_addr.addr);

	/* Legacy VF drivers may issue add-new then delete-old when changing
	 * their MAC; if the delete arrives within the expiration window,
	 * restore the most recently added legacy address as current.
	 */
	if (ice_is_vc_addr_legacy(vc_ether_addr) &&
	    !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
		ether_addr_copy(vf->dev_lan_addr.addr,
				vf->legacy_last_added_umac.addr);
		ether_addr_copy(vf->hw_lan_addr.addr,
				vf->legacy_last_added_umac.addr);
	}
}
3836
3837
3838
3839
3840
3841
3842
3843static int
3844ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3845 struct virtchnl_ether_addr *vc_ether_addr)
3846{
3847 struct device *dev = ice_pf_to_dev(vf->pf);
3848 u8 *mac_addr = vc_ether_addr->addr;
3849 enum ice_status status;
3850
3851 if (!ice_can_vf_change_mac(vf) &&
3852 ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
3853 return 0;
3854
3855 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3856 if (status == ICE_ERR_DOES_NOT_EXIST) {
3857 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3858 vf->vf_id);
3859 return -ENOENT;
3860 } else if (status) {
3861 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3862 mac_addr, vf->vf_id, ice_stat_str(status));
3863 return -EIO;
3864 }
3865
3866 ice_vfhw_mac_del(vf, vc_ether_addr);
3867
3868 vf->num_mac--;
3869
3870 return 0;
3871}
3872
3873
3874
3875
3876
3877
3878
3879
3880
/**
 * ice_vc_handle_mac_addr_msg - add/delete a list of MAC addresses for a VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_ether_addr_list
 * @set: true if MAC filters are being set, false otherwise
 *
 * Shared worker for the add/delete MAC virtchnl ops. -EEXIST/-ENOENT from
 * individual entries are tolerated; any other failure aborts the list.
 * Always replies to the VF.
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	/* dispatch target: either ice_vc_add_mac_addr or ice_vc_del_mac_addr */
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_ether_addr *virtchnl_ether_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;
	int i;

	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;
		int result;

		/* broadcast and zero addresses are never programmed */
		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))
			continue;

		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
		if (result == -EEXIST || result == -ENOENT) {
			/* duplicate add / missing delete: keep going */
			continue;
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;
		}
	}

handle_mac_exit:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
3948
3949
3950
3951
3952
3953
3954
3955
/**
 * ice_vc_add_mac_addr_msg - add one or more MAC addresses for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_ether_addr_list
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}
3960
3961
3962
3963
3964
3965
3966
3967
/**
 * ice_vc_del_mac_addr_msg - delete one or more MAC addresses for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_ether_addr_list
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
/**
 * ice_vc_request_qs_msg - handle a VF's request to change its queue count
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer holding a virtchnl_vf_res_request
 *
 * A valid request (> 0, <= max, fits in what is left on the PF) records
 * the new count and triggers a VF reset — in that case the function
 * returns 0 WITHOUT sending a virtchnl reply (the reset notifies the VF).
 * Otherwise the echoed vfres (possibly clamped) is sent back.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	u16 max_allowed_vf_queues;
	u16 tx_rx_queue_left;
	struct device *dev;
	u16 cur_queues;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	cur_queues = vf->num_vf_qs;
	/* queues still unclaimed on the PF, limited by the scarcer of Tx/Rx */
	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (!req_queues) {
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
			vf->vf_id);
	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
		dev_err(dev, "VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
	} else if (req_queues > cur_queues &&
		   req_queues - cur_queues > tx_rx_queue_left) {
		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
					       ICE_MAX_RSS_QS_PER_VF);
	} else {
		/* request is valid, reset the VF so queues can be re-sized */
		vf->num_req_qs = req_queues;
		ice_vc_reset_vf(vf);
		dev_info(dev, "VF %d granted request of %u queues.\n",
			 vf->vf_id, req_queues);
		return 0;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS. Changing the port VLAN resets the
 * VF so the new configuration takes effect.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct device *dev;
	struct ice_vf *vf;
	u16 vlanprio;
	int ret;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	/* QoS is the 3-bit 802.1p priority, hence the 0-7 bound */
	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	/* only 802.1Q port VLANs are supported */
	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(dev, "VF VLAN protocol is not supported\n");
		return -EPROTONOSUPPORT;
	}

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* pack VLAN ID and priority into the 802.1Q TCI layout */
	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);

	if (vf->port_vlan_info == vlanprio) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
		return 0;
	}

	vf->port_vlan_info = vlanprio;

	if (vf->port_vlan_info)
		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_vc_reset_vf(vf);

	return 0;
}
4093
4094
4095
4096
4097
4098
4099
4100static bool ice_vf_vlan_offload_ena(u32 caps)
4101{
4102 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
4103}
4104
4105
4106
4107
4108
4109
4110
4111
4112
/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer (a struct virtchnl_vlan_filter_list)
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID(s),
 * maintaining VLAN pruning and (if true-promiscuous is enabled on the PF)
 * per-VLAN promiscuous rules alongside the filters.
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	int status = 0;
	u8 promisc_m;
	int i;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* validate every requested VLAN ID before touching hardware */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] >= VLAN_N_VID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(dev, "invalid VF VLAN id %d\n",
				vfl->vlan_id[i]);
			goto error_param;
		}
	}

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && !ice_is_vf_trusted(vf) &&
	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			 vf->vf_id);
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */
		goto error_param;
	}

	/* a configured port VLAN owns VLAN handling; reject guest changes */
	if (vsi->info.pvid) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
		vlan_promisc = true;

	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];

			/* re-check the cap inside the loop since num_vlan
			 * grows as filters are added
			 */
			if (!ice_is_vf_trusted(vf) &&
			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
					 vf->vf_id);
				/* There is no need to let VF know about being
				 * not trusted, so we can just return success
				 * message here as well
				 */
				goto error_param;
			}

			/* VLAN 0 is added by default for each VF, so there is
			 * no need to add it again here
			 */
			if (!vid)
				continue;

			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Enable VLAN pruning when a non-zero VLAN is added */
			if (!vlan_promisc && vid &&
			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
				status = ice_cfg_vlan_pruning(vsi, true, false);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else if (vlan_promisc) {
				/* Enable Ucast/Mcast VLAN promiscuous mode */
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				status = ice_set_vsi_promisc(hw, vsi->idx,
							     promisc_m, vid);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* An untrusted VF may ask to remove more VLAN elements than
		 * were actually programmed for it, so bound the loop by the
		 * number of VLANs previously added. This avoids removing a
		 * VLAN that doesn't exist, which would send an erroneous
		 * failure message back to the VF.
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];

			/* VLAN 0 is added by default for each VF, so never
			 * honor a request to remove it
			 */
			if (!vid)
				continue;

			/* Make sure ice_vsi_kill_vlan is successful before
			 * updating VLAN information
			 */
			status = ice_vsi_kill_vlan(vsi, vid);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN pruning when only VLAN 0 is left */
			if (vsi->num_vlan == 1 &&
			    ice_vsi_is_vlan_pruning_ena(vsi))
				ice_cfg_vlan_pruning(vsi, false, false);

			/* Disable Unicast/Multicast VLAN promiscuous mode */
			if (vlan_promisc) {
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				ice_clear_vsi_promisc(hw, vsi->idx,
						      promisc_m, vid);
			}
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}
4286
4287
4288
4289
4290
4291
4292
4293
/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer (a struct virtchnl_vlan_filter_list)
 *
 * Handle VIRTCHNL_OP_ADD_VLAN: add guest VLAN ID filter(s).
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}
4298
4299
4300
4301
4302
4303
4304
4305
/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer (a struct virtchnl_vlan_filter_list)
 *
 * Handle VIRTCHNL_OP_DEL_VLAN: remove guest VLAN ID filter(s).
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}
4310
4311
4312
4313
4314
4315
4316
4317static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
4318{
4319 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4320 struct ice_vsi *vsi;
4321
4322 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4323 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4324 goto error_param;
4325 }
4326
4327 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4328 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4329 goto error_param;
4330 }
4331
4332 vsi = ice_get_vf_vsi(vf);
4333 if (ice_vsi_manage_vlan_stripping(vsi, true))
4334 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4335
4336error_param:
4337 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
4338 v_ret, NULL, 0);
4339}
4340
4341
4342
4343
4344
4345
4346
4347static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
4348{
4349 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4350 struct ice_vsi *vsi;
4351
4352 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4354 goto error_param;
4355 }
4356
4357 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4358 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4359 goto error_param;
4360 }
4361
4362 vsi = ice_get_vf_vsi(vf);
4363 if (!vsi) {
4364 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4365 goto error_param;
4366 }
4367
4368 if (ice_vsi_manage_vlan_stripping(vsi, false))
4369 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4370
4371error_param:
4372 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
4373 v_ret, NULL, 0);
4374}
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
4387{
4388 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
4389
4390 if (!vsi)
4391 return -EINVAL;
4392
4393
4394 if (vsi->info.pvid)
4395 return 0;
4396
4397 if (ice_vf_vlan_offload_ena(vf->driver_caps))
4398 return ice_vsi_manage_vlan_stripping(vsi, true);
4399 else
4400 return ice_vsi_manage_vlan_stripping(vsi, false);
4401}
4402
4403
4404
4405
4406
4407
4408
4409
4410
/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * Called from the common asq/arq handler to process a request from a VF:
 * validates the sender and payload, then dispatches to the per-opcode
 * handler. Every path replies to the VF except early de-init bail-out.
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	/* if de-init is underway, don't process messages from VF */
	if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
		return;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id)) {
		err = -EINVAL;
		goto error_handler;
	}

	vf = &pf->vf[vf_id];

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	/* Perform basic opcode and payload-size sanity checks */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		/* map virtchnl validation results onto errnos */
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
		ice_vc_send_msg_to_vf(vf, v_opcode,
				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
				      0);
		return;
	}

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		return;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ice_vc_get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ice_vc_get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_vc_reset_vf_msg(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ice_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ice_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ice_vc_cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ice_vc_ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ice_vc_dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ice_vc_request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ice_vc_cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ice_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ice_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ice_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ice_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ice_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ice_vc_ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ice_vc_dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		err = ice_vc_add_fdir_fltr(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		err = ice_vc_del_fdir_fltr(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
		err = ice_vc_handle_rss_cfg(vf, msg, true);
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG:
		err = ice_vc_handle_rss_cfg(vf, msg, false);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}
}
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562int
4563ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
4564{
4565 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4566 struct ice_vf *vf;
4567
4568 if (ice_validate_vf_id(pf, vf_id))
4569 return -EINVAL;
4570
4571 vf = &pf->vf[vf_id];
4572
4573 if (ice_check_vf_init(pf, vf))
4574 return -EBUSY;
4575
4576 ivi->vf = vf_id;
4577 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
4578
4579
4580 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
4581 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
4582
4583 ivi->trusted = vf->trusted;
4584 ivi->spoofchk = vf->spoofchk;
4585 if (!vf->link_forced)
4586 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4587 else if (vf->link_up)
4588 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4589 else
4590 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4591 ivi->max_tx_rate = vf->tx_rate;
4592 ivi->min_tx_rate = 0;
4593 return 0;
4594}
4595
4596
4597
4598
4599
4600
4601
4602
4603static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
4604{
4605 struct ice_sw_recipe *mac_recipe_list =
4606 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
4607 struct ice_fltr_mgmt_list_entry *list_itr;
4608 struct list_head *rule_head;
4609 struct mutex *rule_lock;
4610
4611 rule_head = &mac_recipe_list->filt_rules;
4612 rule_lock = &mac_recipe_list->filt_rule_lock;
4613
4614 mutex_lock(rule_lock);
4615 list_for_each_entry(list_itr, rule_head, list_entry) {
4616 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4617
4618 if (ether_addr_equal(existing_mac, umac)) {
4619 mutex_unlock(rule_lock);
4620 return true;
4621 }
4622 }
4623
4624 mutex_unlock(rule_lock);
4625
4626 return false;
4627}
4628
4629
4630
4631
4632
4633
4634
4635
4636
/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address. Setting a non-zero MAC pins it (pf_set_mac);
 * setting the zero address releases it. The VF is reset so the change
 * takes effect.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	/* zero is allowed (clears the MAC); multicast is not */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr.addr, mac))
		return 0;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	if (ice_unicast_mac_exists(pf, mac)) {
		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
			   mac, vf_id, mac);
		return -EINVAL;
	}

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr.addr, mac);
	ether_addr_copy(vf->hw_lan_addr.addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_vc_reset_vf(vf);
	return 0;
}
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
4697{
4698 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4699 struct ice_vf *vf;
4700 int ret;
4701
4702 if (ice_validate_vf_id(pf, vf_id))
4703 return -EINVAL;
4704
4705 vf = &pf->vf[vf_id];
4706 ret = ice_check_vf_ready_for_cfg(vf);
4707 if (ret)
4708 return ret;
4709
4710
4711 if (trusted == vf->trusted)
4712 return 0;
4713
4714 vf->trusted = trusted;
4715 ice_vc_reset_vf(vf);
4716 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
4717 vf_id, trusted ? "" : "un");
4718
4719 return 0;
4720}
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
4731{
4732 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4733 struct ice_vf *vf;
4734 int ret;
4735
4736 if (ice_validate_vf_id(pf, vf_id))
4737 return -EINVAL;
4738
4739 vf = &pf->vf[vf_id];
4740 ret = ice_check_vf_ready_for_cfg(vf);
4741 if (ret)
4742 return ret;
4743
4744 switch (link_state) {
4745 case IFLA_VF_LINK_STATE_AUTO:
4746 vf->link_forced = false;
4747 break;
4748 case IFLA_VF_LINK_STATE_ENABLE:
4749 vf->link_forced = true;
4750 vf->link_up = true;
4751 break;
4752 case IFLA_VF_LINK_STATE_DISABLE:
4753 vf->link_forced = true;
4754 vf->link_up = false;
4755 break;
4756 default:
4757 return -EINVAL;
4758 }
4759
4760 ice_vc_notify_vf_link_state(vf);
4761
4762 return 0;
4763}
4764
4765
4766
4767
4768
4769
4770
4771int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4772 struct ifla_vf_stats *vf_stats)
4773{
4774 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4775 struct ice_eth_stats *stats;
4776 struct ice_vsi *vsi;
4777 struct ice_vf *vf;
4778 int ret;
4779
4780 if (ice_validate_vf_id(pf, vf_id))
4781 return -EINVAL;
4782
4783 vf = &pf->vf[vf_id];
4784 ret = ice_check_vf_ready_for_cfg(vf);
4785 if (ret)
4786 return ret;
4787
4788 vsi = ice_get_vf_vsi(vf);
4789 if (!vsi)
4790 return -EINVAL;
4791
4792 ice_update_eth_stats(vsi);
4793 stats = &vsi->eth_stats;
4794
4795 memset(vf_stats, 0, sizeof(*vf_stats));
4796
4797 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4798 stats->rx_multicast;
4799 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4800 stats->tx_multicast;
4801 vf_stats->rx_bytes = stats->rx_bytes;
4802 vf_stats->tx_bytes = stats->tx_bytes;
4803 vf_stats->broadcast = stats->rx_broadcast;
4804 vf_stats->multicast = stats->rx_multicast;
4805 vf_stats->rx_dropped = stats->rx_discards;
4806 vf_stats->tx_dropped = stats->tx_discards;
4807
4808 return 0;
4809}
4810
4811
4812
4813
4814
/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 *
 * Print the cumulative Rx MDD event count for @vf along with whether
 * automatic VF reset on MDD is enabled on the PF.
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}
4828
4829
4830
4831
4832
4833
4834
/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int i;

	/* check that there are new MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->last_printed_mdd_jiffies = jiffies;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, i,
				 vf->dev_lan_addr.addr);
		}
	}
}
4872
4873
4874
4875
4876
4877
4878
4879
4880void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4881{
4882 u16 vf_id;
4883 int pos;
4884
4885 if (!pci_num_vf(pdev))
4886 return;
4887
4888 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4889 if (pos) {
4890 struct pci_dev *vfdev;
4891
4892 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4893 &vf_id);
4894 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4895 while (vfdev) {
4896 if (vfdev->is_virtfn && vfdev->physfn == pdev)
4897 pci_restore_msi_state(vfdev);
4898 vfdev = pci_get_device(pdev->vendor, vf_id,
4899 vfdev);
4900 }
4901 }
4902}
4903
4904
4905
4906
4907
4908
4909
4910
/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages peinding in admin queue
 *
 * Feed mailbox activity into the HW mailbox overflow detector and, on
 * first detection, warn the host admin about the offending VF.
 *
 * Return: true if the VF was flagged as malicious, false otherwise
 * (including on any detection error).
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	enum ice_status status;
	bool malvf = false;
	struct ice_vf *vf;

	if (ice_validate_vf_id(pf, vf_id))
		return false;

	vf = &pf->vf[vf_id];
	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return false;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		return false;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
					      ICE_MAX_VF_COUNT, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr.addr[0],
					 pf_vsi->netdev->dev_addr);
		}

		return true;
	}

	/* if there was an error in detection or the VF is not malicious
	 * then this VF is not to be reported as malicious
	 */
	return false;
}
4970