#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <dpaa2_pmd_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "../dpaa2_ethdev.h"

int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg);

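/*
 * Configure a custom Rx hash for a dpaa2 port: frames on traffic class 0
 * are spread across all configured Rx queues based on a hash computed
 * over @size bytes starting at byte @offset of the frame data.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */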
int
rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
			      uint16_t offset,
			      uint8_t size)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_index = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		DPAA2_PMD_WARN("Invalid port id %u", port_id);
		return -EINVAL;
	}

	if (strcmp(eth_dev->device->driver->name,
		   RTE_STR(NET_DPAA2_PMD_DRIVER_NAME))) {
		DPAA2_PMD_WARN("Not a valid dpaa2 port");
		return -EINVAL;
	}

	p_params = rte_zmalloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
	kg_cfg.extracts[0].extract.from_data.offset = offset;
	kg_cfg.extracts[0].extract.from_data.size = size;
	kg_cfg.extracts[0].num_of_byte_masks = 0;
	kg_cfg.num_extracts = 1;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
	tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
				  &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
		return ret;
	}

	return 0;
}
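
/*
 * A minimal usage sketch (illustrative only, not part of the driver): an
 * application would typically set the custom hash after configuring the
 * port and before starting it. The offset and size below are hypothetical
 * values, e.g. hashing 4 bytes located 14 bytes into the frame.
 *
 *	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) == 0 &&
 *	    rte_pmd_dpaa2_set_custom_hash(port_id, 14, 4) == 0)
 *		ret = rte_eth_dev_start(port_id);
 */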
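
/*
 * Enable RSS-style hash distribution for one traffic class. The requested
 * rte_eth RSS flags in @req_dist_set are translated into a DPAA2 key
 * generation profile, which is then applied to hardware through
 * dpni_set_rx_hash_dist().
 */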
int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		      uint64_t req_dist_set, int tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_dist_queues;

	/* Rx queues are assigned to TCs in blocks of priv->dist_queues,
	 * in order of TC index; work out how many queues remain for this
	 * TC and distribute over those.
	 */
	tc_dist_queues = eth_dev->data->nb_rx_queues -
		tc_index * priv->dist_queues;
	if (tc_dist_queues <= 0) {
		DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
		return 0;
	}

	if (tc_dist_queues > priv->dist_queues)
		tc_dist_queues = priv->dist_queues;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));

	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
			      req_dist_set);
		rte_free(p_params);
		return ret;
	}

	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = tc_dist_queues;
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
		return ret;
	}

	return 0;
}
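
/*
 * Disable flow distribution for a traffic class by programming an empty
 * key generation profile (zero extracts) and a distribution size of 0.
 */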
int
dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
		       uint8_t tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
	tc_cfg.dist_size = 0;
	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	kg_cfg.num_extracts = 0;
	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
				    &tc_cfg);
	rte_free(p_params);
	if (ret)
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
	return ret;
}
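
/*
 * Translate an rte_eth RSS flag set (@req_dist_set) into a DPAA2 key
 * generation profile. The bit mask is walked bit by bit; every supported
 * flag appends one or more header-field extracts (e.g. IP source/dest
 * address and protocol for the IP flags) to @kg_cfg. Each protocol layer
 * is configured at most once, however many of its flags are set.
 */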
int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg)
{
	uint32_t loop = 0, i = 0;
	uint64_t dist_field = 0;
	int l2_configured = 0, l3_configured = 0;
	int l4_configured = 0, sctp_configured = 0;
	int mpls_configured = 0;

	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	while (req_dist_set) {
		if (req_dist_set % 2 != 0) {
			dist_field = 1ULL << loop;
			switch (dist_field) {
			case ETH_RSS_L2_PAYLOAD:

				if (l2_configured)
					break;
				l2_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_ETH;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_ETH_TYPE;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_MPLS:

				if (mpls_configured)
					break;
				mpls_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_1;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_2;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_N;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_IPV4:
			case ETH_RSS_FRAG_IPV4:
			case ETH_RSS_NONFRAG_IPV4_OTHER:
			case ETH_RSS_IPV6:
			case ETH_RSS_FRAG_IPV6:
			case ETH_RSS_NONFRAG_IPV6_OTHER:
			case ETH_RSS_IPV6_EX:

				if (l3_configured)
					break;
				l3_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_PROTO;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_TCP:
			case ETH_RSS_NONFRAG_IPV6_TCP:
			case ETH_RSS_NONFRAG_IPV4_UDP:
			case ETH_RSS_NONFRAG_IPV6_UDP:
			case ETH_RSS_IPV6_TCP_EX:
			case ETH_RSS_IPV6_UDP_EX:

				if (l4_configured)
					break;
				l4_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_SCTP:
			case ETH_RSS_NONFRAG_IPV6_SCTP:

				if (sctp_configured)
					break;
				sctp_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			default:
				DPAA2_PMD_WARN(
					"unsupported flow dist option 0x%" PRIx64,
					dist_field);
				return -EINVAL;
			}
		}
		req_dist_set = req_dist_set >> 1;
		loop++;
	}
	kg_cfg->num_extracts = i;
	return 0;
}
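
/*
 * A sketch of the mapping above (values assumed for illustration): a
 * request of ETH_RSS_IP | ETH_RSS_UDP sets several L3 bits and two L4
 * bits, but each layer is configured only once, so the resulting profile
 * holds five extracts - IP source address, IP destination address, IP
 * protocol, L4 source port and L4 destination port - and
 * kg_cfg->num_extracts ends up as 5.
 */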
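
/*
 * Attach a DPNI to the buffer pool list passed in @blist: program the Rx
 * buffer layout (headroom, alignment, annotation areas) and hand the DPBP
 * backing the pool to the interface with dpni_set_pools().
 */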
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
		     void *blist)
{
	int32_t retcode;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_pools_cfg bpool_cfg;
	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
	struct dpni_buffer_layout layout;
	int tot_size;

	/* Check alignment for the Rx buffer layout first: the headroom
	 * handed to hardware is the mbuf headroom aligned up to the packet
	 * layout boundary, minus the private-data and hardware-annotation
	 * areas carved out below.
	 */
	tot_size = RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_timestamp = true;
	layout.pass_frame_status = 1;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
				DPAA2_MBUF_HW_ANNOTATION;
	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
					 DPNI_QUEUE_RX, &layout);
	if (retcode) {
		DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
			      retcode);
		return retcode;
	}

	/* Attach the buffer pool to the network interface */
	memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
	bpool_cfg.num_dpbp = 1;
	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
	bpool_cfg.pools[0].backup_pool = 0;
	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
							DPAA2_PACKET_LAYOUT_ALIGN);
	bpool_cfg.pools[0].priority_mask = 0;

	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
	if (retcode != 0) {
		DPAA2_PMD_ERR("Error configuring buffer pool on interface."
			      " bpid = %d error code = %d",
			      bpool_cfg.pools[0].dpbp_id, retcode);
		return retcode;
	}

	priv->bp_list = bp_list;
	return 0;
}