dpdk/examples/flow_classify/flow_classify.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <getopt.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
#include <netinet/in.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

#define MAX_NUM_CLASSIFY 30
#define FLOW_CLASSIFY_MAX_RULE_NUM 91
#define FLOW_CLASSIFY_MAX_PRIORITY 8
#define FLOW_CLASSIFIER_NAME_SIZE 64

#define COMMENT_LEAD_CHAR       ('#')
#define OPTION_RULE_IPV4        "rule_ipv4"
#define RTE_LOGTYPE_FLOW_CLASSIFY       RTE_LOGTYPE_USER3
#define flow_classify_log(format, ...) \
                RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)

#define uint32_t_to_char(ip, a, b, c, d) do {\
                *a = (unsigned char)(ip >> 24 & 0xff);\
                *b = (unsigned char)(ip >> 16 & 0xff);\
                *c = (unsigned char)(ip >> 8 & 0xff);\
                *d = (unsigned char)(ip & 0xff);\
        } while (0)
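
/*
 * Illustrative expansion: for ip = RTE_IPV4(192, 168, 0, 1),
 * uint32_t_to_char(ip, &a, &b, &c, &d) yields a = 192, b = 168,
 * c = 0, d = 1 (most significant byte first).
 */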

enum {
        CB_FLD_SRC_ADDR,
        CB_FLD_DST_ADDR,
        CB_FLD_SRC_PORT,
        CB_FLD_SRC_PORT_DLM,
        CB_FLD_SRC_PORT_MASK,
        CB_FLD_DST_PORT,
        CB_FLD_DST_PORT_DLM,
        CB_FLD_DST_PORT_MASK,
        CB_FLD_PROTO,
        CB_FLD_PRIORITY,
        CB_FLD_NUM,
};

static struct {
        const char *rule_ipv4_name;
} parm_config;

const char cb_port_delim[] = ":";

static const struct rte_eth_conf port_conf_default = {
        .rxmode = {
                .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        },
};

struct flow_classifier {
        struct rte_flow_classifier *cls;
};

struct flow_classifier_acl {
        struct flow_classifier cls;
} __rte_cache_aligned;

/* ACL field definitions for IPv4 5 tuple rule */

enum {
        PROTO_FIELD_IPV4,
        SRC_FIELD_IPV4,
        DST_FIELD_IPV4,
        SRCP_FIELD_IPV4,
        DSTP_FIELD_IPV4,
        NUM_FIELDS_IPV4
};

enum {
        PROTO_INPUT_IPV4,
        SRC_INPUT_IPV4,
        DST_INPUT_IPV4,
        SRCP_DESTP_INPUT_IPV4
};

static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
        /* first input field - always one byte long. */
        {
                .type = RTE_ACL_FIELD_TYPE_BITMASK,
                .size = sizeof(uint8_t),
                .field_index = PROTO_FIELD_IPV4,
                .input_index = PROTO_INPUT_IPV4,
                .offset = sizeof(struct rte_ether_hdr) +
                        offsetof(struct rte_ipv4_hdr, next_proto_id),
        },
        /* next input field (IPv4 source address) - 4 consecutive bytes. */
        {
                /* rte_flow uses a bit mask for IPv4 addresses */
                .type = RTE_ACL_FIELD_TYPE_BITMASK,
                .size = sizeof(uint32_t),
                .field_index = SRC_FIELD_IPV4,
                .input_index = SRC_INPUT_IPV4,
                .offset = sizeof(struct rte_ether_hdr) +
                        offsetof(struct rte_ipv4_hdr, src_addr),
        },
        /* next input field (IPv4 destination address) - 4 consecutive bytes. */
        {
                /* rte_flow uses a bit mask for IPv4 addresses */
                .type = RTE_ACL_FIELD_TYPE_BITMASK,
                .size = sizeof(uint32_t),
                .field_index = DST_FIELD_IPV4,
                .input_index = DST_INPUT_IPV4,
                .offset = sizeof(struct rte_ether_hdr) +
                        offsetof(struct rte_ipv4_hdr, dst_addr),
        },
        /*
         * Next 2 fields (src & dst ports) form 4 consecutive bytes.
         * They share the same input index.
         */
        {
                /* rte_flow uses a bit mask for protocol ports */
                .type = RTE_ACL_FIELD_TYPE_BITMASK,
                .size = sizeof(uint16_t),
                .field_index = SRCP_FIELD_IPV4,
                .input_index = SRCP_DESTP_INPUT_IPV4,
                .offset = sizeof(struct rte_ether_hdr) +
                        sizeof(struct rte_ipv4_hdr) +
                        offsetof(struct rte_tcp_hdr, src_port),
        },
        {
                /* rte_flow uses a bit mask for protocol ports */
                .type = RTE_ACL_FIELD_TYPE_BITMASK,
                .size = sizeof(uint16_t),
                .field_index = DSTP_FIELD_IPV4,
                .input_index = SRCP_DESTP_INPUT_IPV4,
                .offset = sizeof(struct rte_ether_hdr) +
                        sizeof(struct rte_ipv4_hdr) +
                        offsetof(struct rte_tcp_hdr, dst_port),
        },
};
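
/*
 * Note: the offsets above assume an untagged Ethernet frame carrying an
 * IPv4 header without options, so the L4 ports sit at the fixed offset
 * sizeof(rte_ether_hdr) + sizeof(rte_ipv4_hdr). The TCP header layout is
 * used for the port fields; UDP and SCTP place src/dst ports at the same
 * two offsets, so the same field definitions cover all three protocols.
 */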

/* flow classify data */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
                .stats = (void **)&ntuple_stats
};

/* parameters for rte_flow_classify_validate and
 * rte_flow_classify_table_entry_add functions
 */

static struct rte_flow_item  eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
        0, 0, 0 };
static struct rte_flow_item  end_item = { RTE_FLOW_ITEM_TYPE_END,
        0, 0, 0 };

/* sample actions:
 * "actions count / end"
 */
struct rte_flow_query_count count = {
        .reset = 1,
        .hits_set = 1,
        .bytes_set = 1,
        .hits = 0,
        .bytes = 0,
};
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
        &count};
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
static struct rte_flow_action actions[2];

/* sample attributes */
static struct rte_flow_attr attr;
/* flow_classify.c: Based on DPDK skeleton forwarding example. */

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
        struct rte_eth_conf port_conf = port_conf_default;
        struct rte_ether_addr addr;
        const uint16_t rx_rings = 1, tx_rings = 1;
        int retval;
        uint16_t q;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;

        if (!rte_eth_dev_is_valid_port(port))
                return -1;

        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
                                port, strerror(-retval));
                return retval;
        }

        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                port_conf.txmode.offloads |=
                        DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        /* Configure the Ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0)
                return retval;

        /* Allocate and set up 1 RX queue per Ethernet port. */
        for (q = 0; q < rx_rings; q++) {
                retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                                rte_eth_dev_socket_id(port), NULL, mbuf_pool);
                if (retval < 0)
                        return retval;
        }

        txconf = dev_info.default_txconf;
        txconf.offloads = port_conf.txmode.offloads;
        /* Allocate and set up 1 TX queue per Ethernet port. */
        for (q = 0; q < tx_rings; q++) {
                retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                                rte_eth_dev_socket_id(port), &txconf);
                if (retval < 0)
                        return retval;
        }

        /* Start the Ethernet port. */
        retval = rte_eth_dev_start(port);
        if (retval < 0)
                return retval;

        /* Display the port MAC address. */
        retval = rte_eth_macaddr_get(port, &addr);
        if (retval != 0)
                return retval;

        printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
                           " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
                        port,
                        addr.addr_bytes[0], addr.addr_bytes[1],
                        addr.addr_bytes[2], addr.addr_bytes[3],
                        addr.addr_bytes[4], addr.addr_bytes[5]);

        /* Enable RX in promiscuous mode for the Ethernet device. */
        retval = rte_eth_promiscuous_enable(port);
        if (retval != 0)
                return retval;

        return 0;
}

/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port, classifying the packets, and writing to an output port.
 */
static __rte_noreturn void
lcore_main(struct flow_classifier *cls_app)
{
        uint16_t port;
        int ret;
        int i = 0;

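        /*
         * Demonstrate rule deletion: rules[7] holds the eighth rule that
         * add_rules() installed from the rules file (slots are filled in
         * file order). If fewer than eight rules were loaded, rules[7] is
         * NULL and the delete call below simply reports failure.
         */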
        ret = rte_flow_classify_table_entry_delete(cls_app->cls,
                        rules[7]);
        if (ret)
                printf("table_entry_delete failed [7] %d\n\n", ret);
        else
                printf("table_entry_delete succeeded [7]\n\n");

        /*
         * Check that the port is on the same NUMA node as the polling thread
         * for best performance.
         */
        RTE_ETH_FOREACH_DEV(port)
                if (rte_eth_dev_socket_id(port) >= 0 &&
                        rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
                        printf("\n\nWARNING: port %u is on remote NUMA node "
                               "to polling thread.\n"
                               "Performance will not be optimal.\n", port);
                }
        printf("\nCore %u forwarding packets. ", rte_lcore_id());
        printf("[Ctrl+C to quit]\n");

        /* Run until the application is quit or killed. */
        for (;;) {
                /*
                 * Receive packets on a port, classify them and forward them
                 * on the paired port.
                 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
                 */
                RTE_ETH_FOREACH_DEV(port) {
                        /* Get burst of RX packets, from first port of pair. */
                        struct rte_mbuf *bufs[BURST_SIZE];
                        const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                                        bufs, BURST_SIZE);

                        if (unlikely(nb_rx == 0))
                                continue;

                        for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
                                if (rules[i]) {
                                        ret = rte_flow_classifier_query(
                                                cls_app->cls,
                                                bufs, nb_rx, rules[i],
                                                &classify_stats);
                                        if (ret)
                                                printf(
                                                        "rule [%d] query failed ret [%d]\n\n",
                                                        i, ret);
                                        else {
                                                printf(
                                                "rule[%d] count=%"PRIu64"\n",
                                                i, ntuple_stats.counter1);

                                                printf("proto = %d\n",
                                                ntuple_stats.ipv4_5tuple.proto);
                                        }
                                }
                        }

                        /* Send burst of TX packets, to second port of pair. */
                        const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                                        bufs, nb_rx);

                        /* Free any unsent packets. */
                        if (unlikely(nb_tx < nb_rx)) {
                                uint16_t buf;

                                for (buf = nb_tx; buf < nb_rx; buf++)
                                        rte_pktmbuf_free(bufs[buf]);
                        }
                }
        }
}

/*
 * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
 * Expected format:
 * <src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port> <space> ":" <src_port_mask> <space> \
 * <dst_port> <space> ":" <dst_port_mask> <space> \
 * <proto>'/'<proto_mask> <space> \
 * <priority>
 */
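
/*
 * Illustrative rule line matching the format above (UDP, protocol 17):
 * 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
 */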

static int
get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
                char dlm)
{
        unsigned long val;
        char *end;

        errno = 0;
        val = strtoul(*in, &end, base);
        if (errno != 0 || end[0] != dlm || val > lim)
                return -EINVAL;
        *fd = (uint32_t)val;
        *in = end + 1;
        return 0;
}
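
/*
 * Illustrative call: with *in = "17/0xff", base = 0 and dlm = '/',
 * get_cb_field() stores 17 in *fd and advances *in past the '/' so it
 * points at "0xff", ready for the next call.
 */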

static int
parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
{
        uint32_t a, b, c, d, m;

        if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
                return -EINVAL;
        if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
                return -EINVAL;
        if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
                return -EINVAL;
        if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
                return -EINVAL;
        if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
                return -EINVAL;

        addr[0] = RTE_IPV4(a, b, c, d);
        mask_len[0] = m;
        return 0;
}
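
/*
 * For example, "2.2.2.3/24" parses to addr = RTE_IPV4(2, 2, 2, 3) and
 * mask_len = 24.
 */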

static int
parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
{
        int i, ret;
        char *s, *sp, *in[CB_FLD_NUM];
        static const char *dlm = " \t\n";
        int dim = CB_FLD_NUM;
        uint32_t temp;

        s = str;
        for (i = 0; i != dim; i++, s = NULL) {
                in[i] = strtok_r(s, dlm, &sp);
                if (in[i] == NULL)
                        return -EINVAL;
        }

        ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
                        &ntuple_filter->src_ip,
                        &ntuple_filter->src_ip_mask);
        if (ret != 0) {
                flow_classify_log("failed to read source address/mask: %s\n",
                        in[CB_FLD_SRC_ADDR]);
                return ret;
        }

        ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
                        &ntuple_filter->dst_ip,
                        &ntuple_filter->dst_ip_mask);
        if (ret != 0) {
                flow_classify_log("failed to read destination address/mask: %s\n",
                        in[CB_FLD_DST_ADDR]);
                return ret;
        }

        if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
                return -EINVAL;
        ntuple_filter->src_port = (uint16_t)temp;

        if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
                        sizeof(cb_port_delim)) != 0)
                return -EINVAL;

        if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
                return -EINVAL;
        ntuple_filter->src_port_mask = (uint16_t)temp;

        if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
                return -EINVAL;
        ntuple_filter->dst_port = (uint16_t)temp;

        if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
                        sizeof(cb_port_delim)) != 0)
                return -EINVAL;

        if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
                return -EINVAL;
        ntuple_filter->dst_port_mask = (uint16_t)temp;

        if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
                return -EINVAL;
        ntuple_filter->proto = (uint8_t)temp;

        if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
                return -EINVAL;
        ntuple_filter->proto_mask = (uint8_t)temp;

        if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
                return -EINVAL;
        ntuple_filter->priority = (uint16_t)temp;
        if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
                ret = -EINVAL;

        return ret;
}

/* Bypass comment and empty lines */
static inline int
is_bypass_line(char *buff)
{
        int i = 0;

        /* comment line */
        if (buff[0] == COMMENT_LEAD_CHAR)
                return 1;
        /* empty line */
        while (buff[i] != '\0') {
                if (!isspace(buff[i]))
                        return 0;
                i++;
        }
        return 1;
}

static uint32_t
convert_depth_to_bitmask(uint32_t depth_val)
{
        uint32_t bitmask = 0;
        int i, j;

        for (i = depth_val, j = 0; i > 0; i--, j++)
                bitmask |= (1 << (31 - j));
        return bitmask;
}
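
/*
 * For example, a depth of 24 yields the bitmask 0xffffff00 (the top 24
 * bits set), i.e. the usual /24 netmask 255.255.255.0.
 */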

static int
add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
                struct flow_classifier *cls_app)
{
        int ret = -1;
        int key_found;
        struct rte_flow_error error;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item ipv4_udp_item;
        struct rte_flow_item ipv4_tcp_item;
        struct rte_flow_item ipv4_sctp_item;
        struct rte_flow_item_udp udp_spec;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item udp_item;
        struct rte_flow_item_tcp tcp_spec;
        struct rte_flow_item_tcp tcp_mask;
        struct rte_flow_item tcp_item;
        struct rte_flow_item_sctp sctp_spec;
        struct rte_flow_item_sctp sctp_mask;
        struct rte_flow_item sctp_item;
        struct rte_flow_item pattern_ipv4_5tuple[4];
        struct rte_flow_classify_rule *rule;
        uint8_t ipv4_proto;

        if (num_classify_rules >= MAX_NUM_CLASSIFY) {
                printf(
                        "\nINFO: classify rule capacity %d reached\n",
                        num_classify_rules);
                return ret;
        }

        /* set up parameters for validate and add */
        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
        ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
        ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
        ipv4_proto = ipv4_spec.hdr.next_proto_id;

        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
        ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
        ipv4_mask.hdr.src_addr =
                convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
        ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
        ipv4_mask.hdr.dst_addr =
                convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);

        switch (ipv4_proto) {
        case IPPROTO_UDP:
                ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                ipv4_udp_item.spec = &ipv4_spec;
                ipv4_udp_item.mask = &ipv4_mask;
                ipv4_udp_item.last = NULL;

                udp_spec.hdr.src_port = ntuple_filter->src_port;
                udp_spec.hdr.dst_port = ntuple_filter->dst_port;
                udp_spec.hdr.dgram_len = 0;
                udp_spec.hdr.dgram_cksum = 0;

                udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
                udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
                udp_mask.hdr.dgram_len = 0;
                udp_mask.hdr.dgram_cksum = 0;

                udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
                udp_item.spec = &udp_spec;
                udp_item.mask = &udp_mask;
                udp_item.last = NULL;

                attr.priority = ntuple_filter->priority;
                pattern_ipv4_5tuple[1] = ipv4_udp_item;
                pattern_ipv4_5tuple[2] = udp_item;
                break;
        case IPPROTO_TCP:
                ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                ipv4_tcp_item.spec = &ipv4_spec;
                ipv4_tcp_item.mask = &ipv4_mask;
                ipv4_tcp_item.last = NULL;

                memset(&tcp_spec, 0, sizeof(tcp_spec));
                tcp_spec.hdr.src_port = ntuple_filter->src_port;
                tcp_spec.hdr.dst_port = ntuple_filter->dst_port;

                memset(&tcp_mask, 0, sizeof(tcp_mask));
                tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
                tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;

                tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
                tcp_item.spec = &tcp_spec;
                tcp_item.mask = &tcp_mask;
                tcp_item.last = NULL;

                attr.priority = ntuple_filter->priority;
                pattern_ipv4_5tuple[1] = ipv4_tcp_item;
                pattern_ipv4_5tuple[2] = tcp_item;
                break;
        case IPPROTO_SCTP:
                ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                ipv4_sctp_item.spec = &ipv4_spec;
                ipv4_sctp_item.mask = &ipv4_mask;
                ipv4_sctp_item.last = NULL;

                sctp_spec.hdr.src_port = ntuple_filter->src_port;
                sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
                sctp_spec.hdr.cksum = 0;
                sctp_spec.hdr.tag = 0;

                sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
                sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
                sctp_mask.hdr.cksum = 0;
                sctp_mask.hdr.tag = 0;

                sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
                sctp_item.spec = &sctp_spec;
                sctp_item.mask = &sctp_mask;
                sctp_item.last = NULL;

                attr.priority = ntuple_filter->priority;
                pattern_ipv4_5tuple[1] = ipv4_sctp_item;
                pattern_ipv4_5tuple[2] = sctp_item;
                break;
        default:
                return ret;
        }

        attr.ingress = 1;
        pattern_ipv4_5tuple[0] = eth_item;
        pattern_ipv4_5tuple[3] = end_item;
        actions[0] = count_action;
        actions[1] = end_action;

        /* Validate and add rule */
        ret = rte_flow_classify_validate(cls_app->cls, &attr,
                        pattern_ipv4_5tuple, actions, &error);
        if (ret) {
                printf("table entry validate failed ipv4_proto = %u\n",
                        ipv4_proto);
                return ret;
        }

        rule = rte_flow_classify_table_entry_add(
                        cls_app->cls, &attr, pattern_ipv4_5tuple,
                        actions, &key_found, &error);
        if (rule == NULL) {
                printf("table entry add failed ipv4_proto = %u\n",
                        ipv4_proto);
                ret = -1;
                return ret;
        }

        rules[num_classify_rules] = rule;
        num_classify_rules++;
        return 0;
}

static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
        FILE *fh;
        char buff[LINE_MAX];
        unsigned int i = 0;
        unsigned int total_num = 0;
        struct rte_eth_ntuple_filter ntuple_filter;
        int ret;

        fh = fopen(rule_path, "rb");
        if (fh == NULL)
                rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
                        rule_path);

        ret = fseek(fh, 0, SEEK_SET);
        if (ret)
                rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
                        ret);

        i = 0;
        while (fgets(buff, LINE_MAX, fh) != NULL) {
                i++;

                if (is_bypass_line(buff))
                        continue;

                if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
                        printf("\nINFO: classify rule capacity %d reached\n",
                                total_num);
                        break;
                }

                if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
                        rte_exit(EXIT_FAILURE,
                                "%s Line %u: parse rules error\n",
                                rule_path, i);

                if (add_classify_rule(&ntuple_filter, cls_app) != 0)
                        rte_exit(EXIT_FAILURE, "add rule error\n");

                total_num++;
        }

        fclose(fh);
        return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
        printf("%s usage:\n", prgname);
        printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
        printf("specify the ipv4 rules file.\n");
        printf("Each rule occupies one line in the file.\n");
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
        int opt, ret;
        char **argvopt;
        int option_index;
        char *prgname = argv[0];
        static struct option lgopts[] = {
                {OPTION_RULE_IPV4, 1, 0, 0},
                {NULL, 0, 0, 0}
        };

        argvopt = argv;

        while ((opt = getopt_long(argc, argvopt, "",
                                lgopts, &option_index)) != EOF) {

                switch (opt) {
                /* long options */
                case 0:
                        if (!strncmp(lgopts[option_index].name,
                                        OPTION_RULE_IPV4,
                                        sizeof(OPTION_RULE_IPV4)))
                                parm_config.rule_ipv4_name = optarg;
                        break;
                default:
                        print_usage(prgname);
                        return -1;
                }
        }

        if (optind >= 0)
                argv[optind-1] = prgname;

        ret = optind-1;
        optind = 1; /* reset getopt lib */
        return ret;
}
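
/*
 * Typical invocation (illustrative; the binary name and EAL options
 * depend on your build and setup):
 *   ./build/flow_classify -c 4 -n 4 -- --rule_ipv4=ipv4_rules_file.txt
 */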

/*
 * The main function, which does initialization and calls the lcore_main
 * function.
 */
int
main(int argc, char *argv[])
{
        struct rte_mempool *mbuf_pool;
        uint16_t nb_ports;
        uint16_t portid;
        int ret;
        int socket_id;
        struct rte_table_acl_params table_acl_params;
        struct rte_flow_classify_table_params cls_table_params;
        struct flow_classifier *cls_app;
        struct rte_flow_classifier_params cls_params;
        uint32_t size;

        /* Initialize the Environment Abstraction Layer (EAL). */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

        argc -= ret;
        argv += ret;

        /* parse application arguments (after the EAL ones) */
        ret = parse_args(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

        /* Check that there is an even number of ports to send/receive on. */
        nb_ports = rte_eth_dev_count_avail();
        if (nb_ports < 2 || (nb_ports & 1))
                rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

        /* Creates a new mempool in memory to hold the mbufs. */
        mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
                MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

        if (mbuf_pool == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

        /* Initialize all ports. */
        RTE_ETH_FOREACH_DEV(portid)
                if (port_init(portid, mbuf_pool) != 0)
                        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
                                        portid);

        if (rte_lcore_count() > 1)
                printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

        socket_id = rte_eth_dev_socket_id(0);

        /* Memory allocation */
        size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
        cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (cls_app == NULL)
                rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

        cls_params.name = "flow_classifier";
        cls_params.socket_id = socket_id;

        cls_app->cls = rte_flow_classifier_create(&cls_params);
        if (cls_app->cls == NULL) {
                rte_free(cls_app);
                rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
        }

        /* initialise ACL table params */
        table_acl_params.name = "table_acl_ipv4_5tuple";
        table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
        table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
        memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

        /* initialise table create params */
        cls_table_params.ops = &rte_table_acl_ops;
        cls_table_params.arg_create = &table_acl_params;
        cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

        ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
        if (ret) {
                rte_flow_classifier_free(cls_app->cls);
                rte_free(cls_app);
                rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
        }

        /* read file of IPv4 5 tuple rules and initialize parameters
         * for rte_flow_classify_validate and rte_flow_classify_table_entry_add
         * API's.
         */
        if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
                rte_flow_classifier_free(cls_app->cls);
                rte_free(cls_app);
                rte_exit(EXIT_FAILURE, "Failed to add rules\n");
        }

        /* Call lcore_main on the main core only. */
        lcore_main(cls_app);

        /* clean up the EAL */
        rte_eal_cleanup();

        return 0;
}