dpdk/app/test/test_lpm.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2010-2014 Intel Corporation
   3 */
   4
   5#include "test.h"
   6
   7#ifdef RTE_EXEC_ENV_WINDOWS
   8static int
   9test_lpm(void)
  10{
  11        printf("lpm not supported on Windows, skipping test\n");
  12        return TEST_SKIPPED;
  13}
  14
  15#else
  16
  17#include <stdio.h>
  18#include <stdint.h>
  19#include <stdlib.h>
  20
  21#include <rte_ip.h>
  22#include <rte_lpm.h>
  23#include <rte_malloc.h>
  24
  25#include "test_xmmt_ops.h"
  26
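    /* Abort the current test case and report the source line when cond is false. */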
  27#define TEST_LPM_ASSERT(cond) do {                                            \
  28        if (!(cond)) {                                                        \
   29        printf("Error at line %d:\n", __LINE__);                      \
  30                return -1;                                                    \
  31        }                                                                     \
  32} while(0)
  33
  34typedef int32_t (*rte_lpm_test)(void);
  35
  36static int32_t test0(void);
  37static int32_t test1(void);
  38static int32_t test2(void);
  39static int32_t test3(void);
  40static int32_t test4(void);
  41static int32_t test5(void);
  42static int32_t test6(void);
  43static int32_t test7(void);
  44static int32_t test8(void);
  45static int32_t test9(void);
  46static int32_t test10(void);
  47static int32_t test11(void);
  48static int32_t test12(void);
  49static int32_t test13(void);
  50static int32_t test14(void);
  51static int32_t test15(void);
  52static int32_t test16(void);
  53static int32_t test17(void);
  54static int32_t test18(void);
  55static int32_t test19(void);
  56static int32_t test20(void);
  57static int32_t test21(void);
  58
  59rte_lpm_test tests[] = {
  60/* Test Cases */
  61        test0,
  62        test1,
  63        test2,
  64        test3,
  65        test4,
  66        test5,
  67        test6,
  68        test7,
  69        test8,
  70        test9,
  71        test10,
  72        test11,
  73        test12,
  74        test13,
  75        test14,
  76        test15,
  77        test16,
  78        test17,
  79        test18,
  80        test19,
  81        test20,
  82        test21
  83};
  84
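    /* Common constants used by the test cases below. */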
  85#define MAX_DEPTH 32
  86#define MAX_RULES 256
  87#define NUMBER_TBL8S 256
  88#define PASS 0
  89
  90/*
  91 * Check that rte_lpm_create fails gracefully for incorrect user input
  92 * arguments
  93 */
  94int32_t
  95test0(void)
  96{
  97        struct rte_lpm *lpm = NULL;
  98        struct rte_lpm_config config;
  99
 100        config.max_rules = MAX_RULES;
 101        config.number_tbl8s = NUMBER_TBL8S;
 102        config.flags = 0;
 103
 104        /* rte_lpm_create: lpm name == NULL */
 105        lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
 106        TEST_LPM_ASSERT(lpm == NULL);
 107
 108        /* rte_lpm_create: max_rules = 0 */
 109        /* Note: __func__ inserts the function name, in this case "test0". */
 110        config.max_rules = 0;
 111        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 112        TEST_LPM_ASSERT(lpm == NULL);
 113
 114        /* socket_id < -1 is invalid */
 115        config.max_rules = MAX_RULES;
 116        lpm = rte_lpm_create(__func__, -2, &config);
 117        TEST_LPM_ASSERT(lpm == NULL);
 118
 119        return PASS;
 120}
 121
 122/*
 123 * Create an lpm table and then delete it, 100 times.
 124 * Use a slightly different max_rules value each time.
 125 */
 126int32_t
 127test1(void)
 128{
 129        struct rte_lpm *lpm = NULL;
 130        struct rte_lpm_config config;
 131
 132        config.number_tbl8s = NUMBER_TBL8S;
 133        config.flags = 0;
 134        int32_t i;
 135
 136        /* Repeatedly create and free the table, varying max_rules. */
 137        for (i = 0; i < 100; i++) {
 138                config.max_rules = MAX_RULES - i;
 139                lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 140                TEST_LPM_ASSERT(lpm != NULL);
 141
 142                rte_lpm_free(lpm);
 143        }
 144
 145        /* Cannot verify the result of rte_lpm_free, so return success */
 146        return PASS;
 147}
 148
 149/*
 150 * Call rte_lpm_free for NULL pointer user input. Note: free has no return
 151 * value and therefore it is impossible to check for failure, but this test
 152 * is added to increase function coverage and to validate that freeing NULL
 153 * does not crash.
 154 */
 155int32_t
 156test2(void)
 157{
 158        struct rte_lpm *lpm = NULL;
 159        struct rte_lpm_config config;
 160
 161        config.max_rules = MAX_RULES;
 162        config.number_tbl8s = NUMBER_TBL8S;
 163        config.flags = 0;
 164
 165        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 166        TEST_LPM_ASSERT(lpm != NULL);
 167
 168        rte_lpm_free(lpm);
 169        rte_lpm_free(NULL);
 170        return PASS;
 171}
 172
 173/*
 174 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
 175 */
 176int32_t
 177test3(void)
 178{
 179        struct rte_lpm *lpm = NULL;
 180        struct rte_lpm_config config;
 181
 182        config.max_rules = MAX_RULES;
 183        config.number_tbl8s = NUMBER_TBL8S;
 184        config.flags = 0;
 185        uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop = 100;
 186        uint8_t depth = 24;
 187        int32_t status = 0;
 188
 189        /* rte_lpm_add: lpm == NULL */
 190        status = rte_lpm_add(NULL, ip, depth, next_hop);
 191        TEST_LPM_ASSERT(status < 0);
 192
 193        /* Create valid lpm to use in rest of test. */
 194        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 195        TEST_LPM_ASSERT(lpm != NULL);
 196
 197        /* rte_lpm_add: depth < 1 */
 198        status = rte_lpm_add(lpm, ip, 0, next_hop);
 199        TEST_LPM_ASSERT(status < 0);
 200
 201        /* rte_lpm_add: depth > MAX_DEPTH */
 202        status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
 203        TEST_LPM_ASSERT(status < 0);
 204
 205        rte_lpm_free(lpm);
 206
 207        return PASS;
 208}
 209
 210/*
 211 * Check that rte_lpm_delete fails gracefully for incorrect user input
 212 * arguments
 213 */
 214int32_t
 215test4(void)
 216{
 217        struct rte_lpm *lpm = NULL;
 218        struct rte_lpm_config config;
 219
 220        config.max_rules = MAX_RULES;
 221        config.number_tbl8s = NUMBER_TBL8S;
 222        config.flags = 0;
 223        uint32_t ip = RTE_IPV4(0, 0, 0, 0);
 224        uint8_t depth = 24;
 225        int32_t status = 0;
 226
 227        /* rte_lpm_delete: lpm == NULL */
 228        status = rte_lpm_delete(NULL, ip, depth);
 229        TEST_LPM_ASSERT(status < 0);
 230
 231        /* Create valid lpm to use in rest of test. */
 232        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 233        TEST_LPM_ASSERT(lpm != NULL);
 234
 235        /* rte_lpm_delete: depth < 1 */
 236        status = rte_lpm_delete(lpm, ip, 0);
 237        TEST_LPM_ASSERT(status < 0);
 238
 239        /* rte_lpm_delete: depth > MAX_DEPTH */
 240        status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
 241        TEST_LPM_ASSERT(status < 0);
 242
 243        rte_lpm_free(lpm);
 244
 245        return PASS;
 246}
 247
 248/*
 249 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 250 * arguments
 251 */
 252int32_t
 253test5(void)
 254{
 255#if defined(RTE_LIBRTE_LPM_DEBUG)
 256        struct rte_lpm *lpm = NULL;
 257        struct rte_lpm_config config;
 258
 259        config.max_rules = MAX_RULES;
 260        config.number_tbl8s = NUMBER_TBL8S;
 261        config.flags = 0;
 262        uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_return = 0;
 263        int32_t status = 0;
 264
 265        /* rte_lpm_lookup: lpm == NULL */
 266        status = rte_lpm_lookup(NULL, ip, &next_hop_return);
 267        TEST_LPM_ASSERT(status < 0);
 268
 269        /* Create valid lpm to use in rest of test. */
 270        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 271        TEST_LPM_ASSERT(lpm != NULL);
 272
 273        /* rte_lpm_lookup: next_hop = NULL */
 274        status = rte_lpm_lookup(lpm, ip, NULL);
 275        TEST_LPM_ASSERT(status < 0);
 276
 277        rte_lpm_free(lpm);
 278#endif
 279        return PASS;
 280}
 281
 282
 283
 284/*
 285 * Call add, lookup and delete for a single rule with depth <= 24
 286 */
 287int32_t
 288test6(void)
 289{
 290        struct rte_lpm *lpm = NULL;
 291        struct rte_lpm_config config;
 292
 293        config.max_rules = MAX_RULES;
 294        config.number_tbl8s = NUMBER_TBL8S;
 295        config.flags = 0;
 296        uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
 297        uint8_t depth = 24;
 298        int32_t status = 0;
 299
 300        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 301        TEST_LPM_ASSERT(lpm != NULL);
 302
 303        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 304        TEST_LPM_ASSERT(status == 0);
 305
 306        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 307        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 308
 309        status = rte_lpm_delete(lpm, ip, depth);
 310        TEST_LPM_ASSERT(status == 0);
 311
 312        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 313        TEST_LPM_ASSERT(status == -ENOENT);
 314
 315        rte_lpm_free(lpm);
 316
 317        return PASS;
 318}
 319
 320/*
 321 * Call add, lookup and delete for a single rule with depth > 24
 322 */
 323
 324int32_t
 325test7(void)
 326{
 327        xmm_t ipx4;
 328        uint32_t hop[4];
 329        struct rte_lpm *lpm = NULL;
 330        struct rte_lpm_config config;
 331
 332        config.max_rules = MAX_RULES;
 333        config.number_tbl8s = NUMBER_TBL8S;
 334        config.flags = 0;
 335        uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
 336        uint8_t depth = 32;
 337        int32_t status = 0;
 338
 339        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 340        TEST_LPM_ASSERT(lpm != NULL);
 341
 342        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 343        TEST_LPM_ASSERT(status == 0);
 344
 345        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 346        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 347
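            /* vect_set_epi32() lists lanes most-significant first, so hop[0]
             * holds the result for the last argument and hop[3] for the first;
             * the two middle addresses have no matching rule and return the
             * default value passed to rte_lpm_lookupx4() (UINT32_MAX). */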
 348        ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
 349        rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 350        TEST_LPM_ASSERT(hop[0] == next_hop_add);
 351        TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
 352        TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 353        TEST_LPM_ASSERT(hop[3] == next_hop_add);
 354
 355        status = rte_lpm_delete(lpm, ip, depth);
 356        TEST_LPM_ASSERT(status == 0);
 357
 358        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 359        TEST_LPM_ASSERT(status == -ENOENT);
 360
 361        rte_lpm_free(lpm);
 362
 363        return PASS;
 364}
 365
 366/*
 367 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 368 * table. Use all possible depths ranging from 1..32. Set the next hop equal
 369 * to the depth. Check for a lookup hit after every add and for a lookup miss
 370 * on the first half of the lpm table after each add. Finally, delete all
 371 * rules going backwards (i.e. from depth = 32..1) and carry out a lookup
 372 * after each delete. The lookup should return the next_hop_add value related
 373 * to the previous depth value (i.e. depth - 1).
 374 */
 375int32_t
 376test8(void)
 377{
 378        xmm_t ipx4;
 379        uint32_t hop[4];
 380        struct rte_lpm *lpm = NULL;
 381        struct rte_lpm_config config;
 382
 383        config.max_rules = MAX_RULES;
 384        config.number_tbl8s = NUMBER_TBL8S;
 385        config.flags = 0;
 386        uint32_t ip1 = RTE_IPV4(127, 255, 255, 255), ip2 = RTE_IPV4(128, 0, 0, 0);
 387        uint32_t next_hop_add, next_hop_return;
 388        uint8_t depth;
 389        int32_t status = 0;
 390
 391        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 392        TEST_LPM_ASSERT(lpm != NULL);
 393
 394        /* Loop with rte_lpm_add. */
 395        for (depth = 1; depth <= 32; depth++) {
 396                /* Let the next_hop_add value = depth, just for variety. */
 397                next_hop_add = depth;
 398
 399                status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
 400                TEST_LPM_ASSERT(status == 0);
 401
 402                /* Check IP in first half of tbl24 which should be empty. */
 403                status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
 404                TEST_LPM_ASSERT(status == -ENOENT);
 405
 406                status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
 407                TEST_LPM_ASSERT((status == 0) &&
 408                        (next_hop_return == next_hop_add));
 409
 410                ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
 411                rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 412                TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 413                TEST_LPM_ASSERT(hop[1] == next_hop_add);
 414                TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 415                TEST_LPM_ASSERT(hop[3] == next_hop_add);
 416        }
 417
 418        /* Loop with rte_lpm_delete. */
 419        for (depth = 32; depth >= 1; depth--) {
 420                next_hop_add = (uint8_t) (depth - 1);
 421
 422                status = rte_lpm_delete(lpm, ip2, depth);
 423                TEST_LPM_ASSERT(status == 0);
 424
 425                status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
 426
 427                if (depth != 1) {
 428                        TEST_LPM_ASSERT((status == 0) &&
 429                                (next_hop_return == next_hop_add));
 430                } else {
 431                        TEST_LPM_ASSERT(status == -ENOENT);
 432                }
 433
 434                status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
 435                TEST_LPM_ASSERT(status == -ENOENT);
 436
 437                ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
 438                rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 439                if (depth != 1) {
 440                        TEST_LPM_ASSERT(hop[0] == next_hop_add);
 441                        TEST_LPM_ASSERT(hop[1] == next_hop_add);
 442                } else {
 443                        TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 444                        TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
 445                }
 446                TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 447                TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
 448        }
 449
 450        rte_lpm_free(lpm);
 451
 452        return PASS;
 453}
 454
 455/*
 456 * - Add & lookup to hit invalid TBL24 entry
 457 * - Add & lookup to hit valid TBL24 entry not extended
 458 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 459 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 460 *
 461 */
 462int32_t
 463test9(void)
 464{
 465        struct rte_lpm *lpm = NULL;
 466        struct rte_lpm_config config;
 467
 468        config.max_rules = MAX_RULES;
 469        config.number_tbl8s = NUMBER_TBL8S;
 470        config.flags = 0;
 471        uint32_t ip, ip_1, ip_2;
 472        uint8_t depth, depth_1, depth_2;
 473        uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
 474        int32_t status = 0;
 475
 476        /* Add & lookup to hit invalid TBL24 entry */
 477        ip = RTE_IPV4(128, 0, 0, 0);
 478        depth = 24;
 479        next_hop_add = 100;
 480
 481        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 482        TEST_LPM_ASSERT(lpm != NULL);
 483
 484        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 485        TEST_LPM_ASSERT(status == 0);
 486
 487        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 488        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 489
 490        status = rte_lpm_delete(lpm, ip, depth);
 491        TEST_LPM_ASSERT(status == 0);
 492
 493        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 494        TEST_LPM_ASSERT(status == -ENOENT);
 495
 496        rte_lpm_delete_all(lpm);
 497
 498        /* Add & lookup to hit valid TBL24 entry not extended */
 499        ip = RTE_IPV4(128, 0, 0, 0);
 500        depth = 23;
 501        next_hop_add = 100;
 502
 503        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 504        TEST_LPM_ASSERT(status == 0);
 505
 506        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 507        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 508
 509        depth = 24;
 510        next_hop_add = 101;
 511
 512        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 513        TEST_LPM_ASSERT(status == 0);
 514
 515        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 516        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 517
 518        depth = 24;
 519
 520        status = rte_lpm_delete(lpm, ip, depth);
 521        TEST_LPM_ASSERT(status == 0);
 522
 523        depth = 23;
 524
 525        status = rte_lpm_delete(lpm, ip, depth);
 526        TEST_LPM_ASSERT(status == 0);
 527
 528        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 529        TEST_LPM_ASSERT(status == -ENOENT);
 530
 531        rte_lpm_delete_all(lpm);
 532
 533        /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
 534         * entry */
 535        ip = RTE_IPV4(128, 0, 0, 0);
 536        depth = 32;
 537        next_hop_add = 100;
 538
 539        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 540        TEST_LPM_ASSERT(status == 0);
 541
 542        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 543        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 544
 545        ip = RTE_IPV4(128, 0, 0, 5);
 546        depth = 32;
 547        next_hop_add = 101;
 548
 549        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 550        TEST_LPM_ASSERT(status == 0);
 551
 552        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 553        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 554
 555        status = rte_lpm_delete(lpm, ip, depth);
 556        TEST_LPM_ASSERT(status == 0);
 557
 558        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 559        TEST_LPM_ASSERT(status == -ENOENT);
 560
 561        ip = RTE_IPV4(128, 0, 0, 0);
 562        depth = 32;
 563        next_hop_add = 100;
 564
 565        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 566        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 567
 568        status = rte_lpm_delete(lpm, ip, depth);
 569        TEST_LPM_ASSERT(status == 0);
 570
 571        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 572        TEST_LPM_ASSERT(status == -ENOENT);
 573
 574        rte_lpm_delete_all(lpm);
 575
 576        /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
 577         * entry */
 578        ip_1 = RTE_IPV4(128, 0, 0, 0);
 579        depth_1 = 25;
 580        next_hop_add_1 = 101;
 581
 582        ip_2 = RTE_IPV4(128, 0, 0, 5);
 583        depth_2 = 32;
 584        next_hop_add_2 = 102;
 585
 586        next_hop_return = 0;
 587
 588        status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
 589        TEST_LPM_ASSERT(status == 0);
 590
 591        status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
 592        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 593
 594        status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
 595        TEST_LPM_ASSERT(status == 0);
 596
 597        status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
 598        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
 599
 600        status = rte_lpm_delete(lpm, ip_2, depth_2);
 601        TEST_LPM_ASSERT(status == 0);
 602
 603        status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
 604        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 605
 606        status = rte_lpm_delete(lpm, ip_1, depth_1);
 607        TEST_LPM_ASSERT(status == 0);
 608
 609        status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
 610        TEST_LPM_ASSERT(status == -ENOENT);
 611
 612        rte_lpm_free(lpm);
 613
 614        return PASS;
 615}
 616
 617
 618/*
 619 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
 620 *   lookup)
 621 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 622 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 623 *   delete & lookup)
 624 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 625 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 626 * - Delete a rule that is not present in the TBL24 & lookup
 627 * - Delete a rule that is not present in the TBL8 & lookup
 628 *
 629 */
 630int32_t
 631test10(void)
 632{
 633
 634        struct rte_lpm *lpm = NULL;
 635        struct rte_lpm_config config;
 636
 637        config.max_rules = MAX_RULES;
 638        config.number_tbl8s = NUMBER_TBL8S;
 639        config.flags = 0;
 640        uint32_t ip, next_hop_add, next_hop_return;
 641        uint8_t depth;
 642        int32_t status = 0;
 643
 644        /* Add rule that covers a TBL24 range previously invalid & lookup
 645         * (& delete & lookup) */
 646        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 647        TEST_LPM_ASSERT(lpm != NULL);
 648
 649        ip = RTE_IPV4(128, 0, 0, 0);
 650        depth = 16;
 651        next_hop_add = 100;
 652
 653        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 654        TEST_LPM_ASSERT(status == 0);
 655
 656        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 657        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 658
 659        status = rte_lpm_delete(lpm, ip, depth);
 660        TEST_LPM_ASSERT(status == 0);
 661
 662        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 663        TEST_LPM_ASSERT(status == -ENOENT);
 664
 665        rte_lpm_delete_all(lpm);
 666
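            /* Add rule that extends a TBL24 invalid entry & lookup
             * (& delete & lookup) */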
 667        ip = RTE_IPV4(128, 0, 0, 0);
 668        depth = 25;
 669        next_hop_add = 100;
 670
 671        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 672        TEST_LPM_ASSERT(status == 0);
 673
 674        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 675        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 676
 677        status = rte_lpm_delete(lpm, ip, depth);
 678        TEST_LPM_ASSERT(status == 0);
 679
 680        rte_lpm_delete_all(lpm);
 681
 682        /* Add rule that extends a TBL24 valid entry & lookup for both rules
 683         * (& delete & lookup) */
 684
 685        ip = RTE_IPV4(128, 0, 0, 0);
 686        depth = 24;
 687        next_hop_add = 100;
 688
 689        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 690        TEST_LPM_ASSERT(status == 0);
 691
 692        ip = RTE_IPV4(128, 0, 0, 10);
 693        depth = 32;
 694        next_hop_add = 101;
 695
 696        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 697        TEST_LPM_ASSERT(status == 0);
 698
 699        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 700        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 701
 702        ip = RTE_IPV4(128, 0, 0, 0);
 703        next_hop_add = 100;
 704
 705        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 706        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 707
 708        ip = RTE_IPV4(128, 0, 0, 0);
 709        depth = 24;
 710
 711        status = rte_lpm_delete(lpm, ip, depth);
 712        TEST_LPM_ASSERT(status == 0);
 713
 714        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 715        TEST_LPM_ASSERT(status == -ENOENT);
 716
 717        ip = RTE_IPV4(128, 0, 0, 10);
 718        depth = 32;
 719
 720        status = rte_lpm_delete(lpm, ip, depth);
 721        TEST_LPM_ASSERT(status == 0);
 722
 723        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 724        TEST_LPM_ASSERT(status == -ENOENT);
 725
 726        rte_lpm_delete_all(lpm);
 727
 728        /* Add rule that updates the next hop in TBL24 & lookup
 729         * (& delete & lookup) */
 730
 731        ip = RTE_IPV4(128, 0, 0, 0);
 732        depth = 24;
 733        next_hop_add = 100;
 734
 735        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 736        TEST_LPM_ASSERT(status == 0);
 737
 738        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 739        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 740
 741        next_hop_add = 101;
 742
 743        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 744        TEST_LPM_ASSERT(status == 0);
 745
 746        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 747        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 748
 749        status = rte_lpm_delete(lpm, ip, depth);
 750        TEST_LPM_ASSERT(status == 0);
 751
 752        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 753        TEST_LPM_ASSERT(status == -ENOENT);
 754
 755        rte_lpm_delete_all(lpm);
 756
 757        /* Add rule that updates the next hop in TBL8 & lookup
 758         * (& delete & lookup) */
 759
 760        ip = RTE_IPV4(128, 0, 0, 0);
 761        depth = 32;
 762        next_hop_add = 100;
 763
 764        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 765        TEST_LPM_ASSERT(status == 0);
 766
 767        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 768        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 769
 770        next_hop_add = 101;
 771
 772        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 773        TEST_LPM_ASSERT(status == 0);
 774
 775        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 776        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 777
 778        status = rte_lpm_delete(lpm, ip, depth);
 779        TEST_LPM_ASSERT(status == 0);
 780
 781        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 782        TEST_LPM_ASSERT(status == -ENOENT);
 783
 784        rte_lpm_delete_all(lpm);
 785
 786        /* Delete a rule that is not present in the TBL24 & lookup */
 787
 788        ip = RTE_IPV4(128, 0, 0, 0);
 789        depth = 24;
 790
 791        status = rte_lpm_delete(lpm, ip, depth);
 792        TEST_LPM_ASSERT(status < 0);
 793
 794        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 795        TEST_LPM_ASSERT(status == -ENOENT);
 796
 797        rte_lpm_delete_all(lpm);
 798
 799        /* Delete a rule that is not present in the TBL8 & lookup */
 800
 801        ip = RTE_IPV4(128, 0, 0, 0);
 802        depth = 32;
 803
 804        status = rte_lpm_delete(lpm, ip, depth);
 805        TEST_LPM_ASSERT(status < 0);
 806
 807        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 808        TEST_LPM_ASSERT(status == -ENOENT);
 809
 810        rte_lpm_free(lpm);
 811
 812        return PASS;
 813}
 814
 815/*
 816 * Add two rules: lookup to hit the more specific one, then lookup to hit the
 817 * less specific one. Delete the less specific rule and lookup the previous
 818 * values again; add a more specific rule than the existing rule and lookup
 819 * again.
 820 */
 821int32_t
 822test11(void)
 823{
 824
 825        struct rte_lpm *lpm = NULL;
 826        struct rte_lpm_config config;
 827
 828        config.max_rules = MAX_RULES;
 829        config.number_tbl8s = NUMBER_TBL8S;
 830        config.flags = 0;
 831        uint32_t ip, next_hop_add, next_hop_return;
 832        uint8_t depth;
 833        int32_t status = 0;
 834
 835        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 836        TEST_LPM_ASSERT(lpm != NULL);
 837
 838        ip = RTE_IPV4(128, 0, 0, 0);
 839        depth = 24;
 840        next_hop_add = 100;
 841
 842        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 843        TEST_LPM_ASSERT(status == 0);
 844
 845        ip = RTE_IPV4(128, 0, 0, 10);
 846        depth = 32;
 847        next_hop_add = 101;
 848
 849        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 850        TEST_LPM_ASSERT(status == 0);
 851
 852        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 853        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 854
 855        ip = RTE_IPV4(128, 0, 0, 0);
 856        next_hop_add = 100;
 857
 858        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 859        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 860
 861        ip = RTE_IPV4(128, 0, 0, 0);
 862        depth = 24;
 863
 864        status = rte_lpm_delete(lpm, ip, depth);
 865        TEST_LPM_ASSERT(status == 0);
 866
 867        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 868        TEST_LPM_ASSERT(status == -ENOENT);
 869
 870        ip = RTE_IPV4(128, 0, 0, 10);
 871        depth = 32;
 872
 873        status = rte_lpm_delete(lpm, ip, depth);
 874        TEST_LPM_ASSERT(status == 0);
 875
 876        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 877        TEST_LPM_ASSERT(status == -ENOENT);
 878
 879        rte_lpm_free(lpm);
 880
 881        return PASS;
 882}
 883
 884/*
 885 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
 886 * lookup (miss), in a loop of 1000 iterations. This checks tbl8 extension
 887 * and contraction.
 888 *
 889 */
 890
 891int32_t
 892test12(void)
 893{
 894        xmm_t ipx4;
 895        uint32_t hop[4];
 896        struct rte_lpm *lpm = NULL;
 897        struct rte_lpm_config config;
 898
 899        config.max_rules = MAX_RULES;
 900        config.number_tbl8s = NUMBER_TBL8S;
 901        config.flags = 0;
 902        uint32_t ip, i, next_hop_add, next_hop_return;
 903        uint8_t depth;
 904        int32_t status = 0;
 905
 906        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 907        TEST_LPM_ASSERT(lpm != NULL);
 908
 909        ip = RTE_IPV4(128, 0, 0, 0);
 910        depth = 32;
 911        next_hop_add = 100;
 912
 913        for (i = 0; i < 1000; i++) {
 914                status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 915                TEST_LPM_ASSERT(status == 0);
 916
 917                status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 918                TEST_LPM_ASSERT((status == 0) &&
 919                                (next_hop_return == next_hop_add));
 920
 921                ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
 922                rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 923                TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 924                TEST_LPM_ASSERT(hop[1] == next_hop_add);
 925                TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 926                TEST_LPM_ASSERT(hop[3] == next_hop_add);
 927
 928                status = rte_lpm_delete(lpm, ip, depth);
 929                TEST_LPM_ASSERT(status == 0);
 930
 931                status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 932                TEST_LPM_ASSERT(status == -ENOENT);
 933        }
 934
 935        rte_lpm_free(lpm);
 936
 937        return PASS;
 938}
 939
 940/*
 941 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
 942 * tbl24 entry, lookup (hit). Delete the rule that caused the tbl24 extension,
 943 * lookup (miss), and repeat in a loop of 1000 iterations. This checks tbl8
 944 * extension and contraction.
 945 *
 946 */
 947
 948int32_t
 949test13(void)
 950{
 951        struct rte_lpm *lpm = NULL;
 952        struct rte_lpm_config config;
 953
 954        config.max_rules = MAX_RULES;
 955        config.number_tbl8s = NUMBER_TBL8S;
 956        config.flags = 0;
 957        uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
 958        uint8_t depth;
 959        int32_t status = 0;
 960
 961        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 962        TEST_LPM_ASSERT(lpm != NULL);
 963
 964        ip = RTE_IPV4(128, 0, 0, 0);
 965        depth = 24;
 966        next_hop_add_1 = 100;
 967
 968        status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
 969        TEST_LPM_ASSERT(status == 0);
 970
 971        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 972        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 973
 974        depth = 32;
 975        next_hop_add_2 = 101;
 976
 977        for (i = 0; i < 1000; i++) {
 978                status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
 979                TEST_LPM_ASSERT(status == 0);
 980
 981                status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 982                TEST_LPM_ASSERT((status == 0) &&
 983                                (next_hop_return == next_hop_add_2));
 984
 985                status = rte_lpm_delete(lpm, ip, depth);
 986                TEST_LPM_ASSERT(status == 0);
 987
 988                status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 989                TEST_LPM_ASSERT((status == 0) &&
 990                                (next_hop_return == next_hop_add_1));
 991        }
 992
 993        depth = 24;
 994
 995        status = rte_lpm_delete(lpm, ip, depth);
 996        TEST_LPM_ASSERT(status == 0);
 997
 998        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 999        TEST_LPM_ASSERT(status == -ENOENT);
1000
1001        rte_lpm_free(lpm);
1002
1003        return PASS;
1004}
1005
1006/*
1007 * Test tbl8 extension exhaustion. Add 512 rules that each require a tbl8
1008 * extension, so that no more tbl8 extensions are available. Then add one
1009 * more rule that requires a tbl8 extension and check that it fails.
1010 */
1011int32_t
1012test14(void)
1013{
1014
1015        /* We only use depth = 32 in the loop below, so we must make sure
1016         * that we have enough storage for all rules at that depth. */
1017
1018        struct rte_lpm *lpm = NULL;
1019        struct rte_lpm_config config;
1020
1021        config.max_rules = 256 * 32;
1022        config.number_tbl8s = 512;
1023        config.flags = 0;
1024        uint32_t ip, next_hop_base, next_hop_return;
1025        uint8_t depth;
1026        int32_t status = 0;
1027        xmm_t ipx4;
1028        uint32_t hop[4];
1029
1030        /* Add enough space for 256 rules for every depth */
1031        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1032        TEST_LPM_ASSERT(lpm != NULL);
1033
1034        depth = 32;
1035        next_hop_base = 100;
1036        ip = RTE_IPV4(0, 0, 0, 0);
1037
1038        /* Add 512 rules, one per /24, each requiring a tbl8 extension */
1039        for (; ip <= RTE_IPV4(0, 1, 255, 0); ip += 256) {
1040                status = rte_lpm_add(lpm, ip, depth, next_hop_base + ip);
1041                TEST_LPM_ASSERT(status == 0);
1042
1043                status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1044                TEST_LPM_ASSERT((status == 0) &&
1045                                (next_hop_return == next_hop_base + ip));
1046
1047                ipx4 = vect_set_epi32(ip + 3, ip + 2, ip + 1, ip);
1048                rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
1049                TEST_LPM_ASSERT(hop[0] == next_hop_base + ip);
1050                TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
1051                TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
1052                TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
1053        }
1054
1055        /* All tbl8 extensions have been used above. Try to add one more
1056         * and check that it fails */
1057        ip = RTE_IPV4(1, 0, 0, 0);
1058        depth = 32;
1059
1060        status = rte_lpm_add(lpm, ip, depth, next_hop_base + ip);
1061        TEST_LPM_ASSERT(status < 0);
1062
1063        rte_lpm_free(lpm);
1064
1065        return PASS;
1066}
1067
1068/*
1069 * Sequence of operations for find existing lpm table
1070 *
1071 *  - create table
1072 *  - find existing table: hit
1073 *  - find non-existing table: miss
1074 *
1075 */
1076int32_t
1077test15(void)
1078{
1079        struct rte_lpm *lpm = NULL, *result = NULL;
1080        struct rte_lpm_config config;
1081
1082        config.max_rules = 256 * 32;
1083        config.number_tbl8s = NUMBER_TBL8S;
1084        config.flags = 0;
1085
1086        /* Create lpm  */
1087        lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
1088        TEST_LPM_ASSERT(lpm != NULL);
1089
1090        /* Try to find existing lpm */
1091        result = rte_lpm_find_existing("lpm_find_existing");
1092        TEST_LPM_ASSERT(result == lpm);
1093
1094        /* Try to find non-existing lpm */
1095        result = rte_lpm_find_existing("lpm_find_non_existing");
1096        TEST_LPM_ASSERT(result == NULL);
1097
1098        /* Cleanup. */
1099        rte_lpm_delete_all(lpm);
1100        rte_lpm_free(lpm);
1101
1102        return PASS;
1103}
1104
1105/*
1106 * Test the failure condition of overloading the tbl8 groups so no more fit.
1107 * Check that we get an error return value in that case.
1108 */
1109int32_t
1110test16(void)
1111{
1112        uint32_t ip;
1113        struct rte_lpm_config config;
1114
1115        config.max_rules = 256 * 32;
1116        config.number_tbl8s = NUMBER_TBL8S;
1117        config.flags = 0;
1118        struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
            TEST_LPM_ASSERT(lpm != NULL);
1119
1120        /* ip loops through all possibilities for top 24 bits of address */
1121        for (ip = 0; ip < 0xFFFFFF; ip++) {
1122                /* add an entry within a different tbl8 each time, since
1123                 * depth >24 and the top 24 bits are different */
1124                if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1125                        break;
1126        }
1127
1128        if (ip != NUMBER_TBL8S) {
1129                printf("Error, unexpected failure with filling tbl8 groups\n");
1130                printf("Failed after %u additions, expected after %u\n",
1131                                (unsigned)ip, (unsigned)NUMBER_TBL8S);
1132        }
1133
1134        rte_lpm_free(lpm);
1135        return 0;
1136}
1137
1138/*
1139 * Test for overwriting of tbl8:
1140 *  - add rule /32 and lookup
1141 *  - add new rule /24 and lookup
1142 *  - add third rule /25 and lookup
1143 *  - lookup /32 and /24 rule to ensure the table has not been overwritten.
1144 */
1145int32_t
1146test17(void)
1147{
1148        struct rte_lpm *lpm = NULL;
1149        struct rte_lpm_config config;
1150
1151        config.max_rules = MAX_RULES;
1152        config.number_tbl8s = NUMBER_TBL8S;
1153        config.flags = 0;
1154        const uint32_t ip_10_32 = RTE_IPV4(10, 10, 10, 2);
1155        const uint32_t ip_10_24 = RTE_IPV4(10, 10, 10, 0);
1156        const uint32_t ip_20_25 = RTE_IPV4(10, 10, 20, 2);
1157        const uint8_t d_ip_10_32 = 32,
1158                        d_ip_10_24 = 24,
1159                        d_ip_20_25 = 25;
1160        const uint32_t next_hop_ip_10_32 = 100,
1161                        next_hop_ip_10_24 = 105,
1162                        next_hop_ip_20_25 = 111;
1163        uint32_t next_hop_return = 0;
1164        int32_t status = 0;
1165
1166        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1167        TEST_LPM_ASSERT(lpm != NULL);
1168
1169        if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1170                        next_hop_ip_10_32)) < 0)
1171                return -1;
1172
1173        status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1174        uint32_t test_hop_10_32 = next_hop_return;
1175        TEST_LPM_ASSERT(status == 0);
1176        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1177
1178        if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1179                        next_hop_ip_10_24)) < 0)
1180                return -1;
1181
1182        status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1183        uint32_t test_hop_10_24 = next_hop_return;
1184        TEST_LPM_ASSERT(status == 0);
1185        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1186
1187        if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1188                        next_hop_ip_20_25)) < 0)
1189                return -1;
1190
1191        status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1192        uint32_t test_hop_20_25 = next_hop_return;
1193        TEST_LPM_ASSERT(status == 0);
1194        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1195
1196        if (test_hop_10_32 == test_hop_10_24) {
1197                printf("Next hop return equal\n");
1198                return -1;
1199        }
1200
1201        if (test_hop_10_24 == test_hop_20_25) {
1202                printf("Next hop return equal\n");
1203                return -1;
1204        }
1205
1206        status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1207        TEST_LPM_ASSERT(status == 0);
1208        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1209
1210        status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1211        TEST_LPM_ASSERT(status == 0);
1212        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1213
1214        rte_lpm_free(lpm);
1215
1216        return PASS;
1217}
1218
1219/*
1220 * Test for recycle of tbl8
1221 *  - step 1: add a rule with depth=28 (> 24)
1222 *  - step 2: add a rule with same 24-bit prefix and depth=23 (< 24)
1223 *  - step 3: delete the first rule
1224 *  - step 4: check tbl8 is freed
1225 *  - step 5: add a rule same as the first one (depth=28)
1226 *  - step 6: check same tbl8 is allocated
1227 *  - step 7: add a rule with same 24-bit prefix and depth=24
1228 *  - step 8: delete the rule (depth=28) added in step 5
1229 *  - step 9: check tbl8 is freed
1230 *  - step 10: add a rule with same 24-bit prefix and depth = 28
1231 *  - step 11: check same tbl8 is allocated again
1232 */
1233int32_t
1234test18(void)
1235{
1236#define group_idx next_hop
1237        struct rte_lpm *lpm = NULL;
1238        struct rte_lpm_config config;
1239        uint32_t ip, next_hop;
1240        uint8_t depth;
1241        uint32_t tbl8_group_index;
1242
1243        config.max_rules = MAX_RULES;
1244        config.number_tbl8s = NUMBER_TBL8S;
1245        config.flags = 0;
1246
1247        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1248        TEST_LPM_ASSERT(lpm != NULL);
1249
1250        ip = RTE_IPV4(192, 168, 100, 100);
1251        depth = 28;
1252        next_hop = 1;
1253        rte_lpm_add(lpm, ip, depth, next_hop);
1254
1255        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1256        tbl8_group_index = lpm->tbl24[ip>>8].group_idx;
1257
1258        depth = 23;
1259        next_hop = 2;
1260        rte_lpm_add(lpm, ip, depth, next_hop);
1261        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1262
1263        depth = 28;
1264        rte_lpm_delete(lpm, ip, depth);
1265
1266        TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1267
1268        next_hop = 3;
1269        rte_lpm_add(lpm, ip, depth, next_hop);
1270
1271        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1272        TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
1273
1274        depth = 24;
1275        next_hop = 4;
1276        rte_lpm_add(lpm, ip, depth, next_hop);
1277        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1278
1279        depth = 28;
1280        rte_lpm_delete(lpm, ip, depth);
1281
1282        TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1283
1284        next_hop = 5;
1285        rte_lpm_add(lpm, ip, depth, next_hop);
1286
1287        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1288        TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
1289
1290        rte_lpm_free(lpm);
1291#undef group_idx
1292        return PASS;
1293}
1294
1295/*
1296 * rte_lpm_rcu_qsbr_add positive and negative tests.
1297 *  - Add RCU QSBR variable to LPM
1298 *  - Add another RCU QSBR variable to LPM
1299 *  - Check returns
1300 */
1301int32_t
1302test19(void)
1303{
1304        struct rte_lpm *lpm = NULL;
1305        struct rte_lpm_config config;
1306        size_t sz;
1307        struct rte_rcu_qsbr *qsv;
1308        struct rte_rcu_qsbr *qsv2;
1309        int32_t status;
1310        struct rte_lpm_rcu_config rcu_cfg = {0};
1311
1312        config.max_rules = MAX_RULES;
1313        config.number_tbl8s = NUMBER_TBL8S;
1314        config.flags = 0;
1315
1316        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1317        TEST_LPM_ASSERT(lpm != NULL);
1318
1319        /* Create RCU QSBR variable */
1320        sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
1321        qsv = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1322                                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1323        TEST_LPM_ASSERT(qsv != NULL);
1324
1325        status = rte_rcu_qsbr_init(qsv, RTE_MAX_LCORE);
1326        TEST_LPM_ASSERT(status == 0);
1327
1328        rcu_cfg.v = qsv;
1329        /* Invalid QSBR mode */
1330        rcu_cfg.mode = 2;
1331        status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1332        TEST_LPM_ASSERT(status != 0);
1333
1334        rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
1335        /* Attach RCU QSBR to LPM table */
1336        status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1337        TEST_LPM_ASSERT(status == 0);
1338
1339        /* Create and attach another RCU QSBR to LPM table */
1340        qsv2 = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1341                                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1342        TEST_LPM_ASSERT(qsv2 != NULL);
1343
1344        rcu_cfg.v = qsv2;
1345        rcu_cfg.mode = RTE_LPM_QSBR_MODE_SYNC;
1346        status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1347        TEST_LPM_ASSERT(status != 0);
1348
1349        rte_lpm_free(lpm);
1350        rte_free(qsv);
1351        rte_free(qsv2);
1352
1353        return PASS;
1354}
1355
1356/*
1357 * rte_lpm_rcu_qsbr_add DQ mode functional test.
1358 * Reader and writer are in the same thread in this test.
1359 *  - Create LPM which supports 1 tbl8 group at max
1360 *  - Add RCU QSBR variable to LPM
1361 *  - Add a rule with depth=28 (> 24)
1362 *  - Register a reader thread (not a real thread)
1363 *  - Reader lookup existing rule
1364 *  - Writer delete the rule
1365 *  - Reader lookup the rule
1366 *  - Writer re-add the rule (no available tbl8 group)
1367 *  - Reader report quiescent state and unregister
1368 *  - Writer re-add the rule
1369 *  - Reader lookup the rule
1370 */
1371int32_t
1372test20(void)
1373{
1374        struct rte_lpm *lpm = NULL;
1375        struct rte_lpm_config config;
1376        size_t sz;
1377        struct rte_rcu_qsbr *qsv;
1378        int32_t status;
1379        uint32_t ip, next_hop, next_hop_return;
1380        uint8_t depth;
1381        struct rte_lpm_rcu_config rcu_cfg = {0};
1382
1383        config.max_rules = MAX_RULES;
1384        config.number_tbl8s = 1;
1385        config.flags = 0;
1386
1387        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1388        TEST_LPM_ASSERT(lpm != NULL);
1389
1390        /* Create RCU QSBR variable */
1391        sz = rte_rcu_qsbr_get_memsize(1);
1392        qsv = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1393                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1394        TEST_LPM_ASSERT(qsv != NULL);
1395
1396        status = rte_rcu_qsbr_init(qsv, 1);
1397        TEST_LPM_ASSERT(status == 0);
1398
1399        rcu_cfg.v = qsv;
1400        rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
1401        /* Attach RCU QSBR to LPM table */
1402        status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1403        TEST_LPM_ASSERT(status == 0);
1404
1405        ip = RTE_IPV4(192, 0, 2, 100);
1406        depth = 28;
1407        next_hop = 1;
1408        status = rte_lpm_add(lpm, ip, depth, next_hop);
1409        TEST_LPM_ASSERT(status == 0);
1410        TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1411
1412        /* Register pseudo reader */
1413        status = rte_rcu_qsbr_thread_register(qsv, 0);
1414        TEST_LPM_ASSERT(status == 0);
1415        rte_rcu_qsbr_thread_online(qsv, 0);
1416
1417        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1418        TEST_LPM_ASSERT(status == 0);
1419        TEST_LPM_ASSERT(next_hop_return == next_hop);
1420
1421        /* Writer update */
1422        status = rte_lpm_delete(lpm, ip, depth);
1423        TEST_LPM_ASSERT(status == 0);
1424        TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid);
1425
1426        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1427        TEST_LPM_ASSERT(status != 0);
1428
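            /* In DQ mode the freed tbl8 group stays on the RCU defer queue
             * until the reader reports quiescence, so re-adding the rule fails
             * while the only tbl8 group is still unreclaimed. */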
1429        status = rte_lpm_add(lpm, ip, depth, next_hop);
1430        TEST_LPM_ASSERT(status != 0);
1431
1432        /* Reader quiescent */
1433        rte_rcu_qsbr_quiescent(qsv, 0);
1434
1435        status = rte_lpm_add(lpm, ip, depth, next_hop);
1436        TEST_LPM_ASSERT(status == 0);
1437
1438        rte_rcu_qsbr_thread_offline(qsv, 0);
1439        status = rte_rcu_qsbr_thread_unregister(qsv, 0);
1440        TEST_LPM_ASSERT(status == 0);
1441
1442        status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1443        TEST_LPM_ASSERT(status == 0);
1444        TEST_LPM_ASSERT(next_hop_return == next_hop);
1445
1446        rte_lpm_free(lpm);
1447        rte_free(qsv);
1448
1449        return PASS;
1450}
1451
1452static struct rte_lpm *g_lpm;
1453static struct rte_rcu_qsbr *g_v;
1454static uint32_t g_ip = RTE_IPV4(192, 0, 2, 100);
1455static volatile uint8_t writer_done;
1456/* Report quiescent state every 1024 lookups. Larger critical
1457 * sections in reader will result in writer polling multiple times.
1458 */
1459#define QSBR_REPORTING_INTERVAL 1024
1460#define WRITER_ITERATIONS       512
1461
1462/*
1463 * Reader thread using rte_lpm data structure with RCU.
1464 */
1465static int
1466test_lpm_rcu_qsbr_reader(void *arg)
1467{
1468        int i;
1469        uint32_t next_hop_return = 0;
1470
1471        RTE_SET_USED(arg);
1472        /* Register this thread to report quiescent state */
1473        rte_rcu_qsbr_thread_register(g_v, 0);
1474        rte_rcu_qsbr_thread_online(g_v, 0);
1475
1476        do {
1477                for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
1478                        rte_lpm_lookup(g_lpm, g_ip, &next_hop_return);
1479
1480                /* Update quiescent state */
1481                rte_rcu_qsbr_quiescent(g_v, 0);
1482        } while (!writer_done);
1483
1484        rte_rcu_qsbr_thread_offline(g_v, 0);
1485        rte_rcu_qsbr_thread_unregister(g_v, 0);
1486
1487        return 0;
1488}
1489
1490/*
1491 * rte_lpm_rcu_qsbr_add sync mode functional test.
1492 * 1 Reader and 1 writer. They cannot be in the same thread in this test.
1493 *  - Create LPM which supports 1 tbl8 group at max
1494 *  - Add RCU QSBR variable with sync mode to LPM
1495 *  - Register a reader thread. Reader keeps looking up a specific rule.
1496 *  - Writer keeps adding and deleting a specific rule with depth=28 (> 24)
1497 */
1498int32_t
1499test21(void)
1500{
1501        struct rte_lpm_config config;
1502        size_t sz;
1503        int32_t status;
1504        uint32_t i, next_hop;
1505        uint8_t depth;
1506        struct rte_lpm_rcu_config rcu_cfg = {0};
1507
1508        if (rte_lcore_count() < 2) {
1509                printf("Not enough cores for %s, expecting at least 2\n",
1510                        __func__);
1511                return TEST_SKIPPED;
1512        }
1513
1514        config.max_rules = MAX_RULES;
1515        config.number_tbl8s = 1;
1516        config.flags = 0;
1517
1518        g_lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1519        TEST_LPM_ASSERT(g_lpm != NULL);
1520
1521        /* Create RCU QSBR variable */
1522        sz = rte_rcu_qsbr_get_memsize(1);
1523        g_v = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1524                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1525        TEST_LPM_ASSERT(g_v != NULL);
1526
1527        status = rte_rcu_qsbr_init(g_v, 1);
1528        TEST_LPM_ASSERT(status == 0);
1529
1530        rcu_cfg.v = g_v;
1531        rcu_cfg.mode = RTE_LPM_QSBR_MODE_SYNC;
1532        /* Attach RCU QSBR to LPM table */
1533        status = rte_lpm_rcu_qsbr_add(g_lpm, &rcu_cfg);
1534        TEST_LPM_ASSERT(status == 0);
1535
1536        writer_done = 0;
1537        /* Launch reader thread */
1538        rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
1539                                rte_get_next_lcore(-1, 1, 0));
1540
1541        depth = 28;
1542        next_hop = 1;
1543        status = rte_lpm_add(g_lpm, g_ip, depth, next_hop);
1544        if (status != 0) {
1545                printf("%s: Failed to add rule\n", __func__);
1546                goto error;
1547        }
1548
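            /* In sync mode, deleting the rule blocks until the reader reports
             * quiescence, so the single tbl8 group can be reused by the
             * following add. */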
1549        /* Writer update */
1550        for (i = 0; i < WRITER_ITERATIONS; i++) {
1551                status = rte_lpm_delete(g_lpm, g_ip, depth);
1552                if (status != 0) {
1553                        printf("%s: Failed to delete rule at iteration %d\n",
1554                                __func__, i);
1555                        goto error;
1556                }
1557
1558                status = rte_lpm_add(g_lpm, g_ip, depth, next_hop);
1559                if (status != 0) {
1560                        printf("%s: Failed to add rule at iteration %d\n",
1561                                __func__, i);
1562                        goto error;
1563                }
1564        }
1565
1566error:
1567        writer_done = 1;
1568        /* Wait until reader exited. */
1569        rte_eal_mp_wait_lcore();
1570
1571        rte_lpm_free(g_lpm);
1572        rte_free(g_v);
1573
1574        return (status == 0) ? PASS : -1;
1575}
1576
1577/*
1578 * Do all unit tests.
1579 */
1580
1581static int
1582test_lpm(void)
1583{
1584        unsigned i;
1585        int status, global_status = 0;
1586
1587        for (i = 0; i < RTE_DIM(tests); i++) {
1588                status = tests[i]();
1589                if (status < 0) {
1590                        printf("ERROR: LPM Test %u: FAIL\n", i);
1591                        global_status = status;
1592                }
1593        }
1594
1595        return global_status;
1596}
1597
1598#endif /* !RTE_EXEC_ENV_WINDOWS */
1599
1600REGISTER_TEST_COMMAND(lpm_autotest, test_lpm);
1601