linux/net/tipc/node.c
/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005-2006, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "cluster.h"
#include "net.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "port.h"
#include "bearer.h"
#include "name_distr.h"

void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

struct tipc_node *tipc_nodes = NULL;    /* sorted list of nodes within cluster */

static DEFINE_SPINLOCK(node_create_lock);

u32 tipc_own_tag = 0;

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */

struct tipc_node *tipc_node_create(u32 addr)
{
        struct cluster *c_ptr;
        struct tipc_node *n_ptr;
        struct tipc_node **curr_node;

        spin_lock_bh(&node_create_lock);

        for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                if (addr < n_ptr->addr)
                        break;
                if (addr == n_ptr->addr) {
                        spin_unlock_bh(&node_create_lock);
                        return n_ptr;
                }
        }

        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
                spin_unlock_bh(&node_create_lock);
                warn("Node creation failed, no memory\n");
                return NULL;
        }

        c_ptr = tipc_cltr_find(addr);
        if (!c_ptr) {
                c_ptr = tipc_cltr_create(addr);
        }
        if (!c_ptr) {
                spin_unlock_bh(&node_create_lock);
                kfree(n_ptr);
                return NULL;
        }

        n_ptr->addr = addr;
        spin_lock_init(&n_ptr->lock);
        INIT_LIST_HEAD(&n_ptr->nsub);
        n_ptr->owner = c_ptr;
        tipc_cltr_attach_node(c_ptr, n_ptr);
        n_ptr->last_router = -1;

        /* Insert node into ordered list */
        for (curr_node = &tipc_nodes; *curr_node;
             curr_node = &(*curr_node)->next) {
                if (addr < (*curr_node)->addr) {
                        n_ptr->next = *curr_node;
                        break;
                }
        }
        (*curr_node) = n_ptr;
        spin_unlock_bh(&node_create_lock);
        return n_ptr;
}

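/**
 * tipc_node_delete - free node structure
 *
 * Frees the node; its links are assumed to have been deleted already
 * (see tipc_bearer_stop(), per the disabled code below).
 */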
void tipc_node_delete(struct tipc_node *n_ptr)
{
        if (!n_ptr)
                return;

#if 0
        /* Not needed because links are already deleted via tipc_bearer_stop() */

        u32 l_num;

        for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
                link_delete(n_ptr->links[l_num]);
        }
#endif

        dbg("node %x deleted\n", n_ptr->addr);
        kfree(n_ptr);
}


/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
        struct link **active = &n_ptr->active_links[0];

        n_ptr->working_links++;

        info("Established link <%s> on network plane %c\n",
             l_ptr->name, l_ptr->b_ptr->net_plane);

        if (!active[0]) {
                dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
                active[0] = active[1] = l_ptr;
                node_established_contact(n_ptr);
                return;
        }
        if (l_ptr->priority < active[0]->priority) {
                info("New link <%s> becomes standby\n", l_ptr->name);
                return;
        }
        tipc_link_send_duplicate(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
                return;
        }
        info("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
                info("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
        struct link **active = &n_ptr->active_links[0];
        u32 i;
        u32 highest_prio = 0;

        active[0] = active[1] = NULL;

        for (i = 0; i < MAX_BEARERS; i++) {
                struct link *l_ptr = n_ptr->links[i];

                if (!l_ptr || !tipc_link_is_up(l_ptr) ||
                    (l_ptr->priority < highest_prio))
                        continue;

                if (l_ptr->priority > highest_prio) {
                        highest_prio = l_ptr->priority;
                        active[0] = active[1] = l_ptr;
                } else {
                        active[1] = l_ptr;
                }
        }
}

/**
 * tipc_node_link_down - handle loss of link
 */

void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
        struct link **active;

        n_ptr->working_links--;

        if (!tipc_link_is_active(l_ptr)) {
                info("Lost standby link <%s> on network plane %c\n",
                     l_ptr->name, l_ptr->b_ptr->net_plane);
                return;
        }
        info("Lost link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->b_ptr->net_plane);

        active = &n_ptr->active_links[0];
        if (active[0] == l_ptr)
                active[0] = active[1];
        if (active[1] == l_ptr)
                active[1] = active[0];
        if (active[0] == l_ptr)
                node_select_active_links(n_ptr);
        if (tipc_node_is_up(n_ptr))
                tipc_link_changeover(l_ptr);
        else
                node_lost_contact(n_ptr);
}

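/**
 * tipc_node_has_active_links - check whether node has at least one active link
 */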
int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
        return (n_ptr &&
                ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
}

int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
        return (n_ptr->working_links > 1);
}

static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
{
        return (n_ptr && (n_ptr->last_router >= 0));
}

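/**
 * tipc_node_is_up - check whether node is currently reachable
 *
 * Node is reachable if it has at least one active link or a known router.
 */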
int tipc_node_is_up(struct tipc_node *n_ptr)
{
        return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
}

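/**
 * tipc_node_attach_link - attach a link endpoint to its destination node
 *
 * Creates the node if it doesn't already exist.  Fails if the node already
 * has a link on the same bearer, or already has links on two bearers.
 */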
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
{
        struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);

        if (!n_ptr)
                n_ptr = tipc_node_create(l_ptr->addr);
        if (n_ptr) {
                u32 bearer_id = l_ptr->b_ptr->identity;
                char addr_string[16];

                if (n_ptr->link_cnt >= 2) {
                        err("Attempt to create third link to %s\n",
                            addr_string_fill(addr_string, n_ptr->addr));
                        return NULL;
                }

                if (!n_ptr->links[bearer_id]) {
                        n_ptr->links[bearer_id] = l_ptr;
                        tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
                        n_ptr->link_cnt++;
                        return n_ptr;
                }
                err("Attempt to establish second link on <%s> to %s\n",
                    l_ptr->b_ptr->publ.name,
                    addr_string_fill(addr_string, l_ptr->addr));
        }
        return NULL;
}

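/**
 * tipc_node_detach_link - detach a link endpoint from its destination node
 */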
void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
        n_ptr->links[l_ptr->b_ptr->identity] = NULL;
        tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
        n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *    of all system nodes within cluster:
 *    (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *    information about all cluster external and slave
 *    nodes which can be reached via this node.
 *    (node.establishedContact()==>network.sendExternalRoutes())
 *    (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *    containing information about the existence of the new node
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *    nodes within cluster that the destination can no longer
 *    be reached via this node.
 *    (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *    routing tables. Note: This is a completely node
 *    local operation.
 *    (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *    containing information about loss of the node
 *    (node.establishedContact()=>cluster.multicastLostRoute())
 *
 */

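/**
 * node_established_contact - handle establishment of contact with a node
 *
 * Synchronizes broadcast link state and distributes routing information
 * according to cases 1-3 above.
 */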
static void node_established_contact(struct tipc_node *n_ptr)
{
        struct cluster *c_ptr;

        dbg("node_established_contact:-> %x\n", n_ptr->addr);
        if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
                tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
        }

        /* Synchronize broadcast acks */
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();

        if (is_slave(tipc_own_addr))
                return;
        if (!in_own_cluster(n_ptr->addr)) {
                /* Usage case 1 (see above) */
                c_ptr = tipc_cltr_find(tipc_own_addr);
                if (!c_ptr)
                        c_ptr = tipc_cltr_create(tipc_own_addr);
                if (c_ptr)
                        tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
                                                  tipc_max_nodes);
                return;
        }

        c_ptr = n_ptr->owner;
        if (is_slave(n_ptr->addr)) {
                /* Usage case 2 (see above) */
                tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
                tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
                return;
        }

        if (n_ptr->bclink.supported) {
                tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
                if (n_ptr->addr < tipc_own_addr)
                        tipc_own_tag++;
        }

        /* Case 3 (see above) */
        tipc_net_send_external_routes(n_ptr->addr);
        tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
        tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
                                  tipc_highest_allowed_slave);
}

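/**
 * node_lost_contact - handle loss of contact with a node
 *
 * Cleans up broadcast link state, updates routing tables according to
 * cases 4-5 above, then aborts any link changeover and notifies node
 * subscribers once the node is completely unreachable.
 */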
static void node_lost_contact(struct tipc_node *n_ptr)
{
        struct cluster *c_ptr;
        struct tipc_node_subscr *ns, *tns;
        char addr_string[16];
        u32 i;

        /* Clean up broadcast reception remains */
        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
        while (n_ptr->bclink.deferred_head) {
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                n_ptr->bclink.deferred_head = buf->next;
                buf_discard(buf);
        }
        if (n_ptr->bclink.defragm) {
                buf_discard(n_ptr->bclink.defragm);
                n_ptr->bclink.defragm = NULL;
        }
        if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
                tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
        }

        /* Update routing tables */
        if (is_slave(tipc_own_addr)) {
                tipc_net_remove_as_router(n_ptr->addr);
        } else {
                if (!in_own_cluster(n_ptr->addr)) {
                        /* Case 4 (see above) */
                        c_ptr = tipc_cltr_find(tipc_own_addr);
                        tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
                                                   tipc_max_nodes);
                } else {
                        /* Case 5 (see above) */
                        c_ptr = tipc_cltr_find(n_ptr->addr);
                        if (is_slave(n_ptr->addr)) {
                                tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
                                                           tipc_max_nodes);
                        } else {
                                if (n_ptr->bclink.supported) {
                                        tipc_nmap_remove(&tipc_cltr_bcast_nodes,
                                                         n_ptr->addr);
                                        if (n_ptr->addr < tipc_own_addr)
                                                tipc_own_tag--;
                                }
                                tipc_net_remove_as_router(n_ptr->addr);
                                tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
                                                           LOWEST_SLAVE,
                                                           tipc_highest_allowed_slave);
                        }
                }
        }
        if (tipc_node_has_active_routes(n_ptr))
                return;

        info("Lost contact with %s\n",
             addr_string_fill(addr_string, n_ptr->addr));

        /* Abort link changeover */
        for (i = 0; i < MAX_BEARERS; i++) {
                struct link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
                tipc_link_reset_fragments(l_ptr);
        }

        /* Notify subscribers */
        list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
                ns->node = NULL;
                list_del_init(&ns->nodesub_list);
                tipc_k_signal((Handler)ns->handle_node_down,
                              (unsigned long)ns->usr_handle);
        }
}

/**
 * tipc_node_select_next_hop - find the next-hop node for a message
 *
 * Called when cluster local lookup has failed.
 */

struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
{
        struct tipc_node *n_ptr;
        u32 router_addr;

        if (!tipc_addr_domain_valid(addr))
                return NULL;

        /* Look for direct link to destination processor */
        n_ptr = tipc_node_find(addr);
        if (n_ptr && tipc_node_has_active_links(n_ptr))
                return n_ptr;

        /* Cluster local system nodes *must* have direct links */
        if (!is_slave(addr) && in_own_cluster(addr))
                return NULL;

        /* Look for cluster local router with direct link to node */
        router_addr = tipc_node_select_router(n_ptr, selector);
        if (router_addr)
                return tipc_node_select(router_addr, selector);

        /* Slave nodes can only be accessed within own cluster via a
           known router with direct link -- if no router was found, give up */
        if (is_slave(addr))
                return NULL;

        /* Inter zone/cluster -- find any direct link to remote cluster */
        addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
        n_ptr = tipc_net_select_remote_node(addr, selector);
        if (n_ptr && tipc_node_has_active_links(n_ptr))
                return n_ptr;

        /* Last resort -- look for any router to anywhere in remote zone */
        router_addr = tipc_net_select_router(addr, selector);
        if (router_addr)
                return tipc_node_select(router_addr, selector);

        return NULL;
}

/**
 * tipc_node_select_router - select router to reach specified node
 *
 * Uses a deterministic and fair algorithm for selecting router node.
 */

u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
{
        u32 ulim;
        u32 mask;
        u32 start;
        u32 r;

        if (!n_ptr)
                return 0;

        if (n_ptr->last_router < 0)
                return 0;
        ulim = ((n_ptr->last_router + 1) * 32) - 1;

        /* Start entry must be random */
        mask = tipc_max_nodes;
        while (mask > ulim)
                mask >>= 1;
        start = ref & mask;
        r = start;

        /* Lookup upwards with wrap-around */
        do {
                if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
                        break;
        } while (++r <= ulim);
        if (r > ulim) {
                r = 1;
                do {
                        if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
                                break;
                } while (++r < start);
                assert(r != start);
        }
        assert(r && (r <= ulim));
        return tipc_addr(own_zone(), own_cluster(), r);
}

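/**
 * tipc_node_add_router - record that a node can be reached via a router
 *
 * Sets the router's bit in the node's router bitmap, then updates last_router
 * to index the highest non-empty word of the bitmap.
 */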
void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
{
        u32 r_num = tipc_node(router);

        n_ptr->routers[r_num / 32] =
                ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
        n_ptr->last_router = tipc_max_nodes / 32;
        while ((--n_ptr->last_router >= 0) &&
               !n_ptr->routers[n_ptr->last_router]);
}

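/**
 * tipc_node_remove_router - forget that a node can be reached via a router
 *
 * Clears the router's bit in the node's router bitmap, updates last_router,
 * and triggers node_lost_contact() if the node is no longer reachable.
 */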
void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
{
        u32 r_num = tipc_node(router);

        if (n_ptr->last_router < 0)
                return;         /* No routes */

        n_ptr->routers[r_num / 32] =
                ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
        n_ptr->last_router = tipc_max_nodes / 32;
        while ((--n_ptr->last_router >= 0) &&
               !n_ptr->routers[n_ptr->last_router]);

        if (!tipc_node_is_up(n_ptr))
                node_lost_contact(n_ptr);
}

#if 0
void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str)
{
        u32 i;

        tipc_printf(buf, "\n\n%s", str);
        for (i = 0; i < MAX_BEARERS; i++) {
                if (!n_ptr->links[i])
                        continue;
                tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
        }
        tipc_printf(buf, "Active links: [%x,%x]\n",
                    n_ptr->active_links[0], n_ptr->active_links[1]);
}
#endif

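/**
 * tipc_available_nodes - return number of reachable nodes within given domain
 */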
u32 tipc_available_nodes(const u32 domain)
{
        struct tipc_node *n_ptr;
        u32 cnt = 0;

        read_lock_bh(&tipc_net_lock);
        for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                if (!in_scope(domain, n_ptr->addr))
                        continue;
                if (tipc_node_is_up(n_ptr))
                        cnt++;
        }
        read_unlock_bh(&tipc_net_lock);
        return cnt;
}

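/**
 * tipc_node_get_nodes - build configuration reply listing nodes in a domain
 *
 * Returns a message containing one TIPC_TLV_NODE_INFO TLV (address and
 * up/down state) for each node within the requested network address domain.
 */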
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_node_info node_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        read_lock_bh(&tipc_net_lock);
        if (!tipc_nodes) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_none();
        }

        /* For now, get space for all other nodes
           (will need to modify this when slave nodes are supported) */

        payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf) {
                read_unlock_bh(&tipc_net_lock);
                return NULL;
        }

        /* Add TLVs for all nodes in scope */

        for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                if (!in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
                node_info.up = htonl(tipc_node_is_up(n_ptr));
                tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
                                    &node_info, sizeof(node_info));
        }

        read_unlock_bh(&tipc_net_lock);
        return buf;
}

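/**
 * tipc_node_get_links - build configuration reply listing links in a domain
 *
 * Returns a message containing one TIPC_TLV_LINK_INFO TLV for the broadcast
 * link plus one for each unicast link to a node within the requested domain.
 */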
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_link_info link_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        if (tipc_mode != TIPC_NET_MODE)
                return tipc_cfg_reply_none();

        read_lock_bh(&tipc_net_lock);

        /* Get space for all unicast links + multicast link */

        payload_size = TLV_SPACE(sizeof(link_info)) *
                (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf) {
                read_unlock_bh(&tipc_net_lock);
                return NULL;
        }

        /* Add TLV for broadcast link */

        link_info.dest = htonl(tipc_own_addr & 0xfffff00);
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

        /* Add TLVs for any other links in scope */

        for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                u32 i;

                if (!in_scope(domain, n_ptr->addr))
                        continue;
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        if (!n_ptr->links[i])
                                continue;
                        link_info.dest = htonl(n_ptr->addr);
                        link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
                        strcpy(link_info.str, n_ptr->links[i]->name);
                        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
                                            &link_info, sizeof(link_info));
                }
                tipc_node_unlock(n_ptr);
        }

        read_unlock_bh(&tipc_net_lock);
        return buf;
}