/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"

#define NODE_HTABLE_SIZE 512

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;
static u32 tipc_num_links;
static DEFINE_SPINLOCK(node_list_lock);

struct tipc_sock_conn {
        u32 port;
        u32 peer_port;
        u32 peer_node;
        struct list_head list;
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
        [TIPC_NLA_NODE_UNSPEC]          = { .type = NLA_UNSPEC },
        [TIPC_NLA_NODE_ADDR]            = { .type = NLA_U32 },
        [TIPC_NLA_NODE_UP]              = { .type = NLA_FLAG }
};

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
        return addr & (NODE_HTABLE_SIZE - 1);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(u32 addr)
{
        struct tipc_node *node;

        if (unlikely(!in_own_cluster_exact(addr)))
                return NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
                if (node->addr == addr) {
                        rcu_read_unlock();
                        return node;
                }
        }
        rcu_read_unlock();
        return NULL;
}

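/*
 * tipc_node_create - create node object and add it to the node table
 *
 * Inserts the new node into the address hash table and into tipc_node_list
 * in ascending address order. Returns the node, or NULL if allocation fails.
 */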
struct tipc_node *tipc_node_create(u32 addr)
{
        struct tipc_node *n_ptr, *temp_node;

        spin_lock_bh(&node_list_lock);

        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
                spin_unlock_bh(&node_list_lock);
                pr_warn("Node creation failed, no memory\n");
                return NULL;
        }

        n_ptr->addr = addr;
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
        skb_queue_head_init(&n_ptr->waiting_sks);
        __skb_queue_head_init(&n_ptr->bclink.deferred_queue);

        hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

        list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
                if (n_ptr->addr < temp_node->addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;

        tipc_num_nodes++;

        spin_unlock_bh(&node_list_lock);
        return n_ptr;
}

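/*
 * tipc_node_delete - unlink node from all lists and schedule it for release
 *
 * Caller must hold node_list_lock; the object itself is freed via RCU.
 */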
static void tipc_node_delete(struct tipc_node *n_ptr)
{
        list_del_rcu(&n_ptr->list);
        hlist_del_rcu(&n_ptr->hash);
        kfree_rcu(n_ptr, rcu);

        tipc_num_nodes--;
}

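/*
 * tipc_node_stop - delete all remaining node objects
 */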
void tipc_node_stop(void)
{
        struct tipc_node *node, *t_node;

        spin_lock_bh(&node_list_lock);
        list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
                tipc_node_delete(node);
        spin_unlock_bh(&node_list_lock);
}

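/*
 * tipc_node_add_conn - register a socket connection with its peer node
 *
 * The entry is used to abort the connection if contact with the peer node
 * is lost. Returns 0 on success, or -EHOSTUNREACH if the peer node cannot
 * be found or the entry cannot be allocated.
 */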
int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
{
        struct tipc_node *node;
        struct tipc_sock_conn *conn;

        if (in_own_node(dnode))
                return 0;

        node = tipc_node_find(dnode);
        if (!node) {
                pr_warn("Connecting sock to node 0x%x failed\n", dnode);
                return -EHOSTUNREACH;
        }
        conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
        if (!conn)
                return -EHOSTUNREACH;
        conn->peer_node = dnode;
        conn->port = port;
        conn->peer_port = peer_port;

        tipc_node_lock(node);
        list_add_tail(&conn->list, &node->conn_sks);
        tipc_node_unlock(node);
        return 0;
}

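/*
 * tipc_node_remove_conn - unregister a socket connection from its peer node
 */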
void tipc_node_remove_conn(u32 dnode, u32 port)
{
        struct tipc_node *node;
        struct tipc_sock_conn *conn, *safe;

        if (in_own_node(dnode))
                return;

        node = tipc_node_find(dnode);
        if (!node)
                return;

        tipc_node_lock(node);
        list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
                if (port != conn->port)
                        continue;
                list_del(&conn->list);
                kfree(conn);
        }
        tipc_node_unlock(node);
}

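/*
 * tipc_node_abort_sock_conns - notify sockets that their peer node is gone
 *
 * For each registered connection, a TIPC_ERR_NO_NODE connection abort
 * message is built and delivered to the local socket layer, after which
 * the connection entry is released.
 */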
void tipc_node_abort_sock_conns(struct list_head *conns)
{
        struct tipc_sock_conn *conn, *safe;
        struct sk_buff *buf;

        list_for_each_entry_safe(conn, safe, conns, list) {
                buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                      SHORT_H_SIZE, 0, tipc_own_addr,
                                      conn->peer_node, conn->port,
                                      conn->peer_port, TIPC_ERR_NO_NODE);
                if (likely(buf))
                        tipc_sk_rcv(buf);
                list_del(&conn->list);
                kfree(conn);
        }
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        struct tipc_link **active = &n_ptr->active_links[0];

        n_ptr->working_links++;
        n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
        n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;

        pr_info("Established link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->net_plane);

        if (!active[0]) {
                active[0] = active[1] = l_ptr;
                node_established_contact(n_ptr);
                goto exit;
        }
        if (l_ptr->priority < active[0]->priority) {
                pr_info("New link <%s> becomes standby\n", l_ptr->name);
                goto exit;
        }
        tipc_link_dup_queue_xmit(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
                goto exit;
        }
        pr_info("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
                pr_info("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
exit:
        /* Leave room for changeover header when returning 'mtu' to users: */
        n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
        n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
}

/**
 * node_select_active_links - select active link
 */
static void node_select_active_links(struct tipc_node *n_ptr)
{
        struct tipc_link **active = &n_ptr->active_links[0];
        u32 i;
        u32 highest_prio = 0;

        active[0] = active[1] = NULL;

        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];

                if (!l_ptr || !tipc_link_is_up(l_ptr) ||
                    (l_ptr->priority < highest_prio))
                        continue;

                if (l_ptr->priority > highest_prio) {
                        highest_prio = l_ptr->priority;
                        active[0] = active[1] = l_ptr;
                } else {
                        active[1] = l_ptr;
                }
        }
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        struct tipc_link **active;

        n_ptr->working_links--;
        n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
        n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;

        if (!tipc_link_is_active(l_ptr)) {
                pr_info("Lost standby link <%s> on network plane %c\n",
                        l_ptr->name, l_ptr->net_plane);
                return;
        }
        pr_info("Lost link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->net_plane);

        active = &n_ptr->active_links[0];
        if (active[0] == l_ptr)
                active[0] = active[1];
        if (active[1] == l_ptr)
                active[1] = active[0];
        if (active[0] == l_ptr)
                node_select_active_links(n_ptr);
        if (tipc_node_is_up(n_ptr))
                tipc_link_failover_send_queue(l_ptr);
        else
                node_lost_contact(n_ptr);

        /* Leave room for changeover header when returning 'mtu' to users: */
        if (active[0]) {
                n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
                n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
                return;
        }

        /* Loopback link went down? No fragmentation needed from now on. */
        if (n_ptr->addr == tipc_own_addr) {
                n_ptr->act_mtus[0] = MAX_MSG_SIZE;
                n_ptr->act_mtus[1] = MAX_MSG_SIZE;
        }
}

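/*
 * tipc_node_active_links - tell whether the node has at least one active link
 *
 * tipc_node_is_up() below is an alias for the same check.
 */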
int tipc_node_active_links(struct tipc_node *n_ptr)
{
        return n_ptr->active_links[0] != NULL;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
        return tipc_node_active_links(n_ptr);
}

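/*
 * tipc_node_attach_link - associate a newly created link with its peer node
 */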
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        n_ptr->links[l_ptr->bearer_id] = l_ptr;
        spin_lock_bh(&node_list_lock);
        tipc_num_links++;
        spin_unlock_bh(&node_list_lock);
        n_ptr->link_cnt++;
}

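/*
 * tipc_node_detach_link - remove a link from its peer node's link array
 */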
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
        int i;

        for (i = 0; i < MAX_BEARERS; i++) {
                if (l_ptr != n_ptr->links[i])
                        continue;
                n_ptr->links[i] = NULL;
                spin_lock_bh(&node_list_lock);
                tipc_num_links--;
                spin_unlock_bh(&node_list_lock);
                n_ptr->link_cnt--;
        }
}

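/*
 * node_established_contact - handle first working link to a peer node
 *
 * Schedules a "node up" notification and registers the node with the
 * broadcast link, starting from the last sent broadcast sequence number.
 */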
static void node_established_contact(struct tipc_node *n_ptr)
{
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
        tipc_bclink_add_node(n_ptr->addr);
}

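/*
 * node_lost_contact - handle loss of all links to a peer node
 *
 * Flushes broadcast link state for the node, aborts any link changeover,
 * and schedules a "node down" notification for subscribers.
 */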
static void node_lost_contact(struct tipc_node *n_ptr)
{
        char addr_string[16];
        u32 i;

        pr_info("Lost contact with %s\n",
                tipc_addr_string_fill(addr_string, n_ptr->addr));

        /* Flush broadcast link info associated with lost node */
        if (n_ptr->bclink.recv_permitted) {
                __skb_queue_purge(&n_ptr->bclink.deferred_queue);

                if (n_ptr->bclink.reasm_buf) {
                        kfree_skb(n_ptr->bclink.reasm_buf);
                        n_ptr->bclink.reasm_buf = NULL;
                }

                tipc_bclink_remove_node(n_ptr->addr);
                tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

                n_ptr->bclink.recv_permitted = false;
        }

        /* Abort link changeover */
        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link *l_ptr = n_ptr->links[i];
                if (!l_ptr)
                        continue;
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
                tipc_link_reset_fragments(l_ptr);
        }

        n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;

        /* Notify subscribers and prevent re-contact with node until
         * cleanup is done.
         */
        n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
                               TIPC_NOTIFY_NODE_DOWN;
}

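/*
 * tipc_node_get_nodes - build TLV reply listing all nodes within a domain
 *
 * The reply contains one TIPC_TLV_NODE_INFO entry (address and up/down
 * state) per known node inside the requested network address domain.
 */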
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_node_info node_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        spin_lock_bh(&node_list_lock);
        if (!tipc_num_nodes) {
                spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_none();
        }

        /* For now, get space for all other nodes */
        payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
        if (payload_size > 32768u) {
                spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
        spin_unlock_bh(&node_list_lock);

        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
                return NULL;

        /* Add TLVs for all nodes in scope */
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
                node_info.up = htonl(tipc_node_is_up(n_ptr));
                tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
                                    &node_info, sizeof(node_info));
        }
        rcu_read_unlock();
        return buf;
}

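/*
 * tipc_node_get_links - build TLV reply listing all links within a domain
 *
 * The reply contains one TIPC_TLV_LINK_INFO entry for the broadcast link,
 * plus one entry per unicast link to a node inside the requested domain.
 */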
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
        struct tipc_link_info link_info;
        u32 payload_size;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
        if (!tipc_addr_domain_valid(domain))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");

        if (!tipc_own_addr)
                return tipc_cfg_reply_none();

        spin_lock_bh(&node_list_lock);
        /* Get space for all unicast links + broadcast link */
        payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
        if (payload_size > 32768u) {
                spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
        spin_unlock_bh(&node_list_lock);

        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
                return NULL;

        /* Add TLV for broadcast link */
        link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

        /* Add TLVs for any other links in scope */
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
                u32 i;

                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        if (!n_ptr->links[i])
                                continue;
                        link_info.dest = htonl(n_ptr->addr);
                        link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
                        strcpy(link_info.str, n_ptr->links[i]->name);
                        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
                                            &link_info, sizeof(link_info));
                }
                tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
        return buf;
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
{
        struct tipc_link *link;
        struct tipc_node *node = tipc_node_find(addr);

        if ((bearer_id >= MAX_BEARERS) || !node)
                return -EINVAL;
        tipc_node_lock(node);
        link = node->links[bearer_id];
        if (link) {
                strncpy(linkname, link->name, len);
                tipc_node_unlock(node);
                return 0;
        }
        tipc_node_unlock(node);
        return -EINVAL;
}

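/*
 * tipc_node_unlock - release node lock and act on deferred events
 *
 * Any action flags accumulated while the lock was held (socket wakeups,
 * node up/down and link up/down notifications) are processed here, after
 * the lock has been dropped.
 */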
void tipc_node_unlock(struct tipc_node *node)
{
        LIST_HEAD(nsub_list);
        LIST_HEAD(conn_sks);
        struct sk_buff_head waiting_sks;
        u32 addr = 0;
        int flags = node->action_flags;
        u32 link_id = 0;

        if (likely(!flags)) {
                spin_unlock_bh(&node->lock);
                return;
        }

        addr = node->addr;
        link_id = node->link_id;
        __skb_queue_head_init(&waiting_sks);

        if (flags & TIPC_WAKEUP_USERS)
                skb_queue_splice_init(&node->waiting_sks, &waiting_sks);

        if (flags & TIPC_NOTIFY_NODE_DOWN) {
                list_replace_init(&node->publ_list, &nsub_list);
                list_replace_init(&node->conn_sks, &conn_sks);
        }
        node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
                                TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
                                TIPC_NOTIFY_LINK_DOWN |
                                TIPC_WAKEUP_BCAST_USERS);

        spin_unlock_bh(&node->lock);

        while (!skb_queue_empty(&waiting_sks))
                tipc_sk_rcv(__skb_dequeue(&waiting_sks));

        if (!list_empty(&conn_sks))
                tipc_node_abort_sock_conns(&conn_sks);

        if (!list_empty(&nsub_list))
                tipc_publ_notify(&nsub_list, addr);

        if (flags & TIPC_WAKEUP_BCAST_USERS)
                tipc_bclink_wakeup_users();

        if (flags & TIPC_NOTIFY_NODE_UP)
                tipc_named_node_up(addr);

        if (flags & TIPC_NOTIFY_LINK_UP)
                tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
                                     TIPC_NODE_SCOPE, link_id, addr);

        if (flags & TIPC_NOTIFY_LINK_DOWN)
                tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
                                      link_id, addr);
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
        void *hdr;
        struct nlattr *attrs;

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_NODE_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
        if (!attrs)
                goto msg_full;

        if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
                goto attr_msg_full;
        if (tipc_node_is_up(node))
                if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
                        goto attr_msg_full;

        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}

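/*
 * tipc_nl_node_dump - netlink dump callback for node information
 *
 * Walks tipc_node_list under RCU, adding one message per node, and resumes
 * from the last dumped address when the dump is continued.
 */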
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int err;
        int done = cb->args[0];
        int last_addr = cb->args[1];
        struct tipc_node *node;
        struct tipc_nl_msg msg;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();

        if (last_addr && !tipc_node_find(last_addr)) {
                rcu_read_unlock();
                /* We never set seq or call nl_dump_check_consistent(), so
                 * setting prev_seq here will make the consistency check fail
                 * in the netlink callback handler. As a result, the
                 * NLMSG_DONE message will carry the NLM_F_DUMP_INTR flag if
                 * the node state changed while we released the lock.
                 */
                cb->prev_seq = 1;
                return -EPIPE;
        }

        list_for_each_entry_rcu(node, &tipc_node_list, list) {
                if (last_addr) {
                        if (node->addr == last_addr)
                                last_addr = 0;
                        else
                                continue;
                }

                tipc_node_lock(node);
                err = __tipc_nl_add_node(&msg, node);
                if (err) {
                        last_addr = node->addr;
                        tipc_node_unlock(node);
                        goto out;
                }

                tipc_node_unlock(node);
        }
        done = 1;
out:
        cb->args[0] = done;
        cb->args[1] = last_addr;
        rcu_read_unlock();

        return skb->len;
}