1/* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 * Copyright (c) 2005-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "cluster.h"
#include "net.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "port.h"
#include "bearer.h"
#include "name_distr.h"

void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

/* Singly-linked list of all known cluster nodes, kept sorted by address
 * (see the insertion loop in tipc_node_create()) */
struct tipc_node *tipc_nodes = NULL;	/* sorted list of nodes within cluster */

/* Serializes node creation; see the comment on tipc_node_create() below */
static DEFINE_SPINLOCK(node_create_lock);

/* Broadcast tag: incremented/decremented as broadcast-capable peers with an
 * address lower than our own come and go (see node_established_contact()
 * and node_lost_contact()) */
u32 tipc_own_tag = 0;

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node **curr_node;

	spin_lock_bh(&node_create_lock);

	/* Return the existing entry if the node is already known; the list
	 * is sorted by address, so stop at the first larger entry */
	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (addr < n_ptr->addr)
			break;
		if (addr == n_ptr->addr) {
			spin_unlock_bh(&node_create_lock);
			return n_ptr;
		}
	}

	/* GFP_ATOMIC: we are holding node_create_lock (a BH spinlock) */
	n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	/* Find (or create) the cluster that will own the new node */
	c_ptr = tipc_cltr_find(addr);
	if (!c_ptr) {
		c_ptr = tipc_cltr_create(addr);
	}
	if (!c_ptr) {
		spin_unlock_bh(&node_create_lock);
		kfree(n_ptr);
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_LIST_HEAD(&n_ptr->nsub);
	n_ptr->owner = c_ptr;
	tipc_cltr_attach_node(c_ptr, n_ptr);
	n_ptr->last_router = -1;	/* no routers known yet */

	/* Insert node into ordered list */
	for (curr_node = &tipc_nodes; *curr_node;
	     curr_node = &(*curr_node)->next) {
		if (addr < (*curr_node)->addr) {
			n_ptr->next = *curr_node;
			break;
		}
	}
	(*curr_node) = n_ptr;
	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

/**
 * tipc_node_delete - free a node structure
 *
 * NOTE(review): assumes links and subscriptions referencing the node have
 * already been detached by the caller -- confirm at call sites.
 */
void tipc_node_delete(struct tipc_node *n_ptr)
{
	if (!n_ptr)
		return;


	dbg("node %x deleted\n", n_ptr->addr);
	kfree(n_ptr);
}


/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
138 */ 139 140void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr) 141{ 142 struct link **active = &n_ptr->active_links[0]; 143 144 n_ptr->working_links++; 145 146 info("Established link <%s> on network plane %c\n", 147 l_ptr->name, l_ptr->b_ptr->net_plane); 148 149 if (!active[0]) { 150 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]); 151 active[0] = active[1] = l_ptr; 152 node_established_contact(n_ptr); 153 return; 154 } 155 if (l_ptr->priority < active[0]->priority) { 156 info("New link <%s> becomes standby\n", l_ptr->name); 157 return; 158 } 159 tipc_link_send_duplicate(active[0], l_ptr); 160 if (l_ptr->priority == active[0]->priority) { 161 active[0] = l_ptr; 162 return; 163 } 164 info("Old link <%s> becomes standby\n", active[0]->name); 165 if (active[1] != active[0]) 166 info("Old link <%s> becomes standby\n", active[1]->name); 167 active[0] = active[1] = l_ptr; 168} 169 170/** 171 * node_select_active_links - select active link 172 */ 173 174static void node_select_active_links(struct tipc_node *n_ptr) 175{ 176 struct link **active = &n_ptr->active_links[0]; 177 u32 i; 178 u32 highest_prio = 0; 179 180 active[0] = active[1] = NULL; 181 182 for (i = 0; i < MAX_BEARERS; i++) { 183 struct link *l_ptr = n_ptr->links[i]; 184 185 if (!l_ptr || !tipc_link_is_up(l_ptr) || 186 (l_ptr->priority < highest_prio)) 187 continue; 188 189 if (l_ptr->priority > highest_prio) { 190 highest_prio = l_ptr->priority; 191 active[0] = active[1] = l_ptr; 192 } else { 193 active[1] = l_ptr; 194 } 195 } 196} 197 198/** 199 * tipc_node_link_down - handle loss of link 200 */ 201 202void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr) 203{ 204 struct link **active; 205 206 n_ptr->working_links--; 207 208 if (!tipc_link_is_active(l_ptr)) { 209 info("Lost standby link <%s> on network plane %c\n", 210 l_ptr->name, l_ptr->b_ptr->net_plane); 211 return; 212 } 213 info("Lost link <%s> on network plane %c\n", 214 l_ptr->name, 
l_ptr->b_ptr->net_plane); 215 216 active = &n_ptr->active_links[0]; 217 if (active[0] == l_ptr) 218 active[0] = active[1]; 219 if (active[1] == l_ptr) 220 active[1] = active[0]; 221 if (active[0] == l_ptr) 222 node_select_active_links(n_ptr); 223 if (tipc_node_is_up(n_ptr)) 224 tipc_link_changeover(l_ptr); 225 else 226 node_lost_contact(n_ptr); 227} 228 229int tipc_node_has_active_links(struct tipc_node *n_ptr) 230{ 231 return (n_ptr && 232 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 233} 234 235int tipc_node_has_redundant_links(struct tipc_node *n_ptr) 236{ 237 return (n_ptr->working_links > 1); 238} 239 240static int tipc_node_has_active_routes(struct tipc_node *n_ptr) 241{ 242 return (n_ptr && (n_ptr->last_router >= 0)); 243} 244 245int tipc_node_is_up(struct tipc_node *n_ptr) 246{ 247 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr)); 248} 249 250struct tipc_node *tipc_node_attach_link(struct link *l_ptr) 251{ 252 struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr); 253 254 if (!n_ptr) 255 n_ptr = tipc_node_create(l_ptr->addr); 256 if (n_ptr) { 257 u32 bearer_id = l_ptr->b_ptr->identity; 258 char addr_string[16]; 259 260 if (n_ptr->link_cnt >= 2) { 261 err("Attempt to create third link to %s\n", 262 tipc_addr_string_fill(addr_string, n_ptr->addr)); 263 return NULL; 264 } 265 266 if (!n_ptr->links[bearer_id]) { 267 n_ptr->links[bearer_id] = l_ptr; 268 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++; 269 n_ptr->link_cnt++; 270 return n_ptr; 271 } 272 err("Attempt to establish second link on <%s> to %s\n", 273 l_ptr->b_ptr->publ.name, 274 tipc_addr_string_fill(addr_string, l_ptr->addr)); 275 } 276 return NULL; 277} 278 279void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr) 280{ 281 n_ptr->links[l_ptr->b_ptr->identity] = NULL; 282 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--; 283 n_ptr->link_cnt--; 284} 285 286/* 287 * Routing table management - five cases to handle: 288 * 289 * 1: A link towards 
a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *       of all system nodes within cluster:
 *       (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *       information about all cluster external and slave
 *       nodes which can be reached via this node.
 *       (node.establishedContact()==>network.sendExternalRoutes())
 *       (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *       containing information about the existence of the new node
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *       nodes within cluster that the new destination can no
 *       longer be reached via this node.
 *       (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *       routing tables. Note: This is a completely node
 *       local operation.
 *       (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *       containing information about loss of the node
 *       (node.establishedContact()=>cluster.multicastLostRoute())
 *
 */

/*
 * node_established_contact - first link to the node has come up
 *
 * Publishes our name table to the new node (if it is cluster-local) and
 * distributes the routing updates for usage cases 1-3 above.
 */
static void node_established_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
		/* Deferred to tasklet context via tipc_k_signal */
		tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	/* Slave nodes do not distribute routes */
	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = tipc_cltr_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = tipc_cltr_create(tipc_own_addr);
		if (c_ptr)
			tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
						  tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	tipc_net_send_external_routes(n_ptr->addr);
	tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
	tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				  tipc_highest_allowed_slave);
}

/*
 * node_lost_contact - last link to the node has gone down
 *
 * Discards pending broadcast state, distributes the routing updates for
 * usage cases 4-5 above, aborts any link changeover in progress, and
 * notifies all subscribers of the node's demise.
 */
static void node_lost_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;
	struct tipc_node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff* buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		/* acked + 10000 releases all outstanding broadcast buffers
		 * for this peer -- presumably 10000 exceeds any possible
		 * window; TODO confirm against bclink window limits */
		tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		tipc_net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = tipc_cltr_find(tipc_own_addr);
			tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						   tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = tipc_cltr_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							   tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					tipc_nmap_remove(&tipc_cltr_bcast_nodes,
							 n_ptr->addr);
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				tipc_net_remove_as_router(n_ptr->addr);
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
							   LOWEST_SLAVE,
							   tipc_highest_allowed_slave);
			}
		}
	}
	/* Node may still be reachable via routers; keep link state intact */
	if (tipc_node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = NULL;
		list_del_init(&ns->nodesub_list);
		tipc_k_signal((Handler)ns->handle_node_down,
			      (unsigned long)ns->usr_handle);
	}
}

/**
 * tipc_node_select_next_hop - find the next-hop node for a message
 *
 * Called when cluster local lookup has failed.
 */

struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
{
	struct tipc_node *n_ptr;
	u32 router_addr;

	if (!tipc_addr_domain_valid(addr))
		return NULL;

	/* Look for direct link to destination processor */
	n_ptr = tipc_node_find(addr);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return NULL;

	/* Look for cluster local router with direct link to node */
	router_addr = tipc_node_select_router(n_ptr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return NULL;

	/* Inter zone/cluster -- find any direct link to remote cluster
	   (node part of the address is zeroed to address the cluster) */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = tipc_net_select_remote_node(addr, selector);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = tipc_net_select_router(addr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	return NULL;
}

/**
 * tipc_node_select_router - select router to reach specified node
 *
 * Uses a deterministic and fair algorithm for selecting router node.
 */

u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	/* last_router < 0 means no routers are known for this node */
	if (n_ptr->last_router < 0)
		return 0;
	/* last_router is the index of the topmost non-zero 32-bit word in
	 * n_ptr->routers[]; ulim is the highest bit number in that word */
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;	/* ref spreads load across routers */
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		/* Wrap: node numbers start at 1, so resume scanning there */
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}

void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	/* Set the router's bit in the bitmap */
	n_ptr->routers[r_num / 32] =
		((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
	/* Re-derive the index of the topmost non-zero word.
	 * NOTE(review): the scan pre-decrements from tipc_max_nodes / 32, so
	 * the word at index tipc_max_nodes / 32 itself is never examined --
	 * verify tipc_max_nodes guarantees the top usable word lies below
	 * that index. */
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);
}

void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	if (n_ptr->last_router < 0)
		return;		/* No routes */

	/* Clear the router's bit in the bitmap */
	n_ptr->routers[r_num / 32] =
		((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
	/* Re-derive topmost non-zero word (same caveat as in
	 * tipc_node_add_router); ends at -1 if no routers remain */
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);

	if (!tipc_node_is_up(n_ptr))
		node_lost_contact(n_ptr);
}


/*
 * tipc_available_nodes - count reachable nodes within the given domain
 */
u32 tipc_available_nodes(const u32 domain)
{
	struct tipc_node *n_ptr;
	u32 cnt = 0;

	read_lock_bh(&tipc_net_lock);
	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		if (tipc_node_is_up(n_ptr))
			cnt++;
	}
	read_unlock_bh(&tipc_net_lock);
	return cnt;
}

590struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 591{ 592 u32 domain; 593 struct sk_buff *buf; 594 struct tipc_node *n_ptr; 595 struct tipc_node_info node_info; 596 u32 payload_size; 597 598 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 599 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 600 601 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 602 if (!tipc_addr_domain_valid(domain)) 603 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 604 " (network address)"); 605 606 read_lock_bh(&tipc_net_lock); 607 if (!tipc_nodes) { 608 read_unlock_bh(&tipc_net_lock); 609 return tipc_cfg_reply_none(); 610 } 611 612 /* For now, get space for all other nodes 613 (will need to modify this when slave nodes are supported */ 614 615 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); 616 if (payload_size > 32768u) { 617 read_unlock_bh(&tipc_net_lock); 618 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 619 " (too many nodes)"); 620 } 621 buf = tipc_cfg_reply_alloc(payload_size); 622 if (!buf) { 623 read_unlock_bh(&tipc_net_lock); 624 return NULL; 625 } 626 627 /* Add TLVs for all nodes in scope */ 628 629 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 630 if (!tipc_in_scope(domain, n_ptr->addr)) 631 continue; 632 node_info.addr = htonl(n_ptr->addr); 633 node_info.up = htonl(tipc_node_is_up(n_ptr)); 634 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 635 &node_info, sizeof(node_info)); 636 } 637 638 read_unlock_bh(&tipc_net_lock); 639 return buf; 640} 641 642struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) 643{ 644 u32 domain; 645 struct sk_buff *buf; 646 struct tipc_node *n_ptr; 647 struct tipc_link_info link_info; 648 u32 payload_size; 649 650 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 651 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 652 653 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 654 if 
(!tipc_addr_domain_valid(domain)) 655 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 656 " (network address)"); 657 658 if (tipc_mode != TIPC_NET_MODE) 659 return tipc_cfg_reply_none(); 660 661 read_lock_bh(&tipc_net_lock); 662 663 /* Get space for all unicast links + multicast link */ 664 665 payload_size = TLV_SPACE(sizeof(link_info)) * 666 (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); 667 if (payload_size > 32768u) { 668 read_unlock_bh(&tipc_net_lock); 669 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 670 " (too many links)"); 671 } 672 buf = tipc_cfg_reply_alloc(payload_size); 673 if (!buf) { 674 read_unlock_bh(&tipc_net_lock); 675 return NULL; 676 } 677 678 /* Add TLV for broadcast link */ 679 680 link_info.dest = htonl(tipc_own_addr & 0xfffff00); 681 link_info.up = htonl(1); 682 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); 683 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 684 685 /* Add TLVs for any other links in scope */ 686 687 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 688 u32 i; 689 690 if (!tipc_in_scope(domain, n_ptr->addr)) 691 continue; 692 tipc_node_lock(n_ptr); 693 for (i = 0; i < MAX_BEARERS; i++) { 694 if (!n_ptr->links[i]) 695 continue; 696 link_info.dest = htonl(n_ptr->addr); 697 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); 698 strcpy(link_info.str, n_ptr->links[i]->name); 699 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 700 &link_info, sizeof(link_info)); 701 } 702 tipc_node_unlock(n_ptr); 703 } 704 705 read_unlock_bh(&tipc_net_lock); 706 return buf; 707} 708