/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route *chain;
	spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		cpu_to_be16(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.entries =		ATOMIC_INIT(0),
};

static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
	__u16 tmp = (__u16 __force)(src ^ dst);
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned)tmp;
}

static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	dst_release(&rt->dst);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt, **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while ((rt = *rtp) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
					(now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

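		/*
		 * A full scan of the table can take a while; yield once
		 * more than a jiffy has gone by and let a later timer
		 * run prune whatever is left.
		 */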
		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(struct dst_ops *ops)
{
	struct dn_route *rt, **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while ((rt = *rtp) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
					(now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
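 *
 * Worked example: a short routing header costs 6 bytes, so the floor is
 * 230 - 6 = 224; a long header costs 21 bytes, giving 230 - 21 = 209.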
 */
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	u32 min_mtu = 230;
	struct dn_dev *dn = dst->neighbour ?
			    (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst->metrics[RTAX_MTU-1] = mtu;
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			if (dst_metric(dst, RTAX_ADVMSS) > mss)
				dst->metrics[RTAX_ADVMSS-1] = mss;
		}
	}
}

/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	/* Report every cached route as stale: once marked obsolete it
	 * must not be reused without a fresh lookup. */
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
		(fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
		(fl1->mark ^ fl2->mark) |
		(fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}

static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth, **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while ((rth = *rthp) != NULL) {
		if (compare_keys(&rth->fl, &rt->fl)) {
			/* Put it first */
			*rthp = rth->dst.dn_next;
			rcu_assign_pointer(rth->dst.dn_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->dst.dn_next;
	}

	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_use(&rt->dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

static void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	/* <= rather than <, so the last bucket is flushed too */
	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for (; rt; rt = next) {
			next = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;

	/* Add back headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
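
	/*
	 * Layout from here in a short header: dstaddr (2 bytes),
	 * srcaddr (2 bytes), then a forward-count byte which is
	 * zeroed below before the packet heads back out.
	 */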
	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	swap(*src, *dst);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";
		printk(KERN_DEBUG
		       "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
		       (int)cb->rt_flags, devname, skb->len,
		       le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
		       err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch (cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb_reset_transport_header(skb);

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;

	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}



static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb_reset_transport_header(skb);

	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))
		goto dump_it;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
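	 * (When DN_RT_F_PF is set, the low bits of this first byte give
	 * the number of padding bytes, including the byte itself; the
	 * real flags byte follows the padding.)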
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb_reset_network_header(skb);

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
		       "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
		       (int)flags, (dev) ? dev->name : "???", len, skb->len,
		       padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
				       skb, skb->dev, NULL,
				       dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch (flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct neighbour *neigh;

	int err = -EINVAL;

	if ((neigh = dst->neighbour) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
		       neigh->output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb_dst(skb);
	struct dn_dev *dn_db = dst->dev->dn_ptr;
	struct dn_route *rt;
	struct neighbour *neigh = dst->neighbour;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb_dst(skb);
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
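	 * Drop rather than let a looping packet circulate forever:
	 * 30 is this implementation's visit-count ceiling.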
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
		       neigh->output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	if (net_ratelimit()) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);

		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
		       le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
	}

	kfree_skb(skb);

	return NET_RX_DROP;
}

static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->dst.dev;
	struct neighbour *n;
	unsigned mss;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		memcpy(rt->dst.metrics, fi->fib_metrics,
		       sizeof(rt->dst.metrics));
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->dst.neighbour == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->dst.neighbour = n;
	}

	if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
	    dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
		rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
	    dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
		rt->dst.metrics[RTAX_ADVMSS-1] = mss;
	return 0;
}

static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
	__u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
	int match = 16;
	while (tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
{
	__le16 saddr = 0;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	read_lock(&dev_base_lock);
	for (ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match) {
			/* Remember the best match so that a later, poorer
			 * match cannot overwrite it. */
			best_match = ret;
			saddr = ifa->ifa_local;
		}
		if (best_match == 0 && saddr == 0)
			saddr = ifa->ifa_local;
	}
	read_unlock(&dev_base_lock);

	return saddr;
}

static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
	__le16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
	struct flowi fl = { .nl_u = { .dn_u =
				      { .daddr = oldflp->fld_dst,
					.saddr = oldflp->fld_src,
					.scope = RT_SCOPE_UNIVERSE,
				      } },
			    .mark = oldflp->mark,
			    .iif = init_net.loopback_dev->ifindex,
			    .oif = oldflp->oif };
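	/* iif is primed with the loopback ifindex: in the flow key this
	 * marks the lookup as locally originated rather than forwarded. */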
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL, *dev;
	struct neighbour *neigh = NULL;
	unsigned hash;
	unsigned flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__le16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
		       le16_to_cpu(oldflp->fld_src),
		       oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);

	/* If we have an output interface, verify that it's a DECnet device */
	if (oldflp->oif) {
		dev_out = dev_get_by_index(&init_net, oldflp->oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->fld_src) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (!dev->dn_ptr)
				continue;
			if (!dn_dev_islocal(dev, oldflp->fld_src))
				continue;
			if ((dev->flags & IFF_LOOPBACK) &&
			    oldflp->fld_dst &&
			    !dn_dev_islocal(dev, oldflp->fld_dst))
				continue;

			dev_out = dev;
			break;
		}
		rcu_read_unlock();
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fl.fld_dst) {
		fl.fld_dst = fl.fld_src;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		if (!fl.fld_dst) {
			fl.fld_dst =
			fl.fld_src = dnet_select_source(dev_out, 0,
							RT_SCOPE_HOST);
			if (!fl.fld_dst)
				goto out;
		}
		fl.oif = init_net.loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
		       fl.oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
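		 * (The neighbour cache may still know a directly connected
		 * destination even though the FIB lookup above found nothing.)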
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
			if (neigh) {
				if ((oldflp->oif &&
				    (neigh->dev->ifindex != oldflp->oif)) ||
				    (oldflp->fld_src &&
				    (!dn_dev_islocal(neigh->dev,
						     oldflp->fld_src)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
						dev_out = init_net.loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = dev_out->dn_ptr;
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
			dev_put(dev_out);
			dev_out = init_net.loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fl.fld_dst;
		if (fl.fld_src == 0) {
			fl.fld_src = dnet_select_source(dev_out, gateway,
							res.type == RTN_LOCAL ?
							RT_SCOPE_HOST :
							RT_SCOPE_LINK);
			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fl.oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fl.fld_src)
			fl.fld_src = fl.fld_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		dn_fib_select_multipath(&fl, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */

	if (!fl.fld_src)
		fl.fld_src = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.flags = DST_HOST;

	rt->fl.fld_src = oldflp->fld_src;
	rt->fl.fld_dst = oldflp->fld_dst;
	rt->fl.oif = oldflp->oif;
	rt->fl.iif = 0;
	rt->fl.mark = oldflp->mark;

	rt->rt_saddr = fl.fld_src;
	rt->rt_daddr = fl.fld_dst;
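	/* With no explicit next hop, the destination itself is assumed
	 * to be on-link. */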
	rt->rt_gateway = gateway ? gateway : fl.fld_dst;
	rt->rt_local_src = fl.fld_src;

	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->dst.dev = dev_out;
	dev_hold(dev_out);
	rt->dst.neighbour = neigh;
	neigh = NULL;

	rt->dst.lastuse = jiffies;
	rt->dst.output = dn_output;
	rt->dst.input = dn_rt_bug;
	rt->rt_flags = flags;
	if (flags & RTCF_LOCAL)
		rt->dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->dst);
	goto e_nobufs;
}


/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
	unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
			rt = rcu_dereference_bh(rt->dst.dn_next)) {
			if ((flp->fld_dst == rt->fl.fld_dst) &&
			    (flp->fld_src == rt->fl.fld_src) &&
			    (flp->mark == rt->fl.mark) &&
			    (rt->fl.iif == 0) &&
			    (rt->fl.oif == flp->oif)) {
				dst_use(&rt->dst, jiffies);
				rcu_read_unlock_bh();
				*pprt = &rt->dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->proto) {
		err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
	}
	return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->proto) {
		err = xfrm_lookup(&init_net, pprt, fl, sk,
				  (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
	}
	return err;
}

static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowi fl = { .nl_u = { .dn_u =
				     { .daddr = cb->dst,
				       .saddr = cb->src,
				       .scope = RT_SCOPE_UNIVERSE,
				    } },
			    .mark = skb->mark,
			    .iif = skb->dev->ifindex };
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = in_dev->dn_ptr) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fl.fld_src == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
We don't 1247 * allow it any further to prevent routing loops, spoofing and 1248 * other nasties. Loopback packets already have the dst attached 1249 * so this only affects packets which have originated elsewhere. 1250 */ 1251 err = -ENOTUNIQ; 1252 if (dn_dev_islocal(in_dev, cb->src)) 1253 goto out; 1254 1255 err = dn_fib_lookup(&fl, &res); 1256 if (err) { 1257 if (err != -ESRCH) 1258 goto out; 1259 /* 1260 * Is the destination us ? 1261 */ 1262 if (!dn_dev_islocal(in_dev, cb->dst)) 1263 goto e_inval; 1264 1265 res.type = RTN_LOCAL; 1266 } else { 1267 __le16 src_map = fl.fld_src; 1268 free_res = 1; 1269 1270 out_dev = DN_FIB_RES_DEV(res); 1271 if (out_dev == NULL) { 1272 if (net_ratelimit()) 1273 printk(KERN_CRIT "Bug in dn_route_input_slow() " 1274 "No output device\n"); 1275 goto e_inval; 1276 } 1277 dev_hold(out_dev); 1278 1279 if (res.r) 1280 src_map = fl.fld_src; /* no NAT support for now */ 1281 1282 gateway = DN_FIB_RES_GW(res); 1283 if (res.type == RTN_NAT) { 1284 fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res); 1285 dn_fib_res_put(&res); 1286 free_res = 0; 1287 if (dn_fib_lookup(&fl, &res)) 1288 goto e_inval; 1289 free_res = 1; 1290 if (res.type != RTN_UNICAST) 1291 goto e_inval; 1292 flags |= RTCF_DNAT; 1293 gateway = fl.fld_dst; 1294 } 1295 fl.fld_src = src_map; 1296 } 1297 1298 switch(res.type) { 1299 case RTN_UNICAST: 1300 /* 1301 * Forwarding check here, we only check for forwarding 1302 * being turned off, if you want to only forward intra 1303 * area, its up to you to set the routing tables up 1304 * correctly. 1305 */ 1306 if (dn_db->parms.forwarding == 0) 1307 goto e_inval; 1308 1309 if (res.fi->fib_nhs > 1 && fl.oif == 0) 1310 dn_fib_select_multipath(&fl, &res); 1311 1312 /* 1313 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT 1314 * flag as a hint to set the intra-ethernet bit when 1315 * forwarding. If we've got NAT in operation, we don't do 1316 * this optimisation. 1317 */ 1318 if (out_dev == in_dev && !(flags & RTCF_NAT)) 1319 flags |= RTCF_DOREDIRECT; 1320 1321 local_src = DN_FIB_RES_PREFSRC(res); 1322 1323 case RTN_BLACKHOLE: 1324 case RTN_UNREACHABLE: 1325 break; 1326 case RTN_LOCAL: 1327 flags |= RTCF_LOCAL; 1328 fl.fld_src = cb->dst; 1329 fl.fld_dst = cb->src; 1330 1331 /* Routing tables gave us a gateway */ 1332 if (gateway) 1333 goto make_route; 1334 1335 /* Packet was intra-ethernet, so we know its on-link */ 1336 if (cb->rt_flags & DN_RT_F_IE) { 1337 gateway = cb->src; 1338 flags |= RTCF_DIRECTSRC; 1339 goto make_route; 1340 } 1341 1342 /* Use the default router if there is one */ 1343 neigh = neigh_clone(dn_db->router); 1344 if (neigh) { 1345 gateway = ((struct dn_neigh *)neigh)->addr; 1346 goto make_route; 1347 } 1348 1349 /* Close eyes and pray */ 1350 gateway = cb->src; 1351 flags |= RTCF_DIRECTSRC; 1352 goto make_route; 1353 default: 1354 goto e_inval; 1355 } 1356 1357make_route: 1358 rt = dst_alloc(&dn_dst_ops); 1359 if (rt == NULL) 1360 goto e_nobufs; 1361 1362 rt->rt_saddr = fl.fld_src; 1363 rt->rt_daddr = fl.fld_dst; 1364 rt->rt_gateway = fl.fld_dst; 1365 if (gateway) 1366 rt->rt_gateway = gateway; 1367 rt->rt_local_src = local_src ? 
	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->fl.fld_src = cb->src;
	rt->fl.fld_dst = cb->dst;
	rt->fl.oif = 0;
	rt->fl.iif = in_dev->ifindex;
	rt->fl.mark = fl.mark;

	rt->dst.flags = DST_HOST;
	rt->dst.neighbour = neigh;
	rt->dst.dev = out_dev;
	rt->dst.lastuse = jiffies;
	rt->dst.output = dn_rt_bug;
	switch (res.type) {
	case RTN_UNICAST:
		rt->dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->dst.output = dn_output;
		rt->dst.input = dn_nsp_rx;
		rt->dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->dst.input = dst_discard;
	}
	rt->rt_flags = flags;
	if (rt->dst.dev)
		dev_hold(rt->dst.dev);

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, &rt);
	skb_dst_set(skb, &rt->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->dst);
	goto done;
}

static int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb_dst(skb))
		return 0;

	rcu_read_lock();
	for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->dst.dn_next)) {
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
		    (rt->fl.mark == skb->mark) &&
		    (rt->fl.iif == cb->iif)) {
			dst_use(&rt->dst, jiffies);
			rcu_read_unlock();
			skb_dst_set(skb, (struct dst_entry *)rt);
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	long expires;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
		goto rtattr_failure;
	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
			       rt->dst.error) < 0)
		goto rtattr_failure;
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb_dst(skb);
		if (!err && rt->dst.error)
			err = rt->dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);

out_free:
	kfree_skb(skb);
	return err;
}

/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
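 *
 * The current hash bucket and chain offset are saved in cb->args[] so a
 * dump that fills one skb can resume from the same spot on the next call.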
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (!net_eq(net, &init_net))
		return 0;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for (h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
			if (idx < s_idx)
				continue;
			skb_dst_set(skb, dst_clone(&rt->dst));
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rt->dst.dn_next;
	while (!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rcu_dereference_bh(rt);
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while (*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->dst.dev ? rt->dst.dev->name : "*",
		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
		   atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use,
		   (int) dst_metric(&rt->dst, RTAX_RTT));
	return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = totalram_pages >> (26 - PAGE_SHIFT);

	for (order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that. (The power-of-two round-down of
	 * dn_rt_hash_mask below guarantees this, given the 2048 cap here.)
	 */
	while (order && ((((1UL << order) * PAGE_SIZE) /
			  sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while (dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
#else
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_cache_dump);
#endif
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove(&init_net, "decnet_cache");
}