1/* 2 * X.25 Packet Layer release 002 3 * 4 * This is ALPHA test software. This code may break your machine, 5 * randomly fail to work with new releases, misbehave and/or generally 6 * screw up. It might even work. 7 * 8 * This code REQUIRES 2.1.15 or higher 9 * 10 * This module: 11 * This module is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 * 16 * History 17 * X.25 001 Jonathan Naylor Started coding. 18 * X.25 002 Jonathan Naylor Centralised disconnect handling. 19 * New timer architecture. 20 * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. 21 * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of 22 * facilities negotiation and increased 23 * the throughput upper limit. 24 * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups 25 * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). 26 * Fixed x25_output() related skb leakage. 27 * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. 28 * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. 29 * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN 30 * 2002-10-06 Arnaldo C. 
Melo Get rid of cli/sti, move proc stuff to 31 * x25_proc.c, using seq_file 32 * 2005-04-02 Shaun Pereira Selective sub address matching 33 * with call user data 34 * 2005-04-15 Shaun Pereira Fast select with no restriction on 35 * response 36 */ 37 38#include <linux/module.h> 39#include <linux/capability.h> 40#include <linux/errno.h> 41#include <linux/kernel.h> 42#include <linux/sched.h> 43#include <linux/smp_lock.h> 44#include <linux/timer.h> 45#include <linux/string.h> 46#include <linux/net.h> 47#include <linux/netdevice.h> 48#include <linux/if_arp.h> 49#include <linux/skbuff.h> 50#include <linux/slab.h> 51#include <net/sock.h> 52#include <net/tcp_states.h> 53#include <asm/uaccess.h> 54#include <linux/fcntl.h> 55#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 56#include <linux/notifier.h> 57#include <linux/init.h> 58#include <linux/compat.h> 59#include <linux/ctype.h> 60 61#include <net/x25.h> 62#include <net/compat.h> 63 64int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; 65int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; 66int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; 67int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; 68int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; 69int sysctl_x25_forward = 0; 70 71HLIST_HEAD(x25_list); 72DEFINE_RWLOCK(x25_list_lock); 73 74static const struct proto_ops x25_proto_ops; 75 76static struct x25_address null_x25_address = {" "}; 77 78#ifdef CONFIG_COMPAT 79struct compat_x25_subscrip_struct { 80 char device[200-sizeof(compat_ulong_t)]; 81 compat_ulong_t global_facil_mask; 82 compat_uint_t extended; 83}; 84#endif 85 86 87int x25_parse_address_block(struct sk_buff *skb, 88 struct x25_address *called_addr, 89 struct x25_address *calling_addr) 90{ 91 unsigned char len; 92 int needed; 93 int rc; 94 95 if (skb->len < 1) { 96 /* packet has no address block */ 97 rc = 0; 98 goto empty; 99 } 100 101 len = *skb->data; 102 needed = 1 + (len >> 4) + (len & 0x0f); 103 104 if (skb->len < needed) { 105 /* 
packet is too short to hold the addresses it claims 106 to hold */ 107 rc = -1; 108 goto empty; 109 } 110 111 return x25_addr_ntoa(skb->data, called_addr, calling_addr); 112 113empty: 114 *called_addr->x25_addr = 0; 115 *calling_addr->x25_addr = 0; 116 117 return rc; 118} 119 120 121int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 122 struct x25_address *calling_addr) 123{ 124 unsigned int called_len, calling_len; 125 char *called, *calling; 126 unsigned int i; 127 128 called_len = (*p >> 0) & 0x0F; 129 calling_len = (*p >> 4) & 0x0F; 130 131 called = called_addr->x25_addr; 132 calling = calling_addr->x25_addr; 133 p++; 134 135 for (i = 0; i < (called_len + calling_len); i++) { 136 if (i < called_len) { 137 if (i % 2 != 0) { 138 *called++ = ((*p >> 0) & 0x0F) + '0'; 139 p++; 140 } else { 141 *called++ = ((*p >> 4) & 0x0F) + '0'; 142 } 143 } else { 144 if (i % 2 != 0) { 145 *calling++ = ((*p >> 0) & 0x0F) + '0'; 146 p++; 147 } else { 148 *calling++ = ((*p >> 4) & 0x0F) + '0'; 149 } 150 } 151 } 152 153 *called = *calling = '\0'; 154 155 return 1 + (called_len + calling_len + 1) / 2; 156} 157 158int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, 159 struct x25_address *calling_addr) 160{ 161 unsigned int called_len, calling_len; 162 char *called, *calling; 163 int i; 164 165 called = called_addr->x25_addr; 166 calling = calling_addr->x25_addr; 167 168 called_len = strlen(called); 169 calling_len = strlen(calling); 170 171 *p++ = (calling_len << 4) | (called_len << 0); 172 173 for (i = 0; i < (called_len + calling_len); i++) { 174 if (i < called_len) { 175 if (i % 2 != 0) { 176 *p |= (*called++ - '0') << 0; 177 p++; 178 } else { 179 *p = 0x00; 180 *p |= (*called++ - '0') << 4; 181 } 182 } else { 183 if (i % 2 != 0) { 184 *p |= (*calling++ - '0') << 0; 185 p++; 186 } else { 187 *p = 0x00; 188 *p |= (*calling++ - '0') << 4; 189 } 190 } 191 } 192 193 return 1 + (called_len + calling_len + 1) / 2; 194} 195 196/* 197 * Socket removal 
 *	during an interrupt is now safe.
 */
static void x25_remove_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	/* disconnect every socket whose neighbour rides on this device */
	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}

/*
 *	Handle device status changes.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct x25_neigh *nb;

	/* X.25 only runs in the initial network namespace */
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	 || dev->type == ARPHRD_ETHER
#endif
	 ) {
		switch (event) {
		case NETDEV_UP:
			x25_link_device_up(dev);
			break;
		case NETDEV_GOING_DOWN:
			/* close the datalink before the device disappears */
			nb = x25_get_neigh(dev);
			if (nb) {
				x25_terminate_link(nb);
				x25_neigh_put(nb);
			}
			break;
		case NETDEV_DOWN:
			x25_kill_by_device(dev);
			x25_route_device_down(dev);
			x25_link_device_down(dev);
			break;
		}
	}

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.
 */
static void x25_insert_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
					struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	/* a socket bound to the null address listens on any address */
	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			x25_sk(s)->source_addr.x25_addr) ||
			!strcmp(addr->x25_addr,
				null_x25_address.x25_addr)) &&
			s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this sockets call user data
			 */
			if (skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if ((memcmp(x25_sk(s)->calluserdata.cuddata,
					skb->data,
					x25_sk(s)->cudmatchlength)) == 0) {
					/* exact cud match wins outright */
					sock_hold(s);
					goto found;
				}
			} else
				next_best = s;
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;	/* caller owns the reference taken above */
}

/*
 *	Find a connected X.25 socket given my LCI and neighbour.
 */
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	/* caller must hold x25_list_lock */
	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
			sock_hold(s);
			goto found;
		}
	s = NULL;
found:
	return s;
}

struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;

	read_lock_bh(&x25_list_lock);
	s = __x25_find_socket(lci, nb);
	read_unlock_bh(&x25_list_lock);
	return s;
}

/*
 *	Find a unique LCI for a given device.
 */
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
	unsigned int lci = 1;
	struct sock *sk;

	read_lock_bh(&x25_list_lock);

	/* linear scan for the first LCI not in use on this neighbour */
	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
		sock_put(sk);
		if (++lci == 4096) {
			/* LCI space exhausted; 0 tells the caller we failed */
			lci = 0;
			break;
		}
	}

	read_unlock_bh(&x25_list_lock);
	return lci;
}

/*
 *	Deferred destroy.
 */
static void __x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills.
 */
static void x25_destroy_timer(unsigned long data)
{
	x25_destroy_socket_from_timer((struct sock *)data);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer
 */
static void __x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			skb->sk->sk_state = TCP_LISTEN;
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}
}

/* Destroy entry point used from timer (softirq) context. */
void x25_destroy_socket_from_timer(struct sock *sk)
{
	sock_hold(sk);
	bh_lock_sock(sk);
	__x25_destroy_socket(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Destroy entry point used from process context. */
static void x25_destroy_socket(struct sock *sk)
{
	sock_hold(sk);
	lock_sock(sk);
	__x25_destroy_socket(sk);
	release_sock(sk);
	sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to a
 *	X.25 socket object.
 */

/* Only X25_QBITINCL is supported at SOL_X25. */
static int x25_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	int opt;
	struct sock *sk = sock->sk;
	int rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EINVAL;
	if (optlen < sizeof(int))
		goto out;

	rc = -EFAULT;
	if (get_user(opt, (int __user *)optval))
		goto out;

	if (opt)
		set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	else
		clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	rc = 0;
out:
	return rc;
}

static int x25_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int val, len, rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EFAULT;
	if (get_user(len, optlen))
		goto out;

	len = min_t(unsigned int, len, sizeof(int));

	rc = -EINVAL;
	if (len < 0)
		goto out;

	rc = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
	rc = copy_to_user(optval, &val, len) ?
-EFAULT : 0;
out:
	return rc;
}

static int x25_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int rc = -EOPNOTSUPP;

	lock_kernel();
	if (sk->sk_state != TCP_LISTEN) {
		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		rc = 0;
	}
	unlock_kernel();

	return rc;
}

static struct proto x25_proto = {
	.name	  = "X25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};

/* Allocate a struct sock plus x25 private area and init its queues. */
static struct sock *x25_alloc_socket(struct net *net)
{
	struct x25_sock *x25;
	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);

	if (!sk)
		goto out;

	sock_init_data(NULL, sk);

	x25 = x25_sk(sk);
	skb_queue_head_init(&x25->ack_queue);
	skb_queue_head_init(&x25->fragment_queue);
	skb_queue_head_init(&x25->interrupt_in_queue);
	skb_queue_head_init(&x25->interrupt_out_queue);
out:
	return sk;
}

/* socket(AF_X25, SOCK_SEQPACKET, 0) entry point. */
static int x25_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct x25_sock *x25;
	int rc = -EAFNOSUPPORT;

	if (!net_eq(net, &init_net))
		goto out;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_SEQPACKET)
		goto out;

	rc = -EINVAL;
	if (protocol)
		goto out;

	rc = -ENOBUFS;
	if ((sk = x25_alloc_socket(net)) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sock_init_data(sock, sk);

	x25_init_timers(sk);

	sock->ops          = &x25_proto_ops;
	sk->sk_protocol    = protocol;
	sk->sk_backlog_rcv = x25_backlog_rcv;

	/* per-socket timers start from the sysctl defaults */
	x25->t21   = sysctl_x25_call_request_timeout;
	x25->t22   = sysctl_x25_reset_request_timeout;
	x25->t23   = sysctl_x25_clear_request_timeout;
	x25->t2    = sysctl_x25_ack_holdback_timeout;
	x25->state = X25_STATE_0;
	x25->cudmatchlength = 0;
	set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);	/* normally no cud */
							/* on call accept */

	x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.throughput  = 0;	/* by default don't negotiate
						   throughput */
	x25->facilities.reverse     = X25_DEFAULT_REVERSE;
	x25->dte_facilities.calling_len = 0;
	x25->dte_facilities.called_len  = 0;
	memset(x25->dte_facilities.called_ae, '\0',
	       sizeof(x25->dte_facilities.called_ae));
	memset(x25->dte_facilities.calling_ae, '\0',
	       sizeof(x25->dte_facilities.calling_ae));

	rc = 0;
out:
	return rc;
}

/*
 * Clone a listening socket for an incoming call, copying its tunables
 * and facilities.  Returns NULL on failure.
 */
static struct sock *x25_make_new(struct sock *osk)
{
	struct sock *sk = NULL;
	struct x25_sock *x25, *ox25;

	if (osk->sk_type != SOCK_SEQPACKET)
		goto out;

	if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sk->sk_type        = osk->sk_type;
	sk->sk_priority    = osk->sk_priority;
	sk->sk_protocol    = osk->sk_protocol;
	sk->sk_rcvbuf      = osk->sk_rcvbuf;
	sk->sk_sndbuf      = osk->sk_sndbuf;
	sk->sk_state       = TCP_ESTABLISHED;
	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
	sock_copy_flags(sk, osk);

	ox25 = x25_sk(osk);
	x25->t21            = ox25->t21;
	x25->t22            = ox25->t22;
	x25->t23            = ox25->t23;
	x25->t2             = ox25->t2;
	x25->flags          = ox25->flags;
	x25->facilities     = ox25->facilities;
	x25->dte_facilities = ox25->dte_facilities;
	x25->cudmatchlength = ox25->cudmatchlength;

	clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
	x25_init_timers(sk);
out:
	return sk;
}

static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	lock_kernel();
	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {

	case X25_STATE_0:
	case X25_STATE_2:
		/* no virtual call established: tear down immediately */
		x25_disconnect(sk, 0, 0, 0);
x25_destroy_socket(sk);
		goto out;

	case X25_STATE_1:
	case X25_STATE_3:
	case X25_STATE_4:
		/* active call: send a Clear Request and let T23 finish it */
		x25_clear_queues(sk);
		x25_write_internal(sk, X25_CLEAR_REQUEST);
		x25_start_t23timer(sk);
		x25->state = X25_STATE_2;
		sk->sk_state    = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;
	}

	sock_orphan(sk);
out:
	unlock_kernel();
	return 0;
}

static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	int len, i, rc = 0;

	lock_kernel();
	if (!sock_flag(sk, SOCK_ZAPPED) ||
	    addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25) {
		rc = -EINVAL;
		goto out;
	}

	/* NOTE(review): strlen() assumes sx25_addr.x25_addr is
	 * NUL-terminated within its array; later kernels bound this
	 * with strnlen(..., X25_ADDR_LEN) -- confirm before relying
	 * on it for untrusted input. */
	len = strlen(addr->sx25_addr.x25_addr);
	for (i = 0; i < len; i++) {
		if (!isdigit(addr->sx25_addr.x25_addr[i])) {
			rc = -EINVAL;
			goto out;
		}
	}

	x25_sk(sk)->source_addr = addr->sx25_addr;
	x25_insert_socket(sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
	unlock_kernel();
	return rc;
}

/*
 * Sleep until the call is established, a signal arrives, or an error is
 * pending on the socket.  Called with the socket locked; drops and
 * retakes the lock around schedule().
 */
static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	struct x25_route *rt;
	int rc = 0;

	lock_kernel();
	lock_sock(sk);
	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		goto out; /* Connect completed during a ERESTARTSYS event */
	}

	rc = -ECONNREFUSED;
	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		goto out;
	}

	rc = -EISCONN;	/* No reconnect on a seqpacket socket */
	if (sk->sk_state == TCP_ESTABLISHED)
		goto out;

	sk->sk_state = TCP_CLOSE;
	sock->state  = SS_UNCONNECTED;

	rc = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25)
		goto out;

	rc = -ENETUNREACH;
	rt = x25_get_route(&addr->sx25_addr);
	if (!rt)
		goto out;

	x25->neighbour = x25_get_neigh(rt->dev);
	if (!x25->neighbour)
		goto out_put_route;

	x25_limit_facilities(&x25->facilities, x25->neighbour);

	x25->lci = x25_new_lci(x25->neighbour);
	if (!x25->lci)
		goto out_put_neigh;

	rc = -EINVAL;
	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
		goto out_put_neigh;

	if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
		memset(&x25->source_addr, '\0', X25_ADDR_LEN);

	x25->dest_addr = addr->sx25_addr;

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	x25->state = X25_STATE_1;

	x25_write_internal(sk, X25_CALL_REQUEST);

	x25_start_heartbeat(sk);
	x25_start_t21timer(sk);

	/* Now the loop */
	rc = -EINPROGRESS;
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		goto out_put_neigh;

	rc = x25_wait_for_connection_establishment(sk);
	if (rc)
		goto out_put_neigh;

	sock->state = SS_CONNECTED;
	rc = 0;
out_put_neigh:
	if (rc)
		x25_neigh_put(x25->neighbour);
out_put_route:
	x25_route_put(rt);
out:
	release_sock(sk);
	unlock_kernel();
	return rc;
}

/*
 * Sleep until data is queued on the receive queue, a signal arrives,
 * the socket is shut down, or the timeout expires.  Called with the
 * socket locked.
 */
static int x25_wait_for_data(struct sock *sk, long timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	struct sk_buff *skb;
	int rc = -EINVAL;

	lock_kernel();
	if (!sk || sk->sk_state != TCP_LISTEN)
		goto out;

	rc = -EOPNOTSUPP;
	if (sk->sk_type != SOCK_SEQPACKET)
		goto out;

	lock_sock(sk);
	rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
	if (rc)
		goto out2;
	skb = skb_dequeue(&sk->sk_receive_queue);
	rc = -EINVAL;
	if (!skb->sk)
		goto out2;
	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->state = SS_CONNECTED;
	rc = 0;
out2:
	release_sock(sk);
out:
	unlock_kernel();
	return rc;
}

static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
		       int *uaddr_len, int peer)
{
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	int rc = 0;

	lock_kernel();
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			rc = -ENOTCONN;
			goto out;
		}
		sx25->sx25_addr = x25->dest_addr;
	} else
		sx25->sx25_addr = x25->source_addr;

	sx25->sx25_family = AF_X25;
	*uaddr_len = sizeof(*sx25);

out:
	unlock_kernel();
	return rc;
}

/* poll() wrapper: datagram_poll() under the BKL. */
static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
				      poll_table *wait)
{
	int rc;

	lock_kernel();
	rc = datagram_poll(file, sock, wait);
	unlock_kernel();

	return rc;
}

/*
 * Handle an incoming Call Request on (nb, lci): find a listener,
 * negotiate facilities, clone the listener and queue the call for
 * accept().  Returns 1 if the frame was consumed, 0 if the call was
 * cleared.
 */
int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
			unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct x25_sock *makex25;
	struct x25_address source_addr, dest_addr;
	struct x25_facilities facilities;
	struct x25_dte_facilities dte_facilities;
	int len, addr_len, rc;

	/*
	 *	Remove the LCI and frame type.
	 */
	skb_pull(skb, X25_STD_MIN_LEN);

	/*
	 *	Extract the X.25 addresses and convert them to ASCII strings,
	 *	and remove them.
	 *
	 *	Address block is mandatory in call request packets
	 */
	addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
	if (addr_len <= 0)
		goto out_clear_request;
	skb_pull(skb, addr_len);

	/*
	 *	Get the length of the facilities, skip past them for the moment
	 *	get the call user data because this is needed to determine
	 *	the correct listener
	 *
	 *	Facilities length is mandatory in call request packets
	 */
	if (skb->len < 1)
		goto out_clear_request;
	len = skb->data[0] + 1;
	if (skb->len < len)
		goto out_clear_request;
	skb_pull(skb,len);

	/*
	 *	Find a listener for the particular address/cud pair.
	 */
	sk = x25_find_listener(&source_addr,skb);
	skb_push(skb,len);

	if (sk != NULL && sk_acceptq_is_full(sk)) {
		goto out_sock_put;
	}

	/*
	 *	We dont have any listeners for this incoming call.
	 *	Try forwarding it.
	 */
	if (sk == NULL) {
		skb_push(skb, addr_len + X25_STD_MIN_LEN);
		if (sysctl_x25_forward &&
				x25_forward_call(&dest_addr, nb, skb, lci) > 0)
		{
			/* Call was forwarded, dont process it any more */
			kfree_skb(skb);
			rc = 1;
			goto out;
		} else {
			/* No listeners, can't forward, clear the call */
			goto out_clear_request;
		}
	}

	/*
	 *	Try to reach a compromise on the requested facilities.
	 */
	len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
	if (len == -1)
		goto out_sock_put;

	/*
	 *	current neighbour/link might impose additional limits
	 *	on certain facilties
	 */

	x25_limit_facilities(&facilities, nb);

	/*
	 *	Try to create a new socket.
	 */
	make = x25_make_new(sk);
	if (!make)
		goto out_sock_put;

	/*
	 *	Remove the facilities
	 */
	skb_pull(skb, len);

	skb->sk        = make;
	make->sk_state = TCP_ESTABLISHED;

	makex25 = x25_sk(make);
	makex25->lci           = lci;
	makex25->dest_addr     = dest_addr;
	makex25->source_addr   = source_addr;
	makex25->neighbour     = nb;
	makex25->facilities    = facilities;
	makex25->dte_facilities= dte_facilities;
	makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
	/* ensure no reverse facil on accept */
	makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
	/* ensure no calling address extension on accept */
	makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
	makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;

	/* Normally all calls are accepted immediately */
	if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
		x25_write_internal(make, X25_CALL_ACCEPTED);
		makex25->state = X25_STATE_3;
	}

	/*
	 *	Incoming Call User Data.
	 */
	skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
	makex25->calluserdata.cudlength = skb->len;

	sk->sk_ack_backlog++;

	x25_insert_socket(make);

	/* queue the call on the listener for accept() to pick up */
	skb_queue_head(&sk->sk_receive_queue, skb);

	x25_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	rc = 1;
	sock_put(sk);
out:
	return rc;
out_sock_put:
	sock_put(sk);
out_clear_request:
	rc = 0;
	x25_transmit_clear_request(nb, lci, 0x01);
	goto out;
}

static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
	struct sockaddr_x25 sx25;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	size_t size;
	int qbit = 0, rc = -EINVAL;

	lock_kernel();
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
		goto out;

	/* we currently don't support segmented records at the user interface */
	if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
		goto out;

	rc = -EADDRNOTAVAIL;
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	rc = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		goto out;
	}

	rc = -ENETUNREACH;
	if (!x25->neighbour)
		goto out;

	if (usx25) {
		/* sendto(): destination must match the connected peer */
		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(sx25))
			goto out;
		memcpy(&sx25, usx25, sizeof(sx25));
		rc = -EISCONN;
		if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
			goto out;
		rc = -EINVAL;
		if (sx25.sx25_family != AF_X25)
			goto out;
	} else {
		rc = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		sx25.sx25_family = AF_X25;
		sx25.sx25_addr   = x25->dest_addr;
	}

	/* Sanity check the packet size */
	if (len > 65535) {
		rc = -EMSGSIZE;
		goto out;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");

	/* Interrupt data is limited to 32 octets */
	if ((msg->msg_flags & MSG_OOB) && len > 32)
		len = 32;

	size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	if (!skb)
		goto out;
	X25_SKB_CB(skb)->flags = msg->msg_flags;

	skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);

	/*
	 *	Put the data on the end
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (rc)
		goto out_kfree_skb;

	/*
	 *	If the Q BIT Include socket option is in force, the first
	 *	byte of the user data is the logical value of the Q Bit.
	 */
	if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 *	Push down the X.25 header
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");

	if (msg->msg_flags & MSG_OOB) {
		if (x25->neighbour->extended) {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		} else {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		}
	} else {
		if (x25->neighbour->extended) {
			/* Build an Extended X.25 header */
			asmptr    = skb_push(skb, X25_EXT_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
			*asmptr++ = X25_DATA;
		} else {
			/* Build an Standard X.25 header */
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
		}

		if (qbit)
			skb->data[0] |= X25_Q_BIT;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
	SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");

	rc = -ENOTCONN;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out_kfree_skb;

	if (msg->msg_flags & MSG_OOB)
		skb_queue_tail(&x25->interrupt_out_queue, skb);
	else {
		rc = x25_output(sk, skb);
		len = rc;
		if (rc < 0)
			kfree_skb(skb);
		else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
			len++;
	}

	/*
	 * lock_sock() is currently only used to serialize this x25_kick()
	 * against input-driven x25_kick() calls.
 It currently only blocks
	 * incoming packets for this socket and does not protect against
	 * any other socket state changes and is not called from anywhere
	 * else. As x25_kick() cannot block and as long as all socket
	 * operations are BKL-wrapped, we don't need take to care about
	 * purging the backlog queue in x25_release().
	 *
	 * Using lock_sock() to protect all socket operations entirely
	 * (and making the whole x25 stack SMP aware) unfortunately would
	 * require major changes to {send,recv}msg and skb allocation methods.
	 * -> 2.5 ;)
	 */
	lock_sock(sk);
	x25_kick(sk);
	release_sock(sk);
	rc = len;
out:
	unlock_kernel();
	return rc;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}


static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
	size_t copied;
	int qbit;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int rc = -ENOTCONN;

	lock_kernel();
	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	if (flags & MSG_OOB) {
		rc = -EINVAL;
		if (sock_flag(sk, SOCK_URGINLINE) ||
		    !skb_peek(&x25->interrupt_in_queue))
			goto out;

		skb = skb_dequeue(&x25->interrupt_in_queue);

		skb_pull(skb, X25_STD_MIN_LEN);

		/*
		 *	No Q bit information on Interrupt data.
		 */
		if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
			asmptr  = skb_push(skb, 1);
			*asmptr = 0x00;
		}

		msg->msg_flags |= MSG_OOB;
	} else {
		/* Now we can treat all alike */
		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &rc);
		if (!skb)
			goto out;

		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

		skb_pull(skb, x25->neighbour->extended ?
				X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

		/* Q-bit mode: prepend the Q bit as the first data byte */
		if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
			asmptr  = skb_push(skb, 1);
			*asmptr = qbit;
		}
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Currently, each datagram always contains a complete record */
	msg->msg_flags |= MSG_EOR;

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto out_free_dgram;

	if (sx25) {
		sx25->sx25_family = AF_X25;
		sx25->sx25_addr   = x25->dest_addr;
	}

	msg->msg_namelen = sizeof(struct sockaddr_x25);

	lock_sock(sk);
	x25_check_rbuf(sk);
	release_sock(sk);
	rc = copied;
out_free_dgram:
	skb_free_datagram(sk, skb);
out:
	unlock_kernel();
	return rc;
}


static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	void __user *argp = (void __user *)arg;
	int rc;

	lock_kernel();
	switch (cmd) {
	case TIOCOUTQ: {
		/* bytes of send-buffer space remaining */
		int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);

		if (amount < 0)
			amount = 0;
		rc = put_user(amount, (unsigned int __user *)argp);
		break;
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;
		/*
		 * These two are safe on a single CPU system as
		 * only user tasks fiddle here
		 */
		if ((skb =
skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;	/* length of next record only */
		rc = put_user(amount, (unsigned int __user *)argp);
		break;
	}

	case SIOCGSTAMP:
		/* Last packet timestamp (timeval). */
		rc = -EINVAL;
		if (sk)
			rc = sock_get_timestamp(sk,
						(struct timeval __user *)argp);
		break;
	case SIOCGSTAMPNS:
		/* Last packet timestamp (timespec). */
		rc = -EINVAL;
		if (sk)
			rc = sock_get_timestampns(sk,
					(struct timespec __user *)argp);
		break;
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		/* Interface-address ioctls are meaningless for X.25. */
		rc = -EINVAL;
		break;
	case SIOCADDRT:
	case SIOCDELRT:
		/* Route table changes are privileged. */
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_route_ioctl(cmd, argp);
		break;
	case SIOCX25GSUBSCRIP:
		rc = x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25SSUBSCRIP:
		/* Changing subscription data is privileged. */
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25GFACILITIES: {
		/* Copy current facilities out via a stack snapshot. */
		struct x25_facilities fac = x25->facilities;
		rc = copy_to_user(argp, &fac,
				  sizeof(fac)) ? -EFAULT : 0;
		break;
	}

	case SIOCX25SFACILITIES: {
		struct x25_facilities facilities;
		rc = -EFAULT;
		if (copy_from_user(&facilities, argp,
				   sizeof(facilities)))
			break;
		/* Facilities may only change before the call is set up. */
		rc = -EINVAL;
		if (sk->sk_state != TCP_LISTEN &&
		    sk->sk_state != TCP_CLOSE)
			break;
		if (facilities.pacsize_in < X25_PS16 ||
		    facilities.pacsize_in > X25_PS4096)
			break;
		if (facilities.pacsize_out < X25_PS16 ||
		    facilities.pacsize_out > X25_PS4096)
			break;
		/* NOTE(review): only winsize_in is range-checked here;
		 * winsize_out is presumably constrained during facility
		 * negotiation — verify. */
		if (facilities.winsize_in < 1 ||
		    facilities.winsize_in > 127)
			break;
		if (facilities.throughput) {
			/* Throughput: high nibble = outgoing class,
			 * low nibble = incoming; 0 means "use default". */
			int out = facilities.throughput & 0xf0;
			int in  = facilities.throughput & 0x0f;
			if (!out)
				facilities.throughput |=
					X25_DEFAULT_THROUGHPUT << 4;
			else if (out < 0x30 || out > 0xD0)
				break;
			if (!in)
				facilities.throughput |=
					X25_DEFAULT_THROUGHPUT;
			else if (in < 0x03 || in > 0x0D)
				break;
		}
		if (facilities.reverse &&
		    (facilities.reverse & 0x81) != 0x81)
			break;
		x25->facilities = facilities;
		rc = 0;
		break;
	}

	case SIOCX25GDTEFACILITIES: {
		rc = copy_to_user(argp, &x25->dte_facilities,
				  sizeof(x25->dte_facilities));
		if (rc)
			rc = -EFAULT;
		break;
	}

	case SIOCX25SDTEFACILITIES: {
		struct x25_dte_facilities dtefacs;
		rc = -EFAULT;
		if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
			break;
		/* DTE facilities may only change before call set-up,
		 * and AE lengths must fit the fixed-size buffers. */
		rc = -EINVAL;
		if (sk->sk_state != TCP_LISTEN &&
		    sk->sk_state != TCP_CLOSE)
			break;
		if (dtefacs.calling_len > X25_MAX_AE_LEN)
			break;
		if (dtefacs.calling_ae == NULL)
			break;
		if (dtefacs.called_len > X25_MAX_AE_LEN)
			break;
		if (dtefacs.called_ae == NULL)
			break;
		x25->dte_facilities = dtefacs;
		rc = 0;
		break;
	}

	case SIOCX25GCALLUSERDATA: {
		struct x25_calluserdata cud = x25->calluserdata;
		rc = copy_to_user(argp, &cud,
				  sizeof(cud)) ? -EFAULT : 0;
		break;
	}

	case SIOCX25SCALLUSERDATA: {
		struct x25_calluserdata calluserdata;

		rc = -EFAULT;
		if (copy_from_user(&calluserdata, argp,
				   sizeof(calluserdata)))
			break;
		rc = -EINVAL;
		if (calluserdata.cudlength > X25_MAX_CUD_LEN)
			break;
		x25->calluserdata = calluserdata;
		rc = 0;
		break;
	}

	case SIOCX25GCAUSEDIAG: {
		/* Cause/diagnostic bytes from the last clear/reset. */
		struct x25_causediag causediag;
		causediag = x25->causediag;
		rc = copy_to_user(argp, &causediag,
				  sizeof(causediag)) ? -EFAULT : 0;
		break;
	}

	case SIOCX25SCAUSEDIAG: {
		struct x25_causediag causediag;
		rc = -EFAULT;
		if (copy_from_user(&causediag, argp, sizeof(causediag)))
			break;
		x25->causediag = causediag;
		rc = 0;
		break;

	}

	case SIOCX25SCUDMATCHLEN: {
		/* Selective listen: number of call-user-data bytes that
		 * must match when accepting incoming calls. */
		struct x25_subaddr sub_addr;
		rc = -EINVAL;
		if(sk->sk_state != TCP_CLOSE)
			break;
		rc = -EFAULT;
		if (copy_from_user(&sub_addr, argp,
				   sizeof(sub_addr)))
			break;
		rc = -EINVAL;
		if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
			break;
		x25->cudmatchlength = sub_addr.cudmatchlength;
		rc = 0;
		break;
	}

	case SIOCX25CALLACCPTAPPRV: {
		/* Defer call acceptance: the application must later issue
		 * SIOCX25SENDCALLACCPT itself. */
		rc = -EINVAL;
		if (sk->sk_state != TCP_CLOSE)
			break;
		clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
		rc = 0;
		break;
	}

	case SIOCX25SENDCALLACCPT: {
		rc = -EINVAL;
		if (sk->sk_state != TCP_ESTABLISHED)
			break;
		/* must call accptapprv above */
		if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
			break;
		x25_write_internal(sk, X25_CALL_ACCEPTED);
		x25->state = X25_STATE_3;
		rc = 0;
		break;
	}

	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	unlock_kernel();

	return rc;
}

static const struct net_proto_family x25_family_ops = {
	.family =	AF_X25,
	.create =	x25_create,
	.owner =
THIS_MODULE, 1604}; 1605 1606#ifdef CONFIG_COMPAT 1607static int compat_x25_subscr_ioctl(unsigned int cmd, 1608 struct compat_x25_subscrip_struct __user *x25_subscr32) 1609{ 1610 struct compat_x25_subscrip_struct x25_subscr; 1611 struct x25_neigh *nb; 1612 struct net_device *dev; 1613 int rc = -EINVAL; 1614 1615 rc = -EFAULT; 1616 if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) 1617 goto out; 1618 1619 rc = -EINVAL; 1620 dev = x25_dev_get(x25_subscr.device); 1621 if (dev == NULL) 1622 goto out; 1623 1624 nb = x25_get_neigh(dev); 1625 if (nb == NULL) 1626 goto out_dev_put; 1627 1628 dev_put(dev); 1629 1630 if (cmd == SIOCX25GSUBSCRIP) { 1631 x25_subscr.extended = nb->extended; 1632 x25_subscr.global_facil_mask = nb->global_facil_mask; 1633 rc = copy_to_user(x25_subscr32, &x25_subscr, 1634 sizeof(*x25_subscr32)) ? -EFAULT : 0; 1635 } else { 1636 rc = -EINVAL; 1637 if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { 1638 rc = 0; 1639 nb->extended = x25_subscr.extended; 1640 nb->global_facil_mask = x25_subscr.global_facil_mask; 1641 } 1642 } 1643 x25_neigh_put(nb); 1644out: 1645 return rc; 1646out_dev_put: 1647 dev_put(dev); 1648 goto out; 1649} 1650 1651static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, 1652 unsigned long arg) 1653{ 1654 void __user *argp = compat_ptr(arg); 1655 struct sock *sk = sock->sk; 1656 1657 int rc = -ENOIOCTLCMD; 1658 1659 switch(cmd) { 1660 case TIOCOUTQ: 1661 case TIOCINQ: 1662 rc = x25_ioctl(sock, cmd, (unsigned long)argp); 1663 break; 1664 case SIOCGSTAMP: 1665 rc = -EINVAL; 1666 lock_kernel(); 1667 if (sk) 1668 rc = compat_sock_get_timestamp(sk, 1669 (struct timeval __user*)argp); 1670 unlock_kernel(); 1671 break; 1672 case SIOCGSTAMPNS: 1673 rc = -EINVAL; 1674 lock_kernel(); 1675 if (sk) 1676 rc = compat_sock_get_timestampns(sk, 1677 (struct timespec __user*)argp); 1678 unlock_kernel(); 1679 break; 1680 case SIOCGIFADDR: 1681 case SIOCSIFADDR: 1682 case SIOCGIFDSTADDR: 1683 case 
SIOCSIFDSTADDR: 1684 case SIOCGIFBRDADDR: 1685 case SIOCSIFBRDADDR: 1686 case SIOCGIFNETMASK: 1687 case SIOCSIFNETMASK: 1688 case SIOCGIFMETRIC: 1689 case SIOCSIFMETRIC: 1690 rc = -EINVAL; 1691 break; 1692 case SIOCADDRT: 1693 case SIOCDELRT: 1694 rc = -EPERM; 1695 if (!capable(CAP_NET_ADMIN)) 1696 break; 1697 lock_kernel(); 1698 rc = x25_route_ioctl(cmd, argp); 1699 unlock_kernel(); 1700 break; 1701 case SIOCX25GSUBSCRIP: 1702 lock_kernel(); 1703 rc = compat_x25_subscr_ioctl(cmd, argp); 1704 unlock_kernel(); 1705 break; 1706 case SIOCX25SSUBSCRIP: 1707 rc = -EPERM; 1708 if (!capable(CAP_NET_ADMIN)) 1709 break; 1710 lock_kernel(); 1711 rc = compat_x25_subscr_ioctl(cmd, argp); 1712 unlock_kernel(); 1713 break; 1714 case SIOCX25GFACILITIES: 1715 case SIOCX25SFACILITIES: 1716 case SIOCX25GDTEFACILITIES: 1717 case SIOCX25SDTEFACILITIES: 1718 case SIOCX25GCALLUSERDATA: 1719 case SIOCX25SCALLUSERDATA: 1720 case SIOCX25GCAUSEDIAG: 1721 case SIOCX25SCAUSEDIAG: 1722 case SIOCX25SCUDMATCHLEN: 1723 case SIOCX25CALLACCPTAPPRV: 1724 case SIOCX25SENDCALLACCPT: 1725 rc = x25_ioctl(sock, cmd, (unsigned long)argp); 1726 break; 1727 default: 1728 rc = -ENOIOCTLCMD; 1729 break; 1730 } 1731 return rc; 1732} 1733#endif 1734 1735static const struct proto_ops x25_proto_ops = { 1736 .family = AF_X25, 1737 .owner = THIS_MODULE, 1738 .release = x25_release, 1739 .bind = x25_bind, 1740 .connect = x25_connect, 1741 .socketpair = sock_no_socketpair, 1742 .accept = x25_accept, 1743 .getname = x25_getname, 1744 .poll = x25_datagram_poll, 1745 .ioctl = x25_ioctl, 1746#ifdef CONFIG_COMPAT 1747 .compat_ioctl = compat_x25_ioctl, 1748#endif 1749 .listen = x25_listen, 1750 .shutdown = sock_no_shutdown, 1751 .setsockopt = x25_setsockopt, 1752 .getsockopt = x25_getsockopt, 1753 .sendmsg = x25_sendmsg, 1754 .recvmsg = x25_recvmsg, 1755 .mmap = sock_no_mmap, 1756 .sendpage = sock_no_sendpage, 1757}; 1758 1759static struct packet_type x25_packet_type __read_mostly = { 1760 .type = cpu_to_be16(ETH_P_X25), 
1761 .func = x25_lapb_receive_frame, 1762}; 1763 1764static struct notifier_block x25_dev_notifier = { 1765 .notifier_call = x25_device_event, 1766}; 1767 1768void x25_kill_by_neigh(struct x25_neigh *nb) 1769{ 1770 struct sock *s; 1771 struct hlist_node *node; 1772 1773 write_lock_bh(&x25_list_lock); 1774 1775 sk_for_each(s, node, &x25_list) 1776 if (x25_sk(s)->neighbour == nb) 1777 x25_disconnect(s, ENETUNREACH, 0, 0); 1778 1779 write_unlock_bh(&x25_list_lock); 1780 1781 /* Remove any related forwards */ 1782 x25_clear_forward_by_dev(nb->dev); 1783} 1784 1785static int __init x25_init(void) 1786{ 1787 int rc = proto_register(&x25_proto, 0); 1788 1789 if (rc != 0) 1790 goto out; 1791 1792 rc = sock_register(&x25_family_ops); 1793 if (rc != 0) 1794 goto out_proto; 1795 1796 dev_add_pack(&x25_packet_type); 1797 1798 rc = register_netdevice_notifier(&x25_dev_notifier); 1799 if (rc != 0) 1800 goto out_sock; 1801 1802 printk(KERN_INFO "X.25 for Linux Version 0.2\n"); 1803 1804 x25_register_sysctl(); 1805 rc = x25_proc_init(); 1806 if (rc != 0) 1807 goto out_dev; 1808out: 1809 return rc; 1810out_dev: 1811 unregister_netdevice_notifier(&x25_dev_notifier); 1812out_sock: 1813 sock_unregister(AF_X25); 1814out_proto: 1815 proto_unregister(&x25_proto); 1816 goto out; 1817} 1818module_init(x25_init); 1819 1820static void __exit x25_exit(void) 1821{ 1822 x25_proc_exit(); 1823 x25_link_free(); 1824 x25_route_free(); 1825 1826 x25_unregister_sysctl(); 1827 1828 unregister_netdevice_notifier(&x25_dev_notifier); 1829 1830 dev_remove_pack(&x25_packet_type); 1831 1832 sock_unregister(AF_X25); 1833 proto_unregister(&x25_proto); 1834} 1835module_exit(x25_exit); 1836 1837MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); 1838MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol"); 1839MODULE_LICENSE("GPL"); 1840MODULE_ALIAS_NETPROTO(PF_X25); 1841