/*
 * OpenVPN -- An application to securely tunnel IP networks
 * over a single TCP/UDP port, with support for SSL/TLS-based
 * session authentication and key exchange,
 * packet encryption, packet authentication, and
 * packet compression.
 *
 * Copyright (C) 2002-2010 OpenVPN Technologies, Inc. <sales@openvpn.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (see the file COPYING included with this
 * distribution); if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * @file Header file for server-mode related structures and functions.
 */

#ifndef MULTI_H
#define MULTI_H

#if P2MP_SERVER

#include "init.h"
#include "forward.h"
#include "mroute.h"
#include "mbuf.h"
#include "list.h"
#include "schedule.h"
#include "pool.h"
#include "mudp.h"
#include "mtcp.h"
#include "perf.h"

/*
 * Walk (don't run) through the routing table,
 * deleting old entries, and possibly multi_instance
 * structs as well which have been marked for deletion.
 */
struct multi_reap
{
  int bucket_base;         /* first hash bucket to examine on the next pass */
  int buckets_per_pass;    /* number of hash buckets covered by one pass */
  time_t last_call;        /* time of the most recent reap pass */
};


/**
 * Server-mode state structure for one single VPN tunnel.
 *
 * This structure is used by OpenVPN processes running in server-mode to
 * store state information related to one single VPN tunnel.
 *
 * The @ref tunnel_state "Structure of VPN tunnel state storage" related
 * page describes the role the structure plays when OpenVPN is running in
 * server-mode.
 */
struct multi_instance {
  struct schedule_entry se;    /* this must be the first element of the structure */
  struct gc_arena gc;          /* arena freed when the refcount drops to zero
                                * (see multi_instance_dec_refcount()) */
  bool defined;
  bool halt;                   /* when true, routes referencing this instance are
                                * treated as dead (see multi_route_defined()) */
  int refcount;
  int route_count;             /* number of routes (including cached routes) owned by this instance */
  time_t created;              /**< Time at which a VPN tunnel instance
                                *   was created.  This parameter is set
                                *   by the \c multi_create_instance()
                                *   function. */
  struct timeval wakeup;       /* absolute time */
  struct mroute_addr real;     /**< External network address of the
                                *   remote peer. */
  ifconfig_pool_handle vaddr_handle;
  const char *msg_prefix;      /* prefix installed by set_prefix() for msg() output */

  /* queued outgoing data in Server/TCP mode */
  unsigned int tcp_rwflags;
  struct mbuf_set *tcp_link_out_deferred;
  bool socket_set_called;

  in_addr_t reporting_addr;    /* IP address shown in status listing */

  bool did_open_context;
  bool did_real_hash;
  bool did_iter;
#ifdef MANAGEMENT_DEF_AUTH
  bool did_cid_hash;
  struct buffer_list *cc_config;
#endif
  bool connection_established_flag;
  bool did_iroutes;
  int n_clients_delta;         /* added to multi_context.n_clients when instance is closed */

  struct context context;      /**< The context structure storing state
                                *   for this VPN tunnel. */
};


/**
 * Main OpenVPN server state structure.
 *
 * This structure is used by OpenVPN processes running in server-mode to
 * store all the VPN tunnel and process-wide state.
 *
 * The @ref tunnel_state "Structure of VPN tunnel state storage" related
 * page describes the role the structure plays when OpenVPN is running in
 * server-mode.
 */
struct multi_context {
  /* values for the thread_mode member below */
# define MC_UNDEF                    0
# define MC_SINGLE_THREADED          (1<<0)
# define MC_MULTI_THREADED_MASTER    (1<<1)
# define MC_MULTI_THREADED_WORKER    (1<<2)
# define MC_MULTI_THREADED_SCHEDULER (1<<3)
# define MC_WORK_THREAD              (MC_MULTI_THREADED_WORKER|MC_MULTI_THREADED_SCHEDULER)
  int thread_mode;                /* one of the MC_x values above */

  struct hash *hash;              /**< VPN tunnel instances indexed by real
                                   *   address of the remote peer. */
  struct hash *vhash;             /**< VPN tunnel instances indexed by
                                   *   virtual address of remote hosts. */
  struct hash *iter;              /**< VPN tunnel instances indexed by real
                                   *   address of the remote peer, optimized
                                   *   for iteration. */
  struct schedule *schedule;
  struct mbuf_set *mbuf;          /**< Set of buffers for passing data
                                   *   channel packets between VPN tunnel
                                   *   instances. */
  struct multi_tcp *mtcp;         /**< State specific to OpenVPN using TCP
                                   *   as external transport. */
  struct ifconfig_pool *ifconfig_pool;
  struct frequency_limit *new_connection_limiter;
  struct mroute_helper *route_helper;
  struct multi_reap *reaper;      /* incremental hash-table reaper state */
  struct mroute_addr local;
  bool enable_c2c;                /* presumably enables client-to-client
                                   * forwarding -- TODO confirm in multi.c */
  int max_clients;
  int tcp_queue_limit;            /* compared against mbuf_len() in
                                   * multi_output_queue_ready() */
  int status_file_version;
  int n_clients;                  /* current number of authenticated clients */

#ifdef MANAGEMENT_DEF_AUTH
  struct hash *cid_hash;
  unsigned long cid_counter;
#endif

  struct multi_instance *pending;
  struct multi_instance *earliest_wakeup;
  struct multi_instance **mpp_touched;
  struct context_buffers *context_buffers;
  time_t per_second_trigger;      /* time of the most recent per-second timer run */

  struct context top;             /**< Storage structure for process-wide
                                   *   configuration. */

  /*
   * Timer object for stale route check
   */
  struct event_timeout stale_routes_check_et;
};

/*
 * Host route
 */
struct multi_route
{
  struct mroute_addr addr;
  struct multi_instance *instance;

# define MULTI_ROUTE_CACHE   (1<<0)
# define MULTI_ROUTE_AGEABLE (1<<1)
  unsigned int flags;

  unsigned int cache_generation;  /* compared against route_helper generation
                                   * in multi_route_defined() */
  time_t last_reference;          /* last time this route entry was referenced */
};


/**************************************************************************/
/**
 * Main event loop for OpenVPN in server mode.
 * @ingroup eventloop
 *
 * This function calls the appropriate main event loop function depending
 * on the transport protocol used:
 * - \c tunnel_server_udp()
 * - \c tunnel_server_tcp()
 *
 * @param top - Top-level context structure.
 */
void tunnel_server (struct context *top);


const char *multi_instance_string (const struct multi_instance *mi, bool null, struct gc_arena *gc);

/*
 * Called by mtcp.c, mudp.c, or other (to be written) protocol drivers
 */

void multi_init (struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode);
void multi_uninit (struct multi_context *m);

void multi_top_init (struct multi_context *m, const struct context *top, const bool alloc_buffers);
void multi_top_free (struct multi_context *m);

struct multi_instance *multi_create_instance (struct multi_context *m, const struct mroute_addr *real);
void multi_close_instance (struct multi_context *m, struct multi_instance *mi, bool shutdown);

bool multi_process_timeout (struct multi_context *m, const unsigned int mpp_flags);

/* Fast I/O optimization flags passed to the multi_process_x() functions */
#define MPP_PRE_SELECT             (1<<0)
#define MPP_CONDITIONAL_PRE_SELECT (1<<1)
#define MPP_CLOSE_ON_SIGNAL        (1<<2)
#define MPP_RECORD_TOUCH           (1<<3)


/**************************************************************************/
/**
 * Perform postprocessing of a VPN tunnel instance.
 *
 * After some VPN tunnel activity has taken place, the VPN tunnel's state
 * may need updating and some follow-up action may be required.  This
 * function controls the necessary postprocessing.  It is called by many
 * other functions that handle VPN tunnel related activity, such as \c
 * multi_process_incoming_link(), \c multi_process_outgoing_link(), \c
 * multi_process_incoming_tun(), \c multi_process_outgoing_tun(), and \c
 * multi_process_timeout(), among others.
 *
 * @param m     - The single \c multi_context structure.
 * @param mi    - The \c multi_instance of the VPN tunnel to be
 *                postprocessed.
 * @param flags - Fast I/O optimization flags.
 *
 * @return
 *  - True, if the VPN tunnel instance \a mi was not closed due to a
 *    signal during processing.
 *  - False, if the VPN tunnel instance \a mi was closed.
 */
bool multi_process_post (struct multi_context *m, struct multi_instance *mi, const unsigned int flags);


/**************************************************************************/
/**
 * Demultiplex and process a packet received over the external network
 * interface.
 * @ingroup external_multiplexer
 *
 * This function determines which VPN tunnel instance the incoming packet
 * is associated with, and then calls \c process_incoming_link() to handle
 * it.  Afterwards, if the packet is destined for a broadcast/multicast
 * address or a remote host reachable through a different VPN tunnel, this
 * function takes care of sending it to where it needs to go.
 *
 * @note This function is only used by OpenVPN processes which are running
 *     in server mode, and can therefore sustain multiple active VPN
 *     tunnels.
 *
 * @param m         - The single \c multi_context structure.
 * @param instance  - The VPN tunnel state structure associated with
 *                    the incoming packet, if known, as is the case
 *                    when using TCP transport.  Otherwise NULL, as is
 *                    the case when using UDP transport.
 * @param mpp_flags - Fast I/O optimization flags.
 */
bool multi_process_incoming_link (struct multi_context *m, struct multi_instance *instance, const unsigned int mpp_flags);


/**
 * Determine the destination VPN tunnel of a packet received over the
 * virtual tun/tap network interface and then process it accordingly.
 * @ingroup internal_multiplexer
 *
 * This function determines which VPN tunnel instance the packet is
 * destined for, and then calls \c process_outgoing_tun() to handle it.
 *
 * @note This function is only used by OpenVPN processes which are running
 *     in server mode, and can therefore sustain multiple active VPN
 *     tunnels.
 *
 * @param m         - The single \c multi_context structure.
 * @param mpp_flags - Fast I/O optimization flags.
 */
bool multi_process_incoming_tun (struct multi_context *m, const unsigned int mpp_flags);


void multi_process_drop_outgoing_tun (struct multi_context *m, const unsigned int mpp_flags);

void multi_print_status (struct multi_context *m, struct status_output *so, const int version);

struct multi_instance *multi_get_queue (struct mbuf_set *ms);

void multi_add_mbuf (struct multi_context *m,
		     struct multi_instance *mi,
		     struct mbuf_buffer *mb);

void multi_ifconfig_pool_persist (struct multi_context *m, bool force);

bool multi_process_signal (struct multi_context *m);

void multi_close_instance_on_signal (struct multi_context *m, struct multi_instance *mi);

void init_management_callback_multi (struct multi_context *m);
void uninit_management_callback_multi (struct multi_context *m);

/*
 * Return true if our output queue is not full
 */
static inline bool
multi_output_queue_ready (const struct multi_context *m,
			  const struct multi_instance *mi)
{
  if (mi->tcp_link_out_deferred)
    return mbuf_len
(mi->tcp_link_out_deferred) <= m->tcp_queue_limit; 323 else 324 return true; 325} 326 327/* 328 * Determine which instance has pending output 329 * and prepare the output for sending in 330 * the to_link buffer. 331 */ 332static inline struct multi_instance * 333multi_process_outgoing_link_pre (struct multi_context *m) 334{ 335 struct multi_instance *mi = NULL; 336 337 if (m->pending) 338 mi = m->pending; 339 else if (mbuf_defined (m->mbuf)) 340 mi = multi_get_queue (m->mbuf); 341 return mi; 342} 343 344/* 345 * Per-client route quota management 346 */ 347 348void route_quota_exceeded (const struct multi_context *m, const struct multi_instance *mi); 349 350static inline void 351route_quota_inc (struct multi_instance *mi) 352{ 353 ++mi->route_count; 354} 355 356static inline void 357route_quota_dec (struct multi_instance *mi) 358{ 359 --mi->route_count; 360} 361 362/* can we add a new route? */ 363static inline bool 364route_quota_test (const struct multi_context *m, const struct multi_instance *mi) 365{ 366 if (mi->route_count >= mi->context.options.max_routes_per_client) 367 { 368 route_quota_exceeded (m, mi); 369 return false; 370 } 371 else 372 return true; 373} 374 375/* 376 * Instance reference counting 377 */ 378 379static inline void 380multi_instance_inc_refcount (struct multi_instance *mi) 381{ 382 ++mi->refcount; 383} 384 385static inline void 386multi_instance_dec_refcount (struct multi_instance *mi) 387{ 388 if (--mi->refcount <= 0) 389 { 390 gc_free (&mi->gc); 391 free (mi); 392 } 393} 394 395static inline void 396multi_route_del (struct multi_route *route) 397{ 398 struct multi_instance *mi = route->instance; 399 route_quota_dec (mi); 400 multi_instance_dec_refcount (mi); 401 free (route); 402} 403 404static inline bool 405multi_route_defined (const struct multi_context *m, 406 const struct multi_route *r) 407{ 408 if (r->instance->halt) 409 return false; 410 else if ((r->flags & MULTI_ROUTE_CACHE) 411 && r->cache_generation != 
m->route_helper->cache_generation) 412 return false; 413 else if ((r->flags & MULTI_ROUTE_AGEABLE) 414 && r->last_reference + m->route_helper->ageable_ttl_secs < now) 415 return false; 416 else 417 return true; 418} 419 420/* 421 * Set a msg() function prefix with our current client instance ID. 422 */ 423 424static inline void 425set_prefix (struct multi_instance *mi) 426{ 427#ifdef MULTI_DEBUG_EVENT_LOOP 428 if (mi->msg_prefix) 429 printf ("[%s]\n", mi->msg_prefix); 430#endif 431 msg_set_prefix (mi->msg_prefix); 432} 433 434static inline void 435clear_prefix (void) 436{ 437#ifdef MULTI_DEBUG_EVENT_LOOP 438 printf ("[NULL]\n"); 439#endif 440 msg_set_prefix (NULL); 441} 442 443/* 444 * Instance Reaper 445 * 446 * Reaper constants. The reaper is the process where the virtual address 447 * and virtual route hash table is scanned for dead entries which are 448 * then removed. The hash table could potentially be quite large, so we 449 * don't want to reap in a single pass. 450 */ 451 452#define REAP_MAX_WAKEUP 10 /* Do reap pass at least once per n seconds */ 453#define REAP_DIVISOR 256 /* How many passes to cover whole hash table */ 454#define REAP_MIN 16 /* Minimum number of buckets per pass */ 455#define REAP_MAX 1024 /* Maximum number of buckets per pass */ 456 457/* 458 * Mark a cached host route for deletion after this 459 * many seconds without any references. 
460 */ 461#define MULTI_CACHE_ROUTE_TTL 60 462 463static inline void 464multi_reap_process (const struct multi_context *m) 465{ 466 void multi_reap_process_dowork (const struct multi_context *m); 467 if (m->reaper->last_call != now) 468 multi_reap_process_dowork (m); 469} 470 471static inline void 472multi_process_per_second_timers (struct multi_context *m) 473{ 474 if (m->per_second_trigger != now) 475 { 476 void multi_process_per_second_timers_dowork (struct multi_context *m); 477 multi_process_per_second_timers_dowork (m); 478 m->per_second_trigger = now; 479 } 480} 481 482/* 483 * Compute earliest timeout expiry from the set of 484 * all instances. Output: 485 * 486 * m->earliest_wakeup : instance needing the earliest service. 487 * dest : earliest timeout as a delta in relation 488 * to current time. 489 */ 490static inline void 491multi_get_timeout (struct multi_context *m, struct timeval *dest) 492{ 493 struct timeval tv, current; 494 495 CLEAR (tv); 496 m->earliest_wakeup = (struct multi_instance *) schedule_get_earliest_wakeup (m->schedule, &tv); 497 if (m->earliest_wakeup) 498 { 499 ASSERT (!openvpn_gettimeofday (¤t, NULL)); 500 tv_delta (dest, ¤t, &tv); 501 if (dest->tv_sec >= REAP_MAX_WAKEUP) 502 { 503 m->earliest_wakeup = NULL; 504 dest->tv_sec = REAP_MAX_WAKEUP; 505 dest->tv_usec = 0; 506 } 507 } 508 else 509 { 510 dest->tv_sec = REAP_MAX_WAKEUP; 511 dest->tv_usec = 0; 512 } 513} 514 515 516/** 517 * Send a packet over the virtual tun/tap network interface to its locally 518 * reachable destination. 519 * @ingroup internal_multiplexer 520 * 521 * This function calls \c process_outgoing_tun() to perform the actual 522 * sending of the packet. Afterwards, it calls \c multi_process_post() to 523 * perform server-mode postprocessing. 524 * 525 * @param m - The single \c multi_context structure. 526 * @param mpp_flags - Fast I/O optimization flags. 
527 * 528 * @return 529 * - True, if the \c multi_instance associated with the packet sent was 530 * not closed due to a signal during processing. 531 * - Falls, if the \c multi_instance was closed. 532 */ 533static inline bool 534multi_process_outgoing_tun (struct multi_context *m, const unsigned int mpp_flags) 535{ 536 struct multi_instance *mi = m->pending; 537 bool ret = true; 538 539 ASSERT (mi); 540#ifdef MULTI_DEBUG_EVENT_LOOP 541 printf ("%s -> TUN len=%d\n", 542 id(mi), 543 mi->context.c2.to_tun.len); 544#endif 545 set_prefix (mi); 546 process_outgoing_tun (&mi->context); 547 ret = multi_process_post (m, mi, mpp_flags); 548 clear_prefix (); 549 return ret; 550} 551 552 553 554static inline bool 555multi_process_outgoing_link_dowork (struct multi_context *m, struct multi_instance *mi, const unsigned int mpp_flags) 556{ 557 bool ret = true; 558 set_prefix (mi); 559 process_outgoing_link (&mi->context); 560 ret = multi_process_post (m, mi, mpp_flags); 561 clear_prefix (); 562 return ret; 563} 564 565/* 566 * Check for signals. 567 */ 568#define MULTI_CHECK_SIG(m) EVENT_LOOP_CHECK_SIGNAL (&(m)->top, multi_process_signal, (m)) 569 570static inline void 571multi_set_pending (struct multi_context *m, struct multi_instance *mi) 572{ 573 m->pending = mi; 574} 575 576static inline void 577multi_release_io_lock (struct multi_context *m) 578{ 579} 580 581#endif /* P2MP_SERVER */ 582#endif /* MULTI_H */ 583